controller.go 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631
  1. /*
  2. * Copyright (c) 2014, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. // Package psiphon implements the core tunnel functionality of a Psiphon client.
  20. // The main function is RunForever, which runs a Controller that obtains lists of
  21. // servers, establishes tunnel connections, and runs local proxies through which
  22. // tunneled traffic may be sent.
  23. package psiphon
  24. import (
  25. "errors"
  26. "fmt"
  27. "io"
  28. "log"
  29. "net"
  30. "os"
  31. "sync"
  32. "time"
  33. )
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
	// config holds the client configuration used by all components.
	config *Config
	// failureSignal receives failure reports from components; buffered
	// (size 1) so senders never block. See SignalFailure.
	failureSignal chan struct{}
	// shutdownBroadcast is closed by Run to tell all goroutines to stop.
	shutdownBroadcast chan struct{}
	// runWaitGroup tracks the top-level component goroutines launched by Run.
	runWaitGroup *sync.WaitGroup
	// establishedTunnels delivers newly connected tunnels to runTunnels;
	// buffered to TunnelPoolSize so establish workers never block.
	establishedTunnels chan *Tunnel
	// failedTunnels delivers failed tunnels from operateTunnel to runTunnels;
	// buffered to TunnelPoolSize so operators never block.
	failedTunnels chan *Tunnel
	// tunnelMutex guards tunnels and nextTunnel.
	tunnelMutex sync.Mutex
	// tunnels is the pool of active tunnels available for port forwarding.
	tunnels []*Tunnel
	// nextTunnel is the round-robin cursor used by getNextActiveTunnel.
	nextTunnel int
	// operateWaitGroup tracks the per-tunnel operateTunnel goroutines.
	operateWaitGroup *sync.WaitGroup
	// isEstablishing and the following fields are only accessed by the
	// runTunnels goroutine (see startEstablishing/stopEstablishing).
	isEstablishing bool
	// establishWaitGroup tracks establish workers and the candidate generator.
	establishWaitGroup *sync.WaitGroup
	// stopEstablishingBroadcast is closed to halt the establish goroutines.
	stopEstablishingBroadcast chan struct{}
	// candidateServerEntries feeds server candidates to establish workers.
	candidateServerEntries chan *ServerEntry
	// pendingConns is used to interrupt in-flight connection attempts
	// (and FetchRemoteServerList) during shutdown.
	pendingConns *Conns
}
  54. // NewController initializes a new controller.
  55. func NewController(config *Config) (controller *Controller) {
  56. return &Controller{
  57. config: config,
  58. // failureSignal receives a signal from a component (including socks and
  59. // http local proxies) if they unexpectedly fail. Senders should not block.
  60. // A buffer allows at least one stop signal to be sent before there is a receiver.
  61. failureSignal: make(chan struct{}, 1),
  62. shutdownBroadcast: make(chan struct{}),
  63. runWaitGroup: new(sync.WaitGroup),
  64. // establishedTunnels and failedTunnels buffer sizes are large enough to
  65. // receive full pools of tunnels without blocking. Senders should not block.
  66. establishedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  67. failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  68. tunnels: make([]*Tunnel, 0),
  69. operateWaitGroup: new(sync.WaitGroup),
  70. isEstablishing: false,
  71. pendingConns: new(Conns),
  72. }
  73. }
  74. // Run executes the controller. It launches components and then monitors
  75. // for a shutdown signal; after receiving the signal it shuts down the
  76. // controller.
  77. // The components include:
  78. // - the periodic remote server list fetcher
  79. // - the tunnel manager
  80. // - a local SOCKS proxy that port forwards through the pool of tunnels
  81. // - a local HTTP proxy that port forwards through the pool of tunnels
  82. func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
  83. socksProxy, err := NewSocksProxy(controller.config, controller)
  84. if err != nil {
  85. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  86. return
  87. }
  88. defer socksProxy.Close()
  89. httpProxy, err := NewHttpProxy(controller.config, controller)
  90. if err != nil {
  91. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  92. return
  93. }
  94. defer httpProxy.Close()
  95. controller.runWaitGroup.Add(2)
  96. go controller.remoteServerListFetcher()
  97. go controller.runTunnels()
  98. select {
  99. case <-shutdownBroadcast:
  100. Notice(NOTICE_INFO, "controller shutdown by request")
  101. case <-controller.failureSignal:
  102. Notice(NOTICE_ALERT, "controller shutdown due to failure")
  103. }
  104. // Note: in addition to establish(), this pendingConns will interrupt
  105. // FetchRemoteServerList
  106. controller.pendingConns.CloseAll()
  107. close(controller.shutdownBroadcast)
  108. controller.runWaitGroup.Wait()
  109. Notice(NOTICE_INFO, "exiting controller")
  110. }
  111. // SignalFailure notifies the controller that an associated component has failed.
  112. // This will terminate the controller.
  113. func (controller *Controller) SignalFailure() {
  114. select {
  115. case controller.failureSignal <- *new(struct{}):
  116. default:
  117. }
  118. }
  119. // remoteServerListFetcher fetches an out-of-band list of server entries
  120. // for more tunnel candidates. It fetches immediately, retries after failure
  121. // with a wait period, and refetches after success with a longer wait period.
  122. func (controller *Controller) remoteServerListFetcher() {
  123. defer controller.runWaitGroup.Done()
  124. // Note: unlike existing Psiphon clients, this code
  125. // always makes the fetch remote server list request
  126. loop:
  127. for {
  128. // TODO: FetchRemoteServerList should have its own pendingConns,
  129. // otherwise it may needlessly abort when establish is stopped.
  130. err := FetchRemoteServerList(controller.config, controller.pendingConns)
  131. var duration time.Duration
  132. if err != nil {
  133. Notice(NOTICE_ALERT, "failed to fetch remote server list: %s", err)
  134. duration = FETCH_REMOTE_SERVER_LIST_RETRY_TIMEOUT
  135. } else {
  136. duration = FETCH_REMOTE_SERVER_LIST_STALE_TIMEOUT
  137. }
  138. timeout := time.After(duration)
  139. select {
  140. case <-timeout:
  141. // Fetch again
  142. case <-controller.shutdownBroadcast:
  143. break loop
  144. }
  145. }
  146. Notice(NOTICE_INFO, "exiting remote server list fetcher")
  147. }
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
// When a tunnel is established, it's added to the active pool and a corresponding
// operateTunnel goroutine is launched which starts a session in the tunnel and
// monitors the tunnel for failures.
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
	defer controller.runWaitGroup.Done()
	// Don't start establishing until there are some server candidates. The
	// typical case is a client with no server entries which will wait for
	// the first successful FetchRemoteServerList to populate the data store.
	for {
		if HasServerEntries(
			controller.config.EgressRegion, controller.config.TunnelProtocol) {
			break
		}
		// TODO: replace polling with signal
		timeout := time.After(1 * time.Second)
		select {
		case <-timeout:
		case <-controller.shutdownBroadcast:
			return
		}
	}
	controller.startEstablishing()
loop:
	for {
		select {
		case failedTunnel := <-controller.failedTunnels:
			Notice(NOTICE_ALERT, "tunnel failed: %s", failedTunnel.serverEntry.IpAddress)
			controller.terminateTunnel(failedTunnel)
			// Note: only this goroutine may call startEstablishing/stopEstablishing and access
			// isEstablishing.
			if !controller.isEstablishing {
				controller.startEstablishing()
			}
			// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
			// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
		case establishedTunnel := <-controller.establishedTunnels:
			Notice(NOTICE_INFO, "established tunnel: %s", establishedTunnel.serverEntry.IpAddress)
			// registerTunnel returns false when the pool is full or the
			// server is a duplicate; in that case the tunnel is discarded.
			if controller.registerTunnel(establishedTunnel) {
				Notice(NOTICE_INFO, "active tunnel: %s", establishedTunnel.serverEntry.IpAddress)
				controller.operateWaitGroup.Add(1)
				go controller.operateTunnel(establishedTunnel)
			} else {
				controller.discardTunnel(establishedTunnel)
			}
			// Once the pool is full, stop the establish workers until a
			// tunnel failure reopens a slot.
			if controller.isFullyEstablished() {
				controller.stopEstablishing()
			}
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// Shutdown sequence: stop establish workers first (they send to
	// establishedTunnels), then close tunnels, then wait for operators.
	controller.stopEstablishing()
	controller.terminateAllTunnels()
	controller.operateWaitGroup.Wait()
	// Drain tunnel channels
	close(controller.establishedTunnels)
	for tunnel := range controller.establishedTunnels {
		controller.discardTunnel(tunnel)
	}
	close(controller.failedTunnels)
	for tunnel := range controller.failedTunnels {
		controller.discardTunnel(tunnel)
	}
	Notice(NOTICE_INFO, "exiting run tunnels")
}
// discardTunnel disposes of a successful connection that is no longer required.
// This happens when the active pool is already full or the tunnel is a
// duplicate of an active tunnel.
func (controller *Controller) discardTunnel(tunnel *Tunnel) {
	Notice(NOTICE_INFO, "discard tunnel: %s", tunnel.serverEntry.IpAddress)
	// TODO: not calling PromoteServerEntry, since that would rank the
	// discarded tunnel before fully active tunnels. Can a discarded tunnel
	// be promoted (since it connects), but with lower rank than all active
	// tunnels?
	tunnel.Close()
}
  227. // registerTunnel adds the connected tunnel to the pool of active tunnels
  228. // which are candidates for port forwarding. Returns true if the pool has an
  229. // empty slot and false if the pool is full (caller should discard the tunnel).
  230. func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
  231. controller.tunnelMutex.Lock()
  232. defer controller.tunnelMutex.Unlock()
  233. if len(controller.tunnels) >= controller.config.TunnelPoolSize {
  234. return false
  235. }
  236. // Perform a fail-safe check just in case we've established
  237. // a duplicate connection.
  238. for _, activeTunnel := range controller.tunnels {
  239. if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
  240. Notice(NOTICE_ALERT, "duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
  241. return false
  242. }
  243. }
  244. controller.tunnels = append(controller.tunnels, tunnel)
  245. Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
  246. return true
  247. }
// isFullyEstablished indicates if the pool of active tunnels is full,
// i.e. has reached the configured TunnelPoolSize.
func (controller *Controller) isFullyEstablished() bool {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	return len(controller.tunnels) >= controller.config.TunnelPoolSize
}
  254. // terminateTunnel removes a tunnel from the pool of active tunnels
  255. // and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
  256. // is adjusted as required.
  257. func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
  258. controller.tunnelMutex.Lock()
  259. defer controller.tunnelMutex.Unlock()
  260. for index, activeTunnel := range controller.tunnels {
  261. if tunnel == activeTunnel {
  262. controller.tunnels = append(
  263. controller.tunnels[:index], controller.tunnels[index+1:]...)
  264. if controller.nextTunnel > index {
  265. controller.nextTunnel--
  266. }
  267. if controller.nextTunnel >= len(controller.tunnels) {
  268. controller.nextTunnel = 0
  269. }
  270. activeTunnel.Close()
  271. Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
  272. break
  273. }
  274. }
  275. }
  276. // terminateAllTunnels empties the tunnel pool, closing all active tunnels.
  277. // This is used when shutting down the controller.
  278. func (controller *Controller) terminateAllTunnels() {
  279. controller.tunnelMutex.Lock()
  280. defer controller.tunnelMutex.Unlock()
  281. for _, activeTunnel := range controller.tunnels {
  282. activeTunnel.Close()
  283. }
  284. controller.tunnels = make([]*Tunnel, 0)
  285. controller.nextTunnel = 0
  286. Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
  287. }
  288. // getNextActiveTunnel returns the next tunnel from the pool of active
  289. // tunnels. Currently, tunnel selection order is simple round-robin.
  290. func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
  291. controller.tunnelMutex.Lock()
  292. defer controller.tunnelMutex.Unlock()
  293. for i := len(controller.tunnels); i >= 0; i-- {
  294. tunnel = controller.tunnels[controller.nextTunnel]
  295. controller.nextTunnel =
  296. (controller.nextTunnel + 1) % len(controller.tunnels)
  297. // A tunnel must[*] have started its session (performed the server
  298. // API handshake sequence) before it may be used for tunneling traffic
  299. // [*]currently not enforced by the server, but may be in the future.
  300. if tunnel.IsSessionStarted() {
  301. return tunnel
  302. }
  303. }
  304. return nil
  305. }
  306. // getActiveTunnelServerEntries lists the Server Entries for
  307. // all the active tunnels. This is used to exclude those servers
  308. // from the set of candidates to establish connections to.
  309. func (controller *Controller) getActiveTunnelServerEntries() (serverEntries []*ServerEntry) {
  310. controller.tunnelMutex.Lock()
  311. defer controller.tunnelMutex.Unlock()
  312. serverEntries = make([]*ServerEntry, 0)
  313. for _, activeTunnel := range controller.tunnels {
  314. serverEntries = append(serverEntries, activeTunnel.serverEntry)
  315. }
  316. return serverEntries
  317. }
  318. // operateTunnel starts a Psiphon session (handshake, etc.) on a newly
  319. // connected tunnel, and then monitors the tunnel for failures:
  320. //
  321. // 1. Overall tunnel failure: the tunnel sends a signal to the ClosedSignal
  322. // channel on keep-alive failure and other transport I/O errors. In case
  323. // of such a failure, the tunnel is marked as failed.
  324. //
  325. // 2. Tunnel port forward failures: the tunnel connection may stay up but
  326. // the client may still fail to establish port forwards due to server load
  327. // and other conditions. After a threshold number of such failures, the
  328. // overall tunnel is marked as failed.
  329. //
  330. // TODO: currently, any connect (dial), read, or write error associated with
  331. // a port forward is counted as a failure. It may be important to differentiate
  332. // between failures due to Psiphon server conditions and failures due to the
  333. // origin/target server (in the latter case, the tunnel is healthy). Here are
  334. // some typical error messages to consider matching against (or ignoring):
  335. //
  336. // - "ssh: rejected: administratively prohibited (open failed)"
  337. // - "ssh: rejected: connect failed (Connection timed out)"
  338. // - "write tcp ... broken pipe"
  339. // - "read tcp ... connection reset by peer"
  340. // - "ssh: unexpected packet in response to channel open: <nil>"
  341. //
  342. func (controller *Controller) operateTunnel(tunnel *Tunnel) {
  343. defer controller.operateWaitGroup.Done()
  344. tunnelClosedSignal := make(chan struct{}, 1)
  345. err := tunnel.conn.SetClosedSignal(tunnelClosedSignal)
  346. if err != nil {
  347. err = fmt.Errorf("failed to set closed signal: %s", err)
  348. }
  349. Notice(NOTICE_INFO, "starting session for %s", tunnel.serverEntry.IpAddress)
  350. // TODO: NewSession server API calls may block shutdown
  351. _, err = NewSession(controller.config, tunnel)
  352. if err != nil {
  353. err = fmt.Errorf("error starting session for %s: %s", tunnel.serverEntry.IpAddress, err)
  354. }
  355. // Promote this successful tunnel to first rank so it's one
  356. // of the first candidates next time establish runs.
  357. PromoteServerEntry(tunnel.serverEntry.IpAddress)
  358. for err == nil {
  359. select {
  360. case failures := <-tunnel.portForwardFailures:
  361. tunnel.portForwardFailureTotal += failures
  362. Notice(
  363. NOTICE_INFO, "port forward failures for %s: %d",
  364. tunnel.serverEntry.IpAddress, tunnel.portForwardFailureTotal)
  365. if tunnel.portForwardFailureTotal > controller.config.PortForwardFailureThreshold {
  366. err = errors.New("tunnel exceeded port forward failure threshold")
  367. }
  368. case <-tunnelClosedSignal:
  369. // TODO: this signal can be received during a commanded shutdown due to
  370. // how tunnels are closed; should rework this to avoid log noise.
  371. err = errors.New("tunnel closed unexpectedly")
  372. case <-controller.shutdownBroadcast:
  373. Notice(NOTICE_INFO, "shutdown operate tunnel")
  374. return
  375. }
  376. }
  377. if err != nil {
  378. Notice(NOTICE_ALERT, "operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
  379. // Don't block. Assumes the receiver has a buffer large enough for
  380. // the typical number of operated tunnels. In case there's no room,
  381. // terminate the tunnel (runTunnels won't get a signal in this case).
  382. select {
  383. case controller.failedTunnels <- tunnel:
  384. default:
  385. controller.terminateTunnel(tunnel)
  386. }
  387. }
  388. }
// TunneledConn implements net.Conn and wraps a port forward connection.
// It is used to hook into Read and Write to observe I/O errors and
// report these errors back to the tunnel monitor as port forward failures.
type TunneledConn struct {
	// Embedded net.Conn supplies all net.Conn methods except the
	// Read/Write overrides defined below.
	net.Conn
	// tunnel is the active tunnel carrying this port forward; failures
	// are reported to tunnel.portForwardFailures.
	tunnel *Tunnel
}
  396. func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
  397. n, err = conn.Conn.Read(buffer)
  398. if err != nil && err != io.EOF {
  399. // Report 1 new failure. Won't block; assumes the receiver
  400. // has a sufficient buffer for the threshold number of reports.
  401. // TODO: conditional on type of error or error message?
  402. select {
  403. case conn.tunnel.portForwardFailures <- 1:
  404. default:
  405. }
  406. }
  407. return
  408. }
  409. func (conn *TunneledConn) Write(buffer []byte) (n int, err error) {
  410. n, err = conn.Conn.Write(buffer)
  411. if err != nil && err != io.EOF {
  412. // Same as TunneledConn.Read()
  413. select {
  414. case conn.tunnel.portForwardFailures <- 1:
  415. default:
  416. }
  417. }
  418. return
  419. }
  420. // Dial selects an active tunnel and establishes a port forward
  421. // connection through the selected tunnel. Failure to connect is considered
  422. // a port foward failure, for the purpose of monitoring tunnel health.
  423. func (controller *Controller) Dial(remoteAddr string) (conn net.Conn, err error) {
  424. tunnel := controller.getNextActiveTunnel()
  425. if tunnel == nil {
  426. return nil, ContextError(errors.New("no active tunnels"))
  427. }
  428. tunnelConn, err := tunnel.Dial(remoteAddr)
  429. if err != nil {
  430. // TODO: conditional on type of error or error message?
  431. select {
  432. case tunnel.portForwardFailures <- 1:
  433. default:
  434. }
  435. return nil, ContextError(err)
  436. }
  437. return &TunneledConn{
  438. Conn: tunnelConn,
  439. tunnel: tunnel},
  440. nil
  441. }
  442. // startEstablishing creates a pool of worker goroutines which will
  443. // attempt to establish tunnels to candidate servers. The candidates
  444. // are generated by another goroutine.
  445. func (controller *Controller) startEstablishing() {
  446. if controller.isEstablishing {
  447. return
  448. }
  449. Notice(NOTICE_INFO, "start establishing")
  450. controller.isEstablishing = true
  451. controller.establishWaitGroup = new(sync.WaitGroup)
  452. controller.stopEstablishingBroadcast = make(chan struct{})
  453. controller.candidateServerEntries = make(chan *ServerEntry)
  454. for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
  455. controller.establishWaitGroup.Add(1)
  456. go controller.establishTunnelWorker()
  457. }
  458. controller.establishWaitGroup.Add(1)
  459. go controller.establishCandidateGenerator()
  460. }
// stopEstablishing signals the establish goroutines to stop and waits
// for the group to halt. pendingConns is used to interrupt any worker
// blocked on a socket connect.
// Note: only the runTunnels goroutine may call this (it owns isEstablishing).
func (controller *Controller) stopEstablishing() {
	if !controller.isEstablishing {
		return
	}
	Notice(NOTICE_INFO, "stop establishing")
	// Note: on Windows, interruptibleTCPClose doesn't really interrupt socket connects
	// and may leave goroutines running for a time after the Wait call.
	controller.pendingConns.CloseAll()
	close(controller.stopEstablishingBroadcast)
	// Note: establishCandidateGenerator closes controller.candidateServerEntries
	// (as it may be sending to that channel).
	controller.establishWaitGroup.Wait()
	// Clear the establish state so a subsequent startEstablishing
	// creates fresh channels and wait group.
	controller.isEstablishing = false
	controller.establishWaitGroup = nil
	controller.stopEstablishingBroadcast = nil
	controller.candidateServerEntries = nil
}
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates. Iteration repeats, with a
// pause between passes, until establishing is stopped or the controller shuts down.
func (controller *Controller) establishCandidateGenerator() {
	defer controller.establishWaitGroup.Done()
loop:
	for {
		// Note: it's possible that an active tunnel in excludeServerEntries will
		// fail during this iteration of server entries and in that case the
		// corresponding server will not be retried (within the same iteration).
		// TODO: is there also a race that can result in multiple tunnels to the same
		// server? (if there is, registerTunnel will reject the duplicate instance.)
		excludeServerEntries := controller.getActiveTunnelServerEntries()
		iterator, err := NewServerEntryIterator(
			controller.config.EgressRegion, controller.config.TunnelProtocol, excludeServerEntries)
		if err != nil {
			Notice(NOTICE_ALERT, "failed to iterate over candidates: %s", err)
			controller.SignalFailure()
			break loop
		}
		// Feed each candidate to the workers; stop promptly on either
		// stop-establishing or shutdown signals.
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				Notice(NOTICE_ALERT, "failed to get next candidate: %s", err)
				controller.SignalFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}
			select {
			case controller.candidateServerEntries <- serverEntry:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}
		}
		iterator.Close()
		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change.
		timeout := time.After(ESTABLISH_TUNNEL_PAUSE_PERIOD)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// This goroutine is the sole sender, so it closes the candidate
	// channel, which terminates the worker range loops.
	close(controller.candidateServerEntries)
	Notice(NOTICE_INFO, "stopped candidate generator")
}
  537. // establishTunnelWorker pulls candidates from the candidate queue, establishes
  538. // a connection to the tunnel server, and delivers the established tunnel to a channel.
  539. func (controller *Controller) establishTunnelWorker() {
  540. defer controller.establishWaitGroup.Done()
  541. for serverEntry := range controller.candidateServerEntries {
  542. // Note: don't receive from candidateQueue and broadcastStopWorkers in the same
  543. // select, since we want to prioritize receiving the stop signal
  544. select {
  545. case <-controller.stopEstablishingBroadcast:
  546. return
  547. default:
  548. }
  549. tunnel, err := EstablishTunnel(
  550. controller.config, controller.pendingConns, serverEntry)
  551. if err != nil {
  552. // TODO: distingush case where conn is interrupted?
  553. Notice(NOTICE_INFO, "failed to connect to %s: %s", serverEntry.IpAddress, err)
  554. } else {
  555. // Don't block. Assumes the receiver has a buffer large enough for
  556. // the number of desired tunnels. If there's no room, the tunnel must
  557. // not be required so it's discarded.
  558. select {
  559. case controller.establishedTunnels <- tunnel:
  560. default:
  561. controller.discardTunnel(tunnel)
  562. }
  563. }
  564. }
  565. Notice(NOTICE_INFO, "stopped establish worker")
  566. }
  567. // RunForever executes the main loop of the Psiphon client. It launches
  568. // the controller with a shutdown that it never signaled.
  569. func RunForever(config *Config) {
  570. if config.LogFilename != "" {
  571. logFile, err := os.OpenFile(config.LogFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
  572. if err != nil {
  573. Fatal("error opening log file: %s", err)
  574. }
  575. defer logFile.Close()
  576. log.SetOutput(logFile)
  577. }
  578. Notice(NOTICE_VERSION, VERSION)
  579. controller := NewController(config)
  580. shutdownBroadcast := make(chan struct{})
  581. controller.Run(shutdownBroadcast)
  582. }