controller.go 21 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594
  1. /*
  2. * Copyright (c) 2014, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. // Package psiphon implements the core tunnel functionality of a Psiphon client.
  20. // The main function is RunForever, which runs a Controller that obtains lists of
  21. // servers, establishes tunnel connections, and runs local proxies through which
  22. // tunneled traffic may be sent.
  23. package psiphon
  24. import (
  25. "errors"
  26. "fmt"
  27. "log"
  28. "net"
  29. "os"
  30. "sync"
  31. "time"
  32. )
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
	config *Config
	// failureSignal receives a non-blocking signal from any component that
	// fails unexpectedly; Run treats it as a shutdown trigger.
	failureSignal chan struct{}
	// shutdownBroadcast is closed by Run to tell all goroutines to exit.
	shutdownBroadcast chan struct{}
	// runWaitGroup tracks the long-running component goroutines started by Run.
	runWaitGroup *sync.WaitGroup
	// establishedTunnels/failedTunnels carry tunnels from worker goroutines
	// to the runTunnels loop; buffered (see NewController) so senders don't block.
	establishedTunnels chan *Tunnel
	failedTunnels      chan *Tunnel
	// tunnelMutex guards tunnels and nextTunnel.
	tunnelMutex sync.Mutex
	// tunnels is the pool of active tunnels used for port forwarding.
	tunnels []*Tunnel
	// nextTunnel is the round-robin cursor used by getNextActiveTunnel.
	nextTunnel int
	// operateWaitGroup tracks one operateTunnel goroutine per active tunnel.
	operateWaitGroup *sync.WaitGroup
	// isEstablishing and the fields below are only accessed from the
	// runTunnels goroutine (see startEstablishing/stopEstablishing).
	isEstablishing            bool
	establishWaitGroup        *sync.WaitGroup
	stopEstablishingBroadcast chan struct{}
	candidateServerEntries    chan *ServerEntry
	// pendingConns interrupts in-flight connection attempts on stop.
	pendingConns *Conns
}
  53. // NewController initializes a new controller.
  54. func NewController(config *Config) (controller *Controller) {
  55. return &Controller{
  56. config: config,
  57. // failureSignal receives a signal from a component (including socks and
  58. // http local proxies) if they unexpectedly fail. Senders should not block.
  59. // A buffer allows at least one stop signal to be sent before there is a receiver.
  60. failureSignal: make(chan struct{}, 1),
  61. shutdownBroadcast: make(chan struct{}),
  62. runWaitGroup: new(sync.WaitGroup),
  63. // establishedTunnels and failedTunnels buffer sizes are large enough to
  64. // receive full pools of tunnels without blocking. Senders should not block.
  65. establishedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  66. failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  67. tunnels: make([]*Tunnel, 0),
  68. operateWaitGroup: new(sync.WaitGroup),
  69. isEstablishing: false,
  70. establishWaitGroup: new(sync.WaitGroup),
  71. stopEstablishingBroadcast: make(chan struct{}),
  72. candidateServerEntries: make(chan *ServerEntry),
  73. pendingConns: new(Conns),
  74. }
  75. }
  76. // Run executes the controller. It launches components and then monitors
  77. // for a shutdown signal; after receiving the signal it shuts down the
  78. // controller.
  79. // The components include:
  80. // - the periodic remote server list fetcher
  81. // - the tunnel manager
  82. // - a local SOCKS proxy that port forwards through the pool of tunnels
  83. // - a local HTTP proxy that port forwards through the pool of tunnels
  84. func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
  85. socksProxy, err := NewSocksProxy(controller)
  86. if err != nil {
  87. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  88. return
  89. }
  90. defer socksProxy.Close()
  91. httpProxy, err := NewHttpProxy(controller)
  92. if err != nil {
  93. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  94. return
  95. }
  96. defer httpProxy.Close()
  97. controller.runWaitGroup.Add(2)
  98. go controller.remoteServerListFetcher()
  99. go controller.runTunnels()
  100. select {
  101. case <-shutdownBroadcast:
  102. Notice(NOTICE_INFO, "controller shutdown by request")
  103. case <-controller.failureSignal:
  104. Notice(NOTICE_ALERT, "controller shutdown due to failure")
  105. }
  106. close(controller.shutdownBroadcast)
  107. controller.runWaitGroup.Wait()
  108. Notice(NOTICE_INFO, "exiting controller")
  109. }
  110. // SignalFailure notifies the controller than a component has failed.
  111. // This will terminate the controller.
  112. func (controller *Controller) SignalFailure() {
  113. select {
  114. case controller.failureSignal <- *new(struct{}):
  115. default:
  116. }
  117. }
  118. // remoteServerListFetcher fetches an out-of-band list of server entries
  119. // for more tunnel candidates. It fetches immediately, retries after failure
  120. // with a wait period, and refetches after success with a longer wait period.
  121. func (controller *Controller) remoteServerListFetcher() {
  122. defer controller.runWaitGroup.Done()
  123. // Note: unlike existing Psiphon clients, this code
  124. // always makes the fetch remote server list request
  125. loop:
  126. for {
  127. // TODO: FetchRemoteServerList should abort immediately on shutdownBroadcast
  128. err := FetchRemoteServerList(controller.config)
  129. var duration time.Duration
  130. if err != nil {
  131. Notice(NOTICE_ALERT, "failed to fetch remote server list: %s", err)
  132. duration = FETCH_REMOTE_SERVER_LIST_RETRY_TIMEOUT
  133. } else {
  134. duration = FETCH_REMOTE_SERVER_LIST_STALE_TIMEOUT
  135. }
  136. timeout := time.After(duration)
  137. select {
  138. case <-timeout:
  139. // Fetch again
  140. case <-controller.shutdownBroadcast:
  141. break loop
  142. }
  143. }
  144. Notice(NOTICE_INFO, "exiting remote server list fetcher")
  145. }
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
// When a tunnel is established, it's added to the active pool and a corresponding
// operateTunnel goroutine is launched which starts a session in the tunnel and
// monitors the tunnel for failures.
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
	defer controller.runWaitGroup.Done()
	// Don't start establishing until there are some server candidates. The
	// typical case is a client with no server entries which will wait for
	// the first successful FetchRemoteServerList to populate the data store.
	for {
		if HasServerEntries(
			controller.config.EgressRegion, controller.config.TunnelProtocol) {
			break
		}
		// TODO: replace polling with signal
		timeout := time.After(1 * time.Second)
		select {
		case <-timeout:
			// Poll HasServerEntries again.
		case <-controller.shutdownBroadcast:
			return
		}
	}
	controller.startEstablishing()
loop:
	for {
		select {
		case failedTunnel := <-controller.failedTunnels:
			Notice(NOTICE_ALERT, "tunnel failed: %s", failedTunnel.serverEntry.IpAddress)
			controller.terminateTunnel(failedTunnel)
			// Note: only this goroutine may call startEstablishing/stopEstablishing and access
			// isEstablishing.
			if !controller.isEstablishing {
				controller.startEstablishing()
			}
			// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
			// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
		case establishedTunnel := <-controller.establishedTunnels:
			Notice(NOTICE_INFO, "established tunnel: %s", establishedTunnel.serverEntry.IpAddress)
			// !TODO! design issue: activateTunnel makes tunnel avail for port forward *before* operates does handshake
			// solution(?) distinguish between two stages or states: connected, and then active.
			if !controller.activateTunnel(establishedTunnel) {
				// Pool is full: this tunnel isn't needed, and a full pool
				// means establishing can stop.
				controller.discardTunnel(establishedTunnel)
				controller.stopEstablishing()
			} else {
				Notice(NOTICE_INFO, "active tunnel: %s", establishedTunnel.serverEntry.IpAddress)
				controller.operateWaitGroup.Add(1)
				go controller.operateTunnel(establishedTunnel)
			}
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// Shutdown sequence: stop establish workers first, then close every
	// active tunnel, then wait for the operateTunnel goroutines to exit.
	controller.stopEstablishing()
	controller.terminateAllTunnels()
	controller.operateWaitGroup.Wait()
	// Drain tunnel channels. Safe to close here: all senders (establish
	// workers, operate goroutines) have exited by this point.
	close(controller.establishedTunnels)
	for tunnel := range controller.establishedTunnels {
		controller.discardTunnel(tunnel)
	}
	close(controller.failedTunnels)
	for tunnel := range controller.failedTunnels {
		controller.discardTunnel(tunnel)
	}
	Notice(NOTICE_INFO, "exiting run tunnels")
}
// discardTunnel disposes of a successful connection that is no longer required.
func (controller *Controller) discardTunnel(tunnel *Tunnel) {
	Notice(NOTICE_INFO, "discard tunnel: %s", tunnel.serverEntry.IpAddress)
	// TODO: not calling PromoteServerEntry, since that would rank the
	// discarded tunnel before fully active tunnels. Can a discarded tunnel
	// be promoted (since it connects), but with lower rank than all active
	// tunnels?
	tunnel.Close()
}
  225. // activateTunnel adds the connected tunnel to the pool of active tunnels
  226. // which are used for port forwarding. Returns true if the pool has an empty
  227. // slot and false if the pool is full (caller should discard the tunnel).
  228. func (controller *Controller) activateTunnel(tunnel *Tunnel) bool {
  229. controller.tunnelMutex.Lock()
  230. defer controller.tunnelMutex.Unlock()
  231. // !TODO! double check not already a tunnel to this server
  232. if len(controller.tunnels) >= controller.config.TunnelPoolSize {
  233. return false
  234. }
  235. controller.tunnels = append(controller.tunnels, tunnel)
  236. Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
  237. return true
  238. }
// terminateTunnel removes a tunnel from the pool of active tunnels
// and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
// is adjusted as required. A tunnel not found in the pool is a no-op
// (it may have already been terminated).
func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	for index, activeTunnel := range controller.tunnels {
		if tunnel == activeTunnel {
			controller.tunnels = append(
				controller.tunnels[:index], controller.tunnels[index+1:]...)
			// Removing an element before the cursor shifts the remaining
			// entries left by one; follow the shift.
			if controller.nextTunnel > index {
				controller.nextTunnel--
			}
			// The pool shrank; wrap the cursor if it now points past the end.
			if controller.nextTunnel >= len(controller.tunnels) {
				controller.nextTunnel = 0
			}
			activeTunnel.Close()
			Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
			break
		}
	}
}
  261. // terminateAllTunnels empties the tunnel pool, closing all active tunnels.
  262. // This is used when shutting down the controller.
  263. func (controller *Controller) terminateAllTunnels() {
  264. controller.tunnelMutex.Lock()
  265. defer controller.tunnelMutex.Unlock()
  266. for _, activeTunnel := range controller.tunnels {
  267. activeTunnel.Close()
  268. }
  269. controller.tunnels = make([]*Tunnel, 0)
  270. controller.nextTunnel = 0
  271. Notice(NOTICE_TUNNEL, "%d tunnels", len(controller.tunnels))
  272. }
  273. // getNextActiveTunnel returns the next tunnel from the pool of active
  274. // tunnels. Currently, tunnel selection order is simple round-robin.
  275. func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
  276. controller.tunnelMutex.Lock()
  277. defer controller.tunnelMutex.Unlock()
  278. if len(controller.tunnels) == 0 {
  279. return nil
  280. }
  281. tunnel = controller.tunnels[controller.nextTunnel]
  282. controller.nextTunnel =
  283. (controller.nextTunnel + 1) % len(controller.tunnels)
  284. return tunnel
  285. }
  286. // getActiveTunnelServerEntries lists the Server Entries for
  287. // all the active tunnels. This is used to exclude those servers
  288. // from the set of candidates to establish connections to.
  289. func (controller *Controller) getActiveTunnelServerEntries() (serverEntries []*ServerEntry) {
  290. controller.tunnelMutex.Lock()
  291. defer controller.tunnelMutex.Unlock()
  292. serverEntries = make([]*ServerEntry, 0)
  293. for _, activeTunnel := range controller.tunnels {
  294. serverEntries = append(serverEntries, activeTunnel.serverEntry)
  295. }
  296. return serverEntries
  297. }
  298. // operateTunnel starts a Psiphon session (handshake, etc.) on a newly
  299. // connected tunnel, and then monitors the tunnel for failures:
  300. //
  301. // 1. Overall tunnel failure: the tunnel sends a signal to the ClosedSignal
  302. // channel on keep-alive failure and other transport I/O errors. In case
  303. // of such a failure, the tunnel is marked as failed.
  304. //
  305. // 2. Tunnel port forward failures: the tunnel connection may stay up but
  306. // the client may still fail to establish port forwards due to server load
  307. // and other conditions. After a threshold number of such failures, the
  308. // overall tunnel is marked as failed.
  309. //
  310. // TODO: currently, any connect (dial), read, or write error associated with
  311. // a port forward is counted as a failure. It may be important to differentiate
  312. // between failures due to Psiphon server conditions and failures due to the
  313. // origin/target server (in the latter case, the tunnel is healthy). Here are
  314. // some typical error messages to consider matching against (or ignoring):
  315. //
  316. // - "ssh: rejected: administratively prohibited (open failed)"
  317. // - "ssh: rejected: connect failed (Connection timed out)"
  318. // - "write tcp ... broken pipe"
  319. // - "read tcp ... connection reset by peer"
  320. // - "ssh: unexpected packet in response to channel open: <nil>"
  321. //
  322. func (controller *Controller) operateTunnel(tunnel *Tunnel) {
  323. defer controller.operateWaitGroup.Done()
  324. tunnelClosedSignal := make(chan struct{}, 1)
  325. err := tunnel.conn.SetClosedSignal(tunnelClosedSignal)
  326. if err != nil {
  327. err = fmt.Errorf("failed to set closed signal: %s", err)
  328. }
  329. Notice(NOTICE_INFO, "starting session for %s", tunnel.serverEntry.IpAddress)
  330. // TODO: NewSession server API calls may block shutdown
  331. _, err = NewSession(controller.config, tunnel)
  332. if err != nil {
  333. err = fmt.Errorf("error starting session for %s: %s", tunnel.serverEntry.IpAddress, err)
  334. }
  335. // Promote this successful tunnel to first rank so it's one
  336. // of the first candidates next time establish runs.
  337. PromoteServerEntry(tunnel.serverEntry.IpAddress)
  338. for err == nil {
  339. select {
  340. case failures := <-tunnel.portForwardFailures:
  341. tunnel.portForwardFailureTotal += failures
  342. if tunnel.portForwardFailureTotal > controller.config.PortForwardFailureThreshold {
  343. err = errors.New("tunnel exceeded port forward failure threshold")
  344. }
  345. case <-tunnelClosedSignal:
  346. // TODO: this signal can be received during a commanded shutdown due to
  347. // how tunnels are closed; should rework this to avoid log noise.
  348. err = errors.New("tunnel closed unexpectedly")
  349. case <-controller.shutdownBroadcast:
  350. Notice(NOTICE_INFO, "shutdown operate tunnel")
  351. return
  352. }
  353. }
  354. if err != nil {
  355. Notice(NOTICE_ALERT, "operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
  356. // Don't block. Assumes the receiver has a buffer large enough for
  357. // the typical number of operated tunnels. In case there's no room,
  358. // terminate the tunnel (runTunnels won't get a signal in this case).
  359. select {
  360. case controller.failedTunnels <- tunnel:
  361. default:
  362. controller.terminateTunnel(tunnel)
  363. }
  364. }
  365. }
// TunneledConn implements net.Conn and wraps a port forward connection.
// It is used to hook into Read and Write to observe I/O errors and
// report these errors back to the tunnel monitor as port forward failures.
type TunneledConn struct {
	net.Conn
	// tunnel is the tunnel carrying this port forward; its
	// portForwardFailures channel receives failure reports.
	tunnel *Tunnel
}
  373. func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
  374. n, err = conn.Conn.Read(buffer)
  375. if err != nil {
  376. // Report 1 new failure. Won't block; assumes the receiver
  377. // has a sufficient buffer for the threshold number of reports.
  378. // TODO: conditional on type of error or error message?
  379. select {
  380. case conn.tunnel.portForwardFailures <- 1:
  381. default:
  382. }
  383. }
  384. return
  385. }
  386. func (conn *TunneledConn) Write(buffer []byte) (n int, err error) {
  387. n, err = conn.Conn.Write(buffer)
  388. if err != nil {
  389. // Same as TunneledConn.Read()
  390. select {
  391. case conn.tunnel.portForwardFailures <- 1:
  392. default:
  393. }
  394. }
  395. return
  396. }
  397. // dialWithTunnel selects an active tunnel and establishes a port forward
  398. // connection through the selected tunnel. Failure to connect is considered
  399. // a port foward failure, for the purpose of monitoring tunnel health.
  400. func (controller *Controller) dialWithTunnel(remoteAddr string) (conn net.Conn, err error) {
  401. tunnel := controller.getNextActiveTunnel()
  402. if tunnel == nil {
  403. return nil, ContextError(errors.New("no active tunnels"))
  404. }
  405. sshPortForward, err := tunnel.sshClient.Dial("tcp", remoteAddr)
  406. if err != nil {
  407. // TODO: conditional on type of error or error message?
  408. select {
  409. case tunnel.portForwardFailures <- 1:
  410. default:
  411. }
  412. return nil, ContextError(err)
  413. }
  414. return &TunneledConn{Conn: sshPortForward}, nil
  415. }
// startEstablishing creates a pool of worker goroutines which will
// attempt to establish tunnels to candidate servers. The candidates
// are generated by another goroutine.
// Only the runTunnels goroutine may call this (it reads/writes
// isEstablishing without a lock).
func (controller *Controller) startEstablishing() {
	// Idempotent: do nothing if an establish round is already running.
	if controller.isEstablishing {
		return
	}
	Notice(NOTICE_INFO, "start establishing")
	controller.isEstablishing = true
	// Fresh wait group and channels per establish round; the previous
	// round's were discarded by stopEstablishing.
	controller.establishWaitGroup = new(sync.WaitGroup)
	controller.stopEstablishingBroadcast = make(chan struct{})
	controller.candidateServerEntries = make(chan *ServerEntry)
	for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
		controller.establishWaitGroup.Add(1)
		go controller.establishTunnelWorker()
	}
	controller.establishWaitGroup.Add(1)
	go controller.establishCandidateGenerator()
}
  435. // stopEstablishing signals the establish goroutines to stop and waits
  436. // for the group to halt. pendingConns is used to interrupt any worker
  437. // blocked on a socket connect.
  438. func (controller *Controller) stopEstablishing() {
  439. Notice(NOTICE_INFO, "stop establishing")
  440. // Note: on Windows, interruptibleTCPClose doesn't really interrupt socket connects
  441. // and may leave goroutines running for a time after the Wait call.
  442. controller.pendingConns.CloseAll()
  443. close(controller.stopEstablishingBroadcast)
  444. controller.establishWaitGroup.Wait()
  445. controller.isEstablishing = false
  446. controller.establishWaitGroup = nil
  447. controller.stopEstablishingBroadcast = nil
  448. controller.candidateServerEntries = nil
  449. }
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates. The generator loops,
// re-iterating after a pause, until signaled to stop.
func (controller *Controller) establishCandidateGenerator() {
	defer controller.establishWaitGroup.Done()
loop:
	for {
		//** note race condition (exclude will exclude servers that fail while running establish)
		//** also a race that can result in dup tunnels?
		excludeServerEntries := controller.getActiveTunnelServerEntries()
		iterator, err := NewServerEntryIterator(
			controller.config.EgressRegion, controller.config.TunnelProtocol, excludeServerEntries)
		if err != nil {
			// Can't produce candidates at all: fail the whole controller.
			Notice(NOTICE_ALERT, "failed to iterate over candidates: %s", err)
			controller.SignalFailure()
			break loop
		}
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				Notice(NOTICE_ALERT, "failed to get next candidate: %s", err)
				controller.SignalFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}
			// Hand the candidate to a worker, unless told to stop while
			// waiting for a worker to become free.
			select {
			case controller.candidateServerEntries <- serverEntry:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}
		}
		iterator.Close()
		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change.
		timeout := time.After(ESTABLISH_TUNNEL_PAUSE_PERIOD)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	Notice(NOTICE_INFO, "stopped candidate generator")
}
  502. // establishTunnelWorker pulls candidates from the candidate queue, establishes
  503. // a connection to the tunnel server, and delivers the established tunnel to a channel.
  504. func (controller *Controller) establishTunnelWorker() {
  505. defer controller.establishWaitGroup.Done()
  506. for serverEntry := range controller.candidateServerEntries {
  507. // Note: don't receive from candidateQueue and broadcastStopWorkers in the same
  508. // select, since we want to prioritize receiving the stop signal
  509. select {
  510. case <-controller.stopEstablishingBroadcast:
  511. return
  512. default:
  513. }
  514. tunnel, err := EstablishTunnel(controller, serverEntry)
  515. if err != nil {
  516. // TODO: distingush case where conn is interrupted?
  517. Notice(NOTICE_INFO, "failed to connect to %s: %s", serverEntry.IpAddress, err)
  518. } else {
  519. // Don't block. Assumes the receiver has a buffer large enough for
  520. // the number of desired tunnels. If there's no room, the tunnel must
  521. // not be required so it's discarded.
  522. select {
  523. case controller.establishedTunnels <- tunnel:
  524. default:
  525. controller.discardTunnel(tunnel)
  526. }
  527. }
  528. }
  529. Notice(NOTICE_INFO, "stopped establish worker")
  530. }
  531. // RunForever executes the main loop of the Psiphon client. It launches
  532. // the controller with a shutdown that it never signaled.
  533. func RunForever(config *Config) {
  534. if config.LogFilename != "" {
  535. logFile, err := os.OpenFile(config.LogFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
  536. if err != nil {
  537. Fatal("error opening log file: %s", err)
  538. }
  539. defer logFile.Close()
  540. log.SetOutput(logFile)
  541. }
  542. Notice(NOTICE_VERSION, VERSION)
  543. controller := NewController(config)
  544. shutdownBroadcast := make(chan struct{})
  545. controller.Run(shutdownBroadcast)
  546. }