controller.go 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557
  1. /*
  2. * Copyright (c) 2015, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. // Package psiphon implements the core tunnel functionality of a Psiphon client.
  20. // The main function is RunForever, which runs a Controller that obtains lists of
  21. // servers, establishes tunnel connections, and runs local proxies through which
  22. // tunneled traffic may be sent.
  23. package psiphon
  24. import (
  25. "errors"
  26. "io"
  27. "net"
  28. "sync"
  29. "time"
  30. )
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
	config *Config
	// componentFailureSignal receives a non-blocking signal when a component
	// (e.g., a local proxy) fails unexpectedly; see SignalComponentFailure.
	componentFailureSignal chan struct{}
	// shutdownBroadcast is closed by Run to tell all goroutines to exit.
	shutdownBroadcast chan struct{}
	runWaitGroup *sync.WaitGroup
	// establishedTunnels and failedTunnels deliver tunnel events to runTunnels;
	// both are buffered (to TunnelPoolSize) so senders need not block.
	establishedTunnels chan *Tunnel
	failedTunnels chan *Tunnel
	// tunnelMutex guards tunnels and nextTunnel (the round-robin cursor
	// used by getNextActiveTunnel).
	tunnelMutex sync.Mutex
	tunnels []*Tunnel
	nextTunnel int
	// The establish-state fields below are accessed only by the runTunnels
	// goroutine via startEstablishing/stopEstablishing.
	isEstablishing bool
	establishWaitGroup *sync.WaitGroup
	stopEstablishingBroadcast chan struct{}
	candidateServerEntries chan *ServerEntry
	// pendingConns tracks in-flight connects so they can be interrupted
	// on stop/shutdown.
	pendingConns *Conns
}
  50. // NewController initializes a new controller.
  51. func NewController(config *Config) (controller *Controller) {
  52. return &Controller{
  53. config: config,
  54. // componentFailureSignal receives a signal from a component (including socks and
  55. // http local proxies) if they unexpectedly fail. Senders should not block.
  56. // A buffer allows at least one stop signal to be sent before there is a receiver.
  57. componentFailureSignal: make(chan struct{}, 1),
  58. shutdownBroadcast: make(chan struct{}),
  59. runWaitGroup: new(sync.WaitGroup),
  60. // establishedTunnels and failedTunnels buffer sizes are large enough to
  61. // receive full pools of tunnels without blocking. Senders should not block.
  62. establishedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  63. failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  64. tunnels: make([]*Tunnel, 0),
  65. isEstablishing: false,
  66. pendingConns: new(Conns),
  67. }
  68. }
  69. // Run executes the controller. It launches components and then monitors
  70. // for a shutdown signal; after receiving the signal it shuts down the
  71. // controller.
  72. // The components include:
  73. // - the periodic remote server list fetcher
  74. // - the tunnel manager
  75. // - a local SOCKS proxy that port forwards through the pool of tunnels
  76. // - a local HTTP proxy that port forwards through the pool of tunnels
  77. func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
  78. Notice(NOTICE_VERSION, VERSION)
  79. socksProxy, err := NewSocksProxy(controller.config, controller)
  80. if err != nil {
  81. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  82. return
  83. }
  84. defer socksProxy.Close()
  85. httpProxy, err := NewHttpProxy(controller.config, controller)
  86. if err != nil {
  87. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  88. return
  89. }
  90. defer httpProxy.Close()
  91. controller.runWaitGroup.Add(2)
  92. go controller.remoteServerListFetcher()
  93. go controller.runTunnels()
  94. select {
  95. case <-shutdownBroadcast:
  96. Notice(NOTICE_INFO, "controller shutdown by request")
  97. case <-controller.componentFailureSignal:
  98. Notice(NOTICE_ALERT, "controller shutdown due to component failure")
  99. }
  100. // Note: in addition to establish(), this pendingConns will interrupt
  101. // FetchRemoteServerList
  102. controller.pendingConns.CloseAll()
  103. close(controller.shutdownBroadcast)
  104. controller.runWaitGroup.Wait()
  105. Notice(NOTICE_INFO, "exiting controller")
  106. }
  107. // SignalComponentFailure notifies the controller that an associated component has failed.
  108. // This will terminate the controller.
  109. func (controller *Controller) SignalComponentFailure() {
  110. select {
  111. case controller.componentFailureSignal <- *new(struct{}):
  112. default:
  113. }
  114. }
  115. // remoteServerListFetcher fetches an out-of-band list of server entries
  116. // for more tunnel candidates. It fetches immediately, retries after failure
  117. // with a wait period, and refetches after success with a longer wait period.
  118. func (controller *Controller) remoteServerListFetcher() {
  119. defer controller.runWaitGroup.Done()
  120. // Note: unlike existing Psiphon clients, this code
  121. // always makes the fetch remote server list request
  122. loop:
  123. for {
  124. // TODO: FetchRemoteServerList should have its own pendingConns,
  125. // otherwise it may needlessly abort when establish is stopped.
  126. err := FetchRemoteServerList(controller.config, controller.pendingConns)
  127. var duration time.Duration
  128. if err != nil {
  129. Notice(NOTICE_ALERT, "failed to fetch remote server list: %s", err)
  130. duration = FETCH_REMOTE_SERVER_LIST_RETRY_TIMEOUT
  131. } else {
  132. duration = FETCH_REMOTE_SERVER_LIST_STALE_TIMEOUT
  133. }
  134. timeout := time.After(duration)
  135. select {
  136. case <-timeout:
  137. // Fetch again
  138. case <-controller.shutdownBroadcast:
  139. break loop
  140. }
  141. }
  142. Notice(NOTICE_INFO, "exiting remote server list fetcher")
  143. }
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
// When a tunnel is established, it's added to the active pool. The tunnel's
// operateTunnel goroutine monitors the tunnel.
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
	defer controller.runWaitGroup.Done()
	// Don't start establishing until there are some server candidates. The
	// typical case is a client with no server entries which will wait for
	// the first successful FetchRemoteServerList to populate the data store.
	for {
		if HasServerEntries(
			controller.config.EgressRegion, controller.config.TunnelProtocol) {
			break
		}
		// TODO: replace polling with signal
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-controller.shutdownBroadcast:
			return
		}
	}
	controller.startEstablishing()
loop:
	for {
		select {
		case failedTunnel := <-controller.failedTunnels:
			Notice(NOTICE_ALERT, "tunnel failed: %s", failedTunnel.serverEntry.IpAddress)
			controller.terminateTunnel(failedTunnel)
			// Note: only this goroutine may call startEstablishing/stopEstablishing and access
			// isEstablishing.
			if !controller.isEstablishing {
				controller.startEstablishing()
			}
		// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
		// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
		case establishedTunnel := <-controller.establishedTunnels:
			Notice(NOTICE_INFO, "established tunnel: %s", establishedTunnel.serverEntry.IpAddress)
			// registerTunnel returns false when the pool is full or the server
			// is a duplicate; in that case the unneeded tunnel is closed.
			if controller.registerTunnel(establishedTunnel) {
				Notice(NOTICE_INFO, "active tunnel: %s", establishedTunnel.serverEntry.IpAddress)
			} else {
				controller.discardTunnel(establishedTunnel)
			}
			if controller.isFullyEstablished() {
				controller.stopEstablishing()
			}
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// Shutdown path: stop establish workers first (they are the only senders
	// on these channels), then close the pool.
	controller.stopEstablishing()
	controller.terminateAllTunnels()
	// Drain tunnel channels: closing lets the ranges below terminate, and any
	// tunnels delivered during shutdown are discarded.
	close(controller.establishedTunnels)
	for tunnel := range controller.establishedTunnels {
		controller.discardTunnel(tunnel)
	}
	close(controller.failedTunnels)
	for tunnel := range controller.failedTunnels {
		controller.discardTunnel(tunnel)
	}
	Notice(NOTICE_INFO, "exiting run tunnels")
}
// SignalTunnelFailure implements the TunnelOwner interface. This function
// is called by Tunnel.operateTunnel when the tunnel has detected that it
// has failed. The Controller will signal runTunnels to create a new
// tunnel and/or remove the tunnel from the list of active tunnels.
// (The comment previously misnamed this function "HandleFailedTunnel".)
func (controller *Controller) SignalTunnelFailure(tunnel *Tunnel) {
	// Don't block. Assumes the receiver has a buffer large enough for
	// the typical number of operated tunnels. In case there's no room,
	// terminate the tunnel (runTunnels won't get a signal in this case,
	// but the tunnel will be removed from the list of active tunnels).
	select {
	case controller.failedTunnels <- tunnel:
	default:
		controller.terminateTunnel(tunnel)
	}
}
// discardTunnel disposes of a successful connection that is no longer required,
// e.g., because the pool was already full or the server was a duplicate.
func (controller *Controller) discardTunnel(tunnel *Tunnel) {
	Notice(NOTICE_INFO, "discard tunnel: %s", tunnel.serverEntry.IpAddress)
	// TODO: not calling PromoteServerEntry, since that would rank the
	// discarded tunnel before fully active tunnels. Can a discarded tunnel
	// be promoted (since it connects), but with lower rank than all active
	// tunnels?
	tunnel.Close()
}
  234. // registerTunnel adds the connected tunnel to the pool of active tunnels
  235. // which are candidates for port forwarding. Returns true if the pool has an
  236. // empty slot and false if the pool is full (caller should discard the tunnel).
  237. func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
  238. controller.tunnelMutex.Lock()
  239. defer controller.tunnelMutex.Unlock()
  240. if len(controller.tunnels) >= controller.config.TunnelPoolSize {
  241. return false
  242. }
  243. // Perform a final check just in case we've established
  244. // a duplicate connection.
  245. for _, activeTunnel := range controller.tunnels {
  246. if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
  247. Notice(NOTICE_ALERT, "duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
  248. return false
  249. }
  250. }
  251. controller.tunnels = append(controller.tunnels, tunnel)
  252. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  253. return true
  254. }
  255. // isFullyEstablished indicates if the pool of active tunnels is full.
  256. func (controller *Controller) isFullyEstablished() bool {
  257. controller.tunnelMutex.Lock()
  258. defer controller.tunnelMutex.Unlock()
  259. return len(controller.tunnels) >= controller.config.TunnelPoolSize
  260. }
  261. // terminateTunnel removes a tunnel from the pool of active tunnels
  262. // and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
  263. // is adjusted as required.
  264. func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
  265. controller.tunnelMutex.Lock()
  266. defer controller.tunnelMutex.Unlock()
  267. for index, activeTunnel := range controller.tunnels {
  268. if tunnel == activeTunnel {
  269. controller.tunnels = append(
  270. controller.tunnels[:index], controller.tunnels[index+1:]...)
  271. if controller.nextTunnel > index {
  272. controller.nextTunnel--
  273. }
  274. if controller.nextTunnel >= len(controller.tunnels) {
  275. controller.nextTunnel = 0
  276. }
  277. activeTunnel.Close()
  278. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  279. break
  280. }
  281. }
  282. }
  283. // terminateAllTunnels empties the tunnel pool, closing all active tunnels.
  284. // This is used when shutting down the controller.
  285. func (controller *Controller) terminateAllTunnels() {
  286. controller.tunnelMutex.Lock()
  287. defer controller.tunnelMutex.Unlock()
  288. for _, activeTunnel := range controller.tunnels {
  289. activeTunnel.Close()
  290. }
  291. controller.tunnels = make([]*Tunnel, 0)
  292. controller.nextTunnel = 0
  293. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  294. }
  295. // getNextActiveTunnel returns the next tunnel from the pool of active
  296. // tunnels. Currently, tunnel selection order is simple round-robin.
  297. func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
  298. controller.tunnelMutex.Lock()
  299. defer controller.tunnelMutex.Unlock()
  300. for i := len(controller.tunnels); i > 0; i-- {
  301. tunnel = controller.tunnels[controller.nextTunnel]
  302. controller.nextTunnel =
  303. (controller.nextTunnel + 1) % len(controller.tunnels)
  304. return tunnel
  305. }
  306. return nil
  307. }
// isActiveTunnelServerEntry is used to check if there's already
// an existing tunnel to a candidate server.
// (The comment previously misnamed this "isActiveTunnelServerEntries".)
func (controller *Controller) isActiveTunnelServerEntry(serverEntry *ServerEntry) bool {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	for _, activeTunnel := range controller.tunnels {
		if activeTunnel.serverEntry.IpAddress == serverEntry.IpAddress {
			return true
		}
	}
	return false
}
// TunneledConn implements net.Conn and wraps a port forward connection.
// It is used to hook into Read and Write to observe I/O errors and
// report these errors back to the tunnel monitor as port forward failures.
type TunneledConn struct {
	net.Conn
	// tunnel receives the failure reports via its portForwardFailures channel.
	tunnel *Tunnel
}
  327. func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
  328. n, err = conn.Conn.Read(buffer)
  329. if err != nil && err != io.EOF {
  330. // Report 1 new failure. Won't block; assumes the receiver
  331. // has a sufficient buffer for the threshold number of reports.
  332. // TODO: conditional on type of error or error message?
  333. select {
  334. case conn.tunnel.portForwardFailures <- 1:
  335. default:
  336. }
  337. }
  338. return
  339. }
  340. func (conn *TunneledConn) Write(buffer []byte) (n int, err error) {
  341. n, err = conn.Conn.Write(buffer)
  342. if err != nil && err != io.EOF {
  343. // Same as TunneledConn.Read()
  344. select {
  345. case conn.tunnel.portForwardFailures <- 1:
  346. default:
  347. }
  348. }
  349. return
  350. }
// Dial selects an active tunnel and establishes a port forward
// connection through the selected tunnel. Failure to connect is considered
// a port forward failure, for the purpose of monitoring tunnel health.
func (controller *Controller) Dial(remoteAddr string) (conn net.Conn, err error) {
	tunnel := controller.getNextActiveTunnel()
	if tunnel == nil {
		return nil, ContextError(errors.New("no active tunnels"))
	}
	tunnelConn, err := tunnel.Dial(remoteAddr)
	if err != nil {
		// Non-blocking failure report, same pattern as TunneledConn.
		// TODO: conditional on type of error or error message?
		select {
		case tunnel.portForwardFailures <- 1:
		default:
		}
		return nil, ContextError(err)
	}
	// Wrap the conn for stats collection, then for failure reporting.
	statsConn := NewStatsConn(
		tunnelConn, tunnel.session.StatsServerID(), tunnel.session.StatsRegexps())
	conn = &TunneledConn{
		Conn:   statsConn,
		tunnel: tunnel}
	return
}
  375. // startEstablishing creates a pool of worker goroutines which will
  376. // attempt to establish tunnels to candidate servers. The candidates
  377. // are generated by another goroutine.
  378. func (controller *Controller) startEstablishing() {
  379. if controller.isEstablishing {
  380. return
  381. }
  382. Notice(NOTICE_INFO, "start establishing")
  383. controller.isEstablishing = true
  384. controller.establishWaitGroup = new(sync.WaitGroup)
  385. controller.stopEstablishingBroadcast = make(chan struct{})
  386. controller.candidateServerEntries = make(chan *ServerEntry)
  387. for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
  388. controller.establishWaitGroup.Add(1)
  389. go controller.establishTunnelWorker()
  390. }
  391. controller.establishWaitGroup.Add(1)
  392. go controller.establishCandidateGenerator()
  393. }
// stopEstablishing signals the establish goroutines to stop and waits
// for the group to halt. pendingConns is used to interrupt any worker
// blocked on a socket connect. Must only be called from the runTunnels
// goroutine.
func (controller *Controller) stopEstablishing() {
	if !controller.isEstablishing {
		return
	}
	Notice(NOTICE_INFO, "stop establishing")
	close(controller.stopEstablishingBroadcast)
	// Note: on Windows, interruptibleTCPClose doesn't really interrupt socket connects
	// and may leave goroutines running for a time after the Wait call.
	controller.pendingConns.CloseAll()
	// Note: establishCandidateGenerator closes controller.candidateServerEntries
	// (as it may be sending to that channel).
	controller.establishWaitGroup.Wait()
	// Clear establish state so a subsequent startEstablishing builds fresh
	// channels and wait group.
	controller.isEstablishing = false
	controller.establishWaitGroup = nil
	controller.stopEstablishingBroadcast = nil
	controller.candidateServerEntries = nil
}
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates.
func (controller *Controller) establishCandidateGenerator() {
	defer controller.establishWaitGroup.Done()
	iterator, err := NewServerEntryIterator(
		controller.config.EgressRegion, controller.config.TunnelProtocol)
	if err != nil {
		Notice(NOTICE_ALERT, "failed to iterate over candidates: %s", err)
		controller.SignalComponentFailure()
		return
	}
	defer iterator.Close()
loop:
	// Outer loop: repeat full iterations over the candidate list until
	// stopped. Inner loop: one full pass over the iterator.
	for {
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				Notice(NOTICE_ALERT, "failed to get next candidate: %s", err)
				controller.SignalComponentFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}
			// Block until a worker takes the candidate, or until stopped.
			select {
			case controller.candidateServerEntries <- serverEntry:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}
		}
		iterator.Reset()
		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change.
		timeout := time.After(ESTABLISH_TUNNEL_PAUSE_PERIOD)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// This goroutine is the sole sender, so it closes the channel; workers
	// ranging over it will then terminate.
	close(controller.candidateServerEntries)
	Notice(NOTICE_INFO, "stopped candidate generator")
}
// establishTunnelWorker pulls candidates from the candidate queue, establishes
// a connection to the tunnel server, and delivers the established tunnel to a channel.
func (controller *Controller) establishTunnelWorker() {
	defer controller.establishWaitGroup.Done()
loop:
	for serverEntry := range controller.candidateServerEntries {
		// Note: don't receive from candidateQueue and broadcastStopWorkers in the same
		// select, since we want to prioritize receiving the stop signal
		select {
		case <-controller.stopEstablishingBroadcast:
			break loop
		default:
		}
		// There may already be a tunnel to this candidate. If so, skip it.
		if controller.isActiveTunnelServerEntry(serverEntry) {
			continue
		}
		tunnel, err := EstablishTunnel(
			controller.config,
			controller.pendingConns,
			serverEntry,
			controller) // TunnelOwner
		if err != nil {
			// Before emitting error, check if establish interrupted, in which
			// case the error is noise.
			select {
			case <-controller.stopEstablishingBroadcast:
				break loop
			default:
			}
			Notice(NOTICE_INFO, "failed to connect to %s: %s", serverEntry.IpAddress, err)
			continue
		}
		// Deliver established tunnel.
		// Don't block. Assumes the receiver has a buffer large enough for
		// the number of desired tunnels. If there's no room, the tunnel must
		// not be required so it's discarded.
		select {
		case controller.establishedTunnels <- tunnel:
		default:
			controller.discardTunnel(tunnel)
		}
	}
	Notice(NOTICE_INFO, "stopped establish worker")
}