Browse Source

Update vendored Psiphon-Labs/quic-go

- Update to upstream quic-go v0.32
Rod Hynes 3 years ago
parent
commit
b5db6ee0b6
100 changed files with 9798 additions and 812 deletions
  1. 5 2
      go.mod
  2. 17 4
      go.sum
  3. 2 2
      vendor/github.com/Psiphon-Labs/quic-go/Changelog.md
  4. 15 14
      vendor/github.com/Psiphon-Labs/quic-go/README.md
  5. 1 0
      vendor/github.com/Psiphon-Labs/quic-go/config.go
  6. 0 4
      vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go
  7. 149 88
      vendor/github.com/Psiphon-Labs/quic-go/connection.go
  8. 6 1
      vendor/github.com/Psiphon-Labs/quic-go/errors.go
  9. 8 1
      vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go
  10. 14 17
      vendor/github.com/Psiphon-Labs/quic-go/framer.go
  11. 18 3
      vendor/github.com/Psiphon-Labs/quic-go/http3/client.go
  12. 1 3
      vendor/github.com/Psiphon-Labs/quic-go/http3/request.go
  13. 4 3
      vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go
  14. 5 4
      vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go
  15. 48 23
      vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go
  16. 11 24
      vendor/github.com/Psiphon-Labs/quic-go/http3/server.go
  17. 5 1
      vendor/github.com/Psiphon-Labs/quic-go/interface.go
  18. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go
  19. 1 2
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go
  20. 21 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go
  21. 8 2
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go
  22. 3 4
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go
  23. 9 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go
  24. 0 4
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go
  25. 11 8
      vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go
  26. 33 14
      vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go
  27. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go
  28. 22 14
      vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go
  29. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go
  30. 5 0
      vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go
  31. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go
  32. 3 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md
  33. 31 2
      vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go
  34. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go
  35. 57 96
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go
  36. 28 29
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go
  37. 17 49
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go
  38. 1 1
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go
  39. 18 0
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go
  40. 3 0
      vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go
  41. 3 3
      vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go
  42. 8 2
      vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go
  43. 6 5
      vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go
  44. 0 3
      vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go
  45. 2 2
      vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go
  46. 412 244
      vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go
  47. 11 13
      vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go
  48. 9 9
      vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go
  49. 3 5
      vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go
  50. 10 12
      vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go
  51. 22 25
      vendor/github.com/Psiphon-Labs/quic-go/send_stream.go
  52. 32 38
      vendor/github.com/Psiphon-Labs/quic-go/server.go
  53. 4 7
      vendor/github.com/Psiphon-Labs/quic-go/stream.go
  54. 4 7
      vendor/github.com/Psiphon-Labs/quic-go/streams_map.go
  55. 2 1
      vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go
  56. 2 1
      vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go
  57. 8 8
      vendor/github.com/Psiphon-Labs/quic-go/token_store.go
  58. 14 0
      vendor/github.com/go-task/slim-sprig/.editorconfig
  59. 1 0
      vendor/github.com/go-task/slim-sprig/.gitattributes
  60. 2 0
      vendor/github.com/go-task/slim-sprig/.gitignore
  61. 364 0
      vendor/github.com/go-task/slim-sprig/CHANGELOG.md
  62. 19 0
      vendor/github.com/go-task/slim-sprig/LICENSE.txt
  63. 73 0
      vendor/github.com/go-task/slim-sprig/README.md
  64. 12 0
      vendor/github.com/go-task/slim-sprig/Taskfile.yml
  65. 24 0
      vendor/github.com/go-task/slim-sprig/crypto.go
  66. 152 0
      vendor/github.com/go-task/slim-sprig/date.go
  67. 163 0
      vendor/github.com/go-task/slim-sprig/defaults.go
  68. 118 0
      vendor/github.com/go-task/slim-sprig/dict.go
  69. 19 0
      vendor/github.com/go-task/slim-sprig/doc.go
  70. 317 0
      vendor/github.com/go-task/slim-sprig/functions.go
  71. 464 0
      vendor/github.com/go-task/slim-sprig/list.go
  72. 12 0
      vendor/github.com/go-task/slim-sprig/network.go
  73. 228 0
      vendor/github.com/go-task/slim-sprig/numeric.go
  74. 28 0
      vendor/github.com/go-task/slim-sprig/reflect.go
  75. 83 0
      vendor/github.com/go-task/slim-sprig/regex.go
  76. 189 0
      vendor/github.com/go-task/slim-sprig/strings.go
  77. 66 0
      vendor/github.com/go-task/slim-sprig/url.go
  78. 7 0
      vendor/github.com/google/pprof/AUTHORS
  79. 16 0
      vendor/github.com/google/pprof/CONTRIBUTORS
  80. 202 0
      vendor/github.com/google/pprof/LICENSE
  81. 567 0
      vendor/github.com/google/pprof/profile/encode.go
  82. 270 0
      vendor/github.com/google/pprof/profile/filter.go
  83. 64 0
      vendor/github.com/google/pprof/profile/index.go
  84. 315 0
      vendor/github.com/google/pprof/profile/legacy_java_profile.go
  85. 1225 0
      vendor/github.com/google/pprof/profile/legacy_profile.go
  86. 481 0
      vendor/github.com/google/pprof/profile/merge.go
  87. 805 0
      vendor/github.com/google/pprof/profile/profile.go
  88. 370 0
      vendor/github.com/google/pprof/profile/proto.go
  89. 178 0
      vendor/github.com/google/pprof/profile/prune.go
  90. 20 0
      vendor/github.com/onsi/ginkgo/v2/LICENSE
  91. 69 0
      vendor/github.com/onsi/ginkgo/v2/config/deprecated.go
  92. 41 0
      vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go
  93. 809 0
      vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go
  94. 195 0
      vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go
  95. 61 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go
  96. 50 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go
  97. 182 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go
  98. 48 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go
  99. 113 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go
  100. 239 0
      vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go

+ 5 - 2
go.mod

@@ -18,7 +18,7 @@ require (
 	github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e
 	github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7
 	github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464
-	github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66
+	github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc
 	github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad
 	github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f
 	github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61
@@ -66,19 +66,22 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/dchest/siphash v1.2.3-0.20201109081723-a21c2e7914a8 // indirect
 	github.com/dgryski/go-farm v0.0.0-20180109070241-2de33835d102 // indirect
+	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/protobuf v1.5.3-0.20210916003710-5d5e8c018a13 // indirect
 	github.com/google/go-cmp v0.5.8 // indirect
 	github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 // indirect
+	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
 	github.com/josharian/native v1.0.0 // indirect
 	github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 // indirect
 	github.com/klauspost/compress v1.15.10-0.20220729101446-5a3a4a965cc6 // indirect
-	github.com/marten-seemann/qpack v0.3.0 // indirect
 	github.com/mdlayher/netlink v1.4.2-0.20210930205308-a81a8c23d40a // indirect
 	github.com/mdlayher/socket v0.0.0-20210624160740-9dbe287ded84 // indirect
 	github.com/mroth/weightedrand v0.4.1 // indirect
+	github.com/onsi/ginkgo/v2 v2.2.0 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/quic-go/qpack v0.4.0 // indirect
 	github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507 // indirect
 	github.com/smartystreets/goconvey v1.7.2 // indirect
 	gitlab.com/yawning/obfs4.git v0.0.0-20190120164510-816cff15f425 // indirect

+ 17 - 4
go.sum

@@ -24,6 +24,8 @@ github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3 h1:BKSZdSkhOG
 github.com/Psiphon-Labs/quic-go v0.0.0-20221014165902-1b7c3975fcf3/go.mod h1:llhtSl7dUXTssUN4m4MjUDJrULGNxgZBMKYjExuk6EM=
 github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66 h1:xTGnmXqEEUphnohYLGwGrlVzWPNpTNAFViJYxQUMHRU=
 github.com/Psiphon-Labs/quic-go v0.0.0-20230124165616-fe8e9a215a66/go.mod h1:cu4yhfHkyt+uQ9FFFjTpjCjcQYf52ntEAyoV4Zg0+fg=
+github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc h1:FUmGSvMiMbf1tFXWbK0+N7+5zBhOol8CHQdpB4ZQlDg=
+github.com/Psiphon-Labs/quic-go v0.0.0-20230215230806-9b1ddbf778cc/go.mod h1:cu4yhfHkyt+uQ9FFFjTpjCjcQYf52ntEAyoV4Zg0+fg=
 github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad h1:m6HS84+b5xDPLj7D/ya1CeixyaHOCZoMbBilJ48y+Ts=
 github.com/Psiphon-Labs/tls-tris v0.0.0-20210713133851-676a693d51ad/go.mod h1:v3y9GXFo9Sf2mO6auD2ExGG7oDgrK8TI7eb49ZnUxrE=
 github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=
@@ -37,6 +39,9 @@ github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 h1:BU+NxuoaYPIvvp
 github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 h1:a1zrFsLFac2xoM6zG1u72DWJwZG3ayttYLfmLbxVETk=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea h1:9C2rdYRp8Vzwhm3sbFX0yYfB+70zKFRjn7cnPCucHSw=
@@ -63,6 +68,8 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439 h1:T6zlOdzrYuHf6HUKujm9bzkzbZ5Iv/xf6rs8BHZDpoI=
 github.com/gobwas/glob v0.2.4-0.20180402141543-f00a7392b439/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
@@ -94,6 +101,8 @@ github.com/google/gopacket v1.1.19-0.20200831200443-df1bbd09a561 h1:VB5cLlMqQWru
 github.com/google/gopacket v1.1.19-0.20200831200443-df1bbd09a561/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
 github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4 h1:OL2d27ueTKnlQJoqLW2fc9pWYulFnJYLWzomGV7HqZo=
 github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4/go.mod h1:Pw1H1OjSNHiqeuxAduB1BKYXIwFtsyrY47nEqSgEiCM=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/grafov/m3u8 v0.0.0-20171211212457-6ab8f28ed427 h1:xh96CCAZTX8LJPFoOVRgTwZbn2DvJl8fyCyivohhSIg=
@@ -102,6 +111,7 @@ github.com/h2non/gock v1.0.9/go.mod h1:CZMcB0Lg5IWnr9bF79pPMg9WeV6WumxQiUJ1UvdO1
 github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw=
 github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.0.0 h1:Ts/E8zCSEsG17dUqv7joXJFybuMLjQfWE04tsBODTxk=
@@ -131,10 +141,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/marten-seemann/qpack v0.2.1 h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs=
-github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
-github.com/marten-seemann/qpack v0.3.0 h1:UiWstOgT8+znlkDPOg2+3rIuYXJ2CnGDkGUXN6ki6hE=
-github.com/marten-seemann/qpack v0.3.0/go.mod h1:cGfKPBiP4a9EQdxCwEwI/GEeWAsjSekBvx/X8mh58+g=
 github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a h1:6SRny9FLB1eWasPyDUqBQnMi9NhXU01XIlB0ao89YoI=
 github.com/marusama/semaphore v0.0.0-20171214154724-565ffd8e868a/go.mod h1:TmeOqAKoDinfPfSohs14CO3VcEf7o+Bem6JiNe05yrQ=
 github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY=
@@ -171,6 +177,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo/v2 v2.2.0 h1:3ZNA3L1c5FYDFTTxbFeVGGD8jYvjYauHD30YgLxVsNI=
+github.com/onsi/ginkgo/v2 v2.2.0/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
@@ -186,6 +194,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo=
+github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A=
 github.com/refraction-networking/utls v1.0.0/go.mod h1:tz9gX959MEFfFN5whTIocCLUG57WiILqtdVxI8c6Wj0=
 github.com/refraction-networking/utls v1.1.3 h1:K9opY+iKxcGvHOBG2019wFEVtsNFh0f5WqHyc2i3iU0=
 github.com/refraction-networking/utls v1.1.3/go.mod h1:+D89TUtA8+NKVFj1IXWr0p3tSdX1+SqUB7rL0QnGqyg=
@@ -202,6 +212,7 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg
 github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=
@@ -276,6 +287,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -348,6 +360,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
 gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

+ 2 - 2
vendor/github.com/Psiphon-Labs/quic-go/Changelog.md

@@ -101,8 +101,8 @@
 - Add a `quic.Config` option to configure keep-alive
 - Rename the STK to Cookie
 - Implement `net.Conn`-style deadlines for streams
-- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/lucas-clemente/quic-go) for details.
-- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/lucas-clemente/quic-go/wiki/Logging) for more details.
+- Remove the `tls.Config` from the `quic.Config`. The `tls.Config` must now be passed to the `Dial` and `Listen` functions as a separate parameter. See the [Godoc](https://godoc.org/github.com/quic-go/quic-go) for details.
+- Changed the log level environment variable to only accept strings ("DEBUG", "INFO", "ERROR"), see [the wiki](https://github.com/quic-go/quic-go/wiki/Logging) for more details.
 - Rename the `h2quic.QuicRoundTripper` to `h2quic.RoundTripper`
 - Changed `h2quic.Server.Serve()` to accept a `net.PacketConn`
 - Drop support for Go 1.7 and 1.8.

+ 15 - 14
vendor/github.com/Psiphon-Labs/quic-go/README.md

@@ -2,8 +2,8 @@
 
 <img src="docs/quic.png" width=303 height=124>
 
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go)
-[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/quic-go/quic-go)](https://pkg.go.dev/github.com/quic-go/quic-go)
+[![Code Coverage](https://img.shields.io/codecov/c/github/quic-go/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/quic-go/quic-go/)
 
 quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the Unreliable Datagram Extension ([RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221)) and Datagram Packetization Layer Path MTU
    Discovery (DPLPMTUD, [RFC 8899](https://datatracker.ietf.org/doc/html/rfc8899)). It has support for HTTP/3 ([RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114)), including QPACK ([RFC 9204](https://datatracker.ietf.org/doc/html/rfc9204)).
@@ -45,18 +45,19 @@ http.Client{
 
 ## Projects using quic-go
 
-| Project                                              | Description                                                                                            | Stars |
-|------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------|
-| [algernon](https://github.com/xyproto/algernon)      | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) |
-| [caddy](https://github.com/caddyserver/caddy/)       | Fast, multi-platform web server with automatic HTTPS                                                   | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) |
-| [go-ipfs](https://github.com/ipfs/go-ipfs)           | IPFS implementation in go                                                                              | ![GitHub Repo stars](https://img.shields.io/github/stars/ipfs/go-ipfs?style=flat-square) |
-| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization                                                            | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
-| [traefik](https://github.com/traefik/traefik)        | The Cloud Native Application Proxy                                                                     | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
-| [v2ray-core](https://github.com/v2fly/v2ray-core)    | A platform for building proxies to bypass network restrictions                                         | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) |
-| [cloudflared](https://github.com/cloudflare/cloudflared)    | A tunneling daemon that proxies traffic from the Cloudflare network to your origins             | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) |
-| [OONI Probe](https://github.com/ooni/probe-cli)            | The Open Observatory of Network Interference (OONI) aims to empower decentralized efforts in documenting Internet censorship around the world.   | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
-| [YoMo](https://github.com/yomorun/yomo)    | Streaming Serverless Framework for Geo-distributed System | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) |
+| Project                                                   | Description                                                                                             | Stars |
+|-----------------------------------------------------------|---------------------------------------------------------------------------------------------------------|-------|
+| [AdGuardHome](https://github.com/AdguardTeam/AdGuardHome) | Free and open source, powerful network-wide ads & trackers blocking DNS server.                         | ![GitHub Repo stars](https://img.shields.io/github/stars/AdguardTeam/AdGuardHome?style=flat-square) |
+| [algernon](https://github.com/xyproto/algernon)           | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support  | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) |
+| [caddy](https://github.com/caddyserver/caddy/)            | Fast, multi-platform web server with automatic HTTPS                                                    | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) |
+| [cloudflared](https://github.com/cloudflare/cloudflared)  | A tunneling daemon that proxies traffic from the Cloudflare network to your origins                     | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) |
+| [go-libp2p](https://github.com/libp2p/go-libp2p)          | libp2p implementation in Go, powering [Kubo](https://github.com/ipfs/kubo) (IPFS) and [Lotus](https://github.com/filecoin-project/lotus) (Filecoin), among others                                   | ![GitHub Repo stars](https://img.shields.io/github/stars/libp2p/go-libp2p?style=flat-square) |
+| [OONI Probe](https://github.com/ooni/probe-cli)           | Next generation OONI Probe. Library and CLI tool.                                                       | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
+| [syncthing](https://github.com/syncthing/syncthing/)      | Open Source Continuous File Synchronization                                                             | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
+| [traefik](https://github.com/traefik/traefik)             | The Cloud Native Application Proxy                                                                      | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
+| [v2ray-core](https://github.com/v2fly/v2ray-core)         | A platform for building proxies to bypass network restrictions                                          | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) |
+| [YoMo](https://github.com/yomorun/yomo)                   | Streaming Serverless Framework for Geo-distributed System                                               | ![GitHub Repo stars](https://img.shields.io/github/stars/yomorun/yomo?style=flat-square) |
 
 ## Contributing
 
-We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
+We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/quic-go/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.

+ 1 - 0
vendor/github.com/Psiphon-Labs/quic-go/config.go

@@ -135,6 +135,7 @@ func populateConfig(config *Config, defaultConnIDLen int) *Config {
 		EnableDatagrams:                  config.EnableDatagrams,
 		DisablePathMTUDiscovery:          config.DisablePathMTUDiscovery,
 		DisableVersionNegotiationPackets: config.DisableVersionNegotiationPackets,
+		Allow0RTT:                        config.Allow0RTT,
 		Tracer:                           config.Tracer,
 
 		// [Psiphon]

+ 0 - 4
vendor/github.com/Psiphon-Labs/quic-go/conn_id_generator.go

@@ -22,8 +22,6 @@ type connIDGenerator struct {
 	retireConnectionID     func(protocol.ConnectionID)
 	replaceWithClosed      func([]protocol.ConnectionID, protocol.Perspective, []byte)
 	queueControlFrame      func(wire.Frame)
-
-	version protocol.VersionNumber
 }
 
 func newConnIDGenerator(
@@ -36,7 +34,6 @@ func newConnIDGenerator(
 	replaceWithClosed func([]protocol.ConnectionID, protocol.Perspective, []byte),
 	queueControlFrame func(wire.Frame),
 	generator ConnectionIDGenerator,
-	version protocol.VersionNumber,
 ) *connIDGenerator {
 	m := &connIDGenerator{
 		generator:              generator,
@@ -47,7 +44,6 @@ func newConnIDGenerator(
 		retireConnectionID:     retireConnectionID,
 		replaceWithClosed:      replaceWithClosed,
 		queueControlFrame:      queueControlFrame,
-		version:                version,
 	}
 	m.activeSrcConnIDs[0] = initialConnectionID
 	m.initialClientDestConnID = initialClientDestConnID

+ 149 - 88
vendor/github.com/Psiphon-Labs/quic-go/connection.go

@@ -25,7 +25,7 @@ import (
 )
 
 type unpacker interface {
-	UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error)
+	UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error)
 	UnpackShortHeader(rcvTime time.Time, data []byte) (protocol.PacketNumber, protocol.PacketNumberLen, protocol.KeyPhaseBit, []byte, error)
 }
 
@@ -218,6 +218,9 @@ type connection struct {
 
 	datagramQueue *datagramQueue
 
+	connStateMutex sync.Mutex
+	connState      ConnectionState
+
 	logID  string
 	tracer logging.ConnectionTracer
 	logger utils.Logger
@@ -241,7 +244,6 @@ var newConnection = func(
 	conf *Config,
 	tlsConf *tls.Config,
 	tokenGenerator *handshake.TokenGenerator,
-	enable0RTT bool,
 	clientAddressValidated bool,
 	tracer logging.ConnectionTracer,
 	tracingID uint64,
@@ -282,7 +284,6 @@ var newConnection = func(
 		runner.ReplaceWithClosed,
 		s.queueControlFrame,
 		s.config.ConnectionIDGenerator,
-		s.version,
 	)
 	s.preSetup()
 	s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
@@ -301,7 +302,6 @@ var newConnection = func(
 		s.perspective,
 		s.tracer,
 		s.logger,
-		s.version,
 	)
 	initialStream := newCryptoStream()
 	handshakeStream := newCryptoStream()
@@ -330,6 +330,10 @@ var newConnection = func(
 	if s.tracer != nil {
 		s.tracer.SentTransportParameters(params)
 	}
+	var allow0RTT func() bool
+	if conf.Allow0RTT != nil {
+		allow0RTT = func() bool { return conf.Allow0RTT(conn.RemoteAddr()) }
+	}
 	cs := handshake.NewCryptoSetupServer(
 		initialStream,
 		handshakeStream,
@@ -347,7 +351,7 @@ var newConnection = func(
 			},
 		},
 		tlsConf,
-		enable0RTT,
+		allow0RTT,
 		s.rttStats,
 		tracer,
 		logger,
@@ -371,9 +375,8 @@ var newConnection = func(
 		s.receivedPacketHandler,
 		s.datagramQueue,
 		s.perspective,
-		s.version,
 	)
-	s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version)
+	s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
 	s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, s.oneRTTStream)
 	return s
 }
@@ -424,7 +427,6 @@ var newClientConnection = func(
 		runner.ReplaceWithClosed,
 		s.queueControlFrame,
 		s.config.ConnectionIDGenerator,
-		s.version,
 	)
 	s.preSetup()
 	s.ctx, s.ctxCancel = context.WithCancel(context.WithValue(context.Background(), ConnectionTracingKey, tracingID))
@@ -439,7 +441,6 @@ var newClientConnection = func(
 		s.perspective,
 		s.tracer,
 		s.logger,
-		s.version,
 	)
 	initialStream := newCryptoStream()
 	handshakeStream := newCryptoStream()
@@ -493,7 +494,7 @@ var newClientConnection = func(
 	s.clientHelloWritten = clientHelloWritten
 	s.cryptoStreamHandler = cs
 	s.cryptoStreamManager = newCryptoStreamManager(cs, initialStream, handshakeStream, newCryptoStream())
-	s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen, s.version)
+	s.unpacker = newPacketUnpacker(cs, s.srcConnIDLen)
 	s.packer = newPacketPacker(
 		srcConnID,
 		s.connIDManager.Get,
@@ -511,7 +512,6 @@ var newClientConnection = func(
 		s.receivedPacketHandler,
 		s.datagramQueue,
 		s.perspective,
-		s.version,
 	)
 	if len(tlsConf.ServerName) > 0 {
 		s.tokenStoreKey = tlsConf.ServerName
@@ -528,8 +528,8 @@ var newClientConnection = func(
 
 func (s *connection) preSetup() {
 	s.sendQueue = newSendQueue(s.conn)
-	s.retransmissionQueue = newRetransmissionQueue(s.version)
-	s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams, s.version)
+	s.retransmissionQueue = newRetransmissionQueue()
+	s.frameParser = wire.NewFrameParser(s.config.EnableDatagrams)
 	s.rttStats = &utils.RTTStats{}
 	s.connFlowController = flowcontrol.NewConnectionFlowController(
 		protocol.ByteCount(s.config.InitialConnectionReceiveWindow),
@@ -551,9 +551,8 @@ func (s *connection) preSetup() {
 		uint64(s.config.MaxIncomingStreams),
 		uint64(s.config.MaxIncomingUniStreams),
 		s.perspective,
-		s.version,
 	)
-	s.framer = newFramer(s.streamsMap, s.version)
+	s.framer = newFramer(s.streamsMap)
 	s.receivedPackets = make(chan *receivedPacket, protocol.MaxConnUnprocessedPackets)
 	s.closeChan = make(chan closeError, 1)
 	s.sendingScheduled = make(chan struct{}, 1)
@@ -565,6 +564,7 @@ func (s *connection) preSetup() {
 
 	s.windowUpdateQueue = newWindowUpdateQueue(s.streamsMap, s.connFlowController, s.framer.QueueControlFrame)
 	s.datagramQueue = newDatagramQueue(s.scheduleSending, s.logger)
+	s.connState.Version = s.version
 }
 
 // run the connection main loop
@@ -758,11 +758,10 @@ func (s *connection) supportsDatagrams() bool {
 }
 
 func (s *connection) ConnectionState() ConnectionState {
-	return ConnectionState{
-		TLS:               s.cryptoStreamHandler.ConnectionState(),
-		SupportsDatagrams: s.supportsDatagrams(),
-		Version:           s.version,
-	}
+	s.connStateMutex.Lock()
+	defer s.connStateMutex.Unlock()
+	s.connState.TLS = s.cryptoStreamHandler.ConnectionState()
+	return s.connState
 }
 
 // Time when the next keep-alive packet should be sent.
@@ -916,7 +915,7 @@ func (s *connection) handlePacketImpl(rp *receivedPacket) bool {
 		}
 
 		if wire.IsLongHeaderPacket(p.data[0]) {
-			hdr, packetData, rest, err := wire.ParsePacket(p.data, s.srcConnIDLen)
+			hdr, packetData, rest, err := wire.ParsePacket(p.data)
 			if err != nil {
 				if s.tracer != nil {
 					dropReason := logging.PacketDropHeaderParseError
@@ -1049,7 +1048,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header)
 		return false
 	}
 
-	packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data)
+	packet, err := s.unpacker.UnpackLongHeader(hdr, p.rcvTime, p.data, s.version)
 	if err != nil {
 		wasQueued = s.handleUnpackError(err, p, logging.PacketTypeFromHeader(hdr))
 		return false
@@ -1068,7 +1067,7 @@ func (s *connection) handleLongHeaderPacket(p *receivedPacket, hdr *wire.Header)
 		return false
 	}
 
-	if err := s.handleUnpackedPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
+	if err := s.handleUnpackedLongHeaderPacket(packet, p.ecn, p.rcvTime, p.Size()); err != nil {
 		s.closeLocal(err)
 		return false
 	}
@@ -1231,7 +1230,7 @@ func (s *connection) handleVersionNegotiationPacket(p *receivedPacket) {
 	})
 }
 
-func (s *connection) handleUnpackedPacket(
+func (s *connection) handleUnpackedLongHeaderPacket(
 	packet *unpackedPacket,
 	ecn protocol.ECN,
 	rcvTime time.Time,
@@ -1250,7 +1249,7 @@ func (s *connection) handleUnpackedPacket(
 			s.tracer.NegotiatedVersion(s.version, clientVersions, serverVersions)
 		}
 		// The server can change the source connection ID with the first Handshake packet.
-		if s.perspective == protocol.PerspectiveClient && packet.hdr.IsLongHeader && packet.hdr.SrcConnectionID != s.handshakeDestConnID {
+		if s.perspective == protocol.PerspectiveClient && packet.hdr.SrcConnectionID != s.handshakeDestConnID {
 			cid := packet.hdr.SrcConnectionID
 			s.logger.Debugf("Received first packet. Switching destination connection ID to: %s", cid)
 			s.handshakeDestConnID = cid
@@ -1323,7 +1322,7 @@ func (s *connection) handleFrames(
 	// If we're not tracing, this slice will always remain empty.
 	var frames []wire.Frame
 	for len(data) > 0 {
-		l, frame, err := s.frameParser.ParseNext(data, encLevel)
+		l, frame, err := s.frameParser.ParseNext(data, encLevel, s.version)
 		if err != nil {
 			return false, err
 		}
@@ -1716,6 +1715,9 @@ func (s *connection) restoreTransportParameters(params *wire.TransportParameters
 	s.connIDGenerator.SetMaxActiveConnIDs(params.ActiveConnectionIDLimit)
 	s.connFlowController.UpdateSendWindow(params.InitialMaxData)
 	s.streamsMap.UpdateLimits(params)
+	s.connStateMutex.Lock()
+	s.connState.SupportsDatagrams = s.supportsDatagrams()
+	s.connStateMutex.Unlock()
 }
 
 func (s *connection) handleTransportParameters(params *wire.TransportParameters) {
@@ -1734,6 +1736,10 @@ func (s *connection) handleTransportParameters(params *wire.TransportParameters)
 		// the client's transport parameters.
 		close(s.earlyConnReadyChan)
 	}
+
+	s.connStateMutex.Lock()
+	s.connState.SupportsDatagrams = s.supportsDatagrams()
+	s.connStateMutex.Unlock()
 }
 
 func (s *connection) checkTransportParameters(params *wire.TransportParameters) error {
@@ -1858,43 +1864,40 @@ func (s *connection) sendPackets() error {
 
 func (s *connection) maybeSendAckOnlyPacket() error {
 	if !s.handshakeConfirmed {
-		packet, err := s.packer.PackCoalescedPacket(true)
+		packet, err := s.packer.PackCoalescedPacket(true, s.version)
 		if err != nil {
 			return err
 		}
 		if packet == nil {
 			return nil
 		}
-		s.logCoalescedPacket(packet)
-		for _, p := range packet.packets {
-			s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(time.Now(), s.retransmissionQueue))
-		}
-		s.connIDManager.SentPacket()
-		s.sendQueue.Send(packet.buffer)
+		s.sendPackedCoalescedPacket(packet, time.Now())
 		return nil
 	}
 
-	packet, err := s.packer.PackPacket(true)
+	now := time.Now()
+	p, buffer, err := s.packer.PackPacket(true, now, s.version)
 	if err != nil {
+		if err == errNothingToPack {
+			return nil
+		}
 		return err
 	}
-	if packet == nil {
-		return nil
-	}
-	s.sendPackedPacket(packet, time.Now())
+	s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+	s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
 	return nil
 }
 
 func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error {
 	// Queue probe packets until we actually send out a packet,
 	// or until there are no more packets to queue.
-	var packet *packedPacket
+	var packet *coalescedPacket
 	for {
 		if wasQueued := s.sentPacketHandler.QueueProbePacket(encLevel); !wasQueued {
 			break
 		}
 		var err error
-		packet, err = s.packer.MaybePackProbePacket(encLevel)
+		packet, err = s.packer.MaybePackProbePacket(encLevel, s.version)
 		if err != nil {
 			return err
 		}
@@ -1915,15 +1918,15 @@ func (s *connection) sendProbePacket(encLevel protocol.EncryptionLevel) error {
 			panic("unexpected encryption level")
 		}
 		var err error
-		packet, err = s.packer.MaybePackProbePacket(encLevel)
+		packet, err = s.packer.MaybePackProbePacket(encLevel, s.version)
 		if err != nil {
 			return err
 		}
 	}
-	if packet == nil || packet.packetContents == nil {
+	if packet == nil || (len(packet.longHdrPackets) == 0 && packet.shortHdrPacket == nil) {
 		return fmt.Errorf("connection BUG: couldn't pack %s probe packet", encLevel)
 	}
-	s.sendPackedPacket(packet, time.Now())
+	s.sendPackedCoalescedPacket(packet, time.Now())
 	return nil
 }
 
@@ -1935,44 +1938,59 @@ func (s *connection) sendPacket() (bool, error) {
 
 	now := time.Now()
 	if !s.handshakeConfirmed {
-		packet, err := s.packer.PackCoalescedPacket(false)
+		packet, err := s.packer.PackCoalescedPacket(false, s.version)
 		if err != nil || packet == nil {
 			return false, err
 		}
 		s.sentFirstPacket = true
-		s.logCoalescedPacket(packet)
-		for _, p := range packet.packets {
-			if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
-				s.firstAckElicitingPacketAfterIdleSentTime = now
-			}
-			s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue))
-		}
-		s.connIDManager.SentPacket()
-		s.sendQueue.Send(packet.buffer)
+		s.sendPackedCoalescedPacket(packet, now)
 		return true, nil
-	}
-	if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) {
-		packet, err := s.packer.PackMTUProbePacket(s.mtuDiscoverer.GetPing())
+	} else if !s.config.DisablePathMTUDiscovery && s.mtuDiscoverer.ShouldSendProbe(now) {
+		ping, size := s.mtuDiscoverer.GetPing()
+		p, buffer, err := s.packer.PackMTUProbePacket(ping, size, now, s.version)
 		if err != nil {
 			return false, err
 		}
-		s.sendPackedPacket(packet, now)
+		s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+		s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
 		return true, nil
 	}
-	packet, err := s.packer.PackPacket(false)
-	if err != nil || packet == nil {
+	p, buffer, err := s.packer.PackPacket(false, now, s.version)
+	if err != nil {
+		if err == errNothingToPack {
+			return false, nil
+		}
 		return false, err
 	}
-	s.sendPackedPacket(packet, now)
+	s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, buffer.Len(), false)
+	s.sendPackedShortHeaderPacket(buffer, p.Packet, now)
 	return true, nil
 }
 
-func (s *connection) sendPackedPacket(packet *packedPacket, now time.Time) {
-	if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && packet.IsAckEliciting() {
+func (s *connection) sendPackedShortHeaderPacket(buffer *packetBuffer, p *ackhandler.Packet, now time.Time) {
+	if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && ackhandler.HasAckElicitingFrames(p.Frames) {
 		s.firstAckElicitingPacketAfterIdleSentTime = now
 	}
-	s.logPacket(packet)
-	s.sentPacketHandler.SentPacket(packet.ToAckHandlerPacket(now, s.retransmissionQueue))
+
+	s.sentPacketHandler.SentPacket(p)
+	s.connIDManager.SentPacket()
+	s.sendQueue.Send(buffer)
+}
+
+func (s *connection) sendPackedCoalescedPacket(packet *coalescedPacket, now time.Time) {
+	s.logCoalescedPacket(packet)
+	for _, p := range packet.longHdrPackets {
+		if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
+			s.firstAckElicitingPacketAfterIdleSentTime = now
+		}
+		s.sentPacketHandler.SentPacket(p.ToAckHandlerPacket(now, s.retransmissionQueue))
+	}
+	if p := packet.shortHdrPacket; p != nil {
+		if s.firstAckElicitingPacketAfterIdleSentTime.IsZero() && p.IsAckEliciting() {
+			s.firstAckElicitingPacketAfterIdleSentTime = now
+		}
+		s.sentPacketHandler.SentPacket(p.Packet)
+	}
 	s.connIDManager.SentPacket()
 	s.sendQueue.Send(packet.buffer)
 }
@@ -1983,14 +2001,14 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) {
 	var transportErr *qerr.TransportError
 	var applicationErr *qerr.ApplicationError
 	if errors.As(e, &transportErr) {
-		packet, err = s.packer.PackConnectionClose(transportErr)
+		packet, err = s.packer.PackConnectionClose(transportErr, s.version)
 	} else if errors.As(e, &applicationErr) {
-		packet, err = s.packer.PackApplicationClose(applicationErr)
+		packet, err = s.packer.PackApplicationClose(applicationErr, s.version)
 	} else {
 		packet, err = s.packer.PackConnectionClose(&qerr.TransportError{
 			ErrorCode:    qerr.InternalError,
 			ErrorMessage: fmt.Sprintf("connection BUG: unspecified error type (msg: %s)", e.Error()),
-		})
+		}, s.version)
 	}
 	if err != nil {
 		return nil, err
@@ -1999,7 +2017,18 @@ func (s *connection) sendConnectionClose(e error) ([]byte, error) {
 	return packet.buffer.Data, s.conn.Write(packet.buffer.Data)
 }
 
-func (s *connection) logPacketContents(p *packetContents) {
+func (s *connection) logLongHeaderPacket(p *longHeaderPacket) {
+	// quic-go logging
+	if s.logger.Debug() {
+		p.header.Log(s.logger)
+		if p.ack != nil {
+			wire.LogFrame(s.logger, p.ack, true)
+		}
+		for _, frame := range p.frames {
+			wire.LogFrame(s.logger, frame.Frame, true)
+		}
+	}
+
 	// tracing
 	if s.tracer != nil {
 		frames := make([]logging.Frame, 0, len(p.frames))
@@ -2010,40 +2039,72 @@ func (s *connection) logPacketContents(p *packetContents) {
 		if p.ack != nil {
 			ack = logutils.ConvertAckFrame(p.ack)
 		}
-		s.tracer.SentPacket(p.header, p.length, ack, frames)
+		s.tracer.SentLongHeaderPacket(p.header, p.length, ack, frames)
 	}
+}
 
-	// quic-go logging
-	if !s.logger.Debug() {
-		return
+func (s *connection) logShortHeaderPacket(
+	destConnID protocol.ConnectionID,
+	ackFrame *wire.AckFrame,
+	frames []*ackhandler.Frame,
+	pn protocol.PacketNumber,
+	pnLen protocol.PacketNumberLen,
+	kp protocol.KeyPhaseBit,
+	size protocol.ByteCount,
+	isCoalesced bool,
+) {
+	if s.logger.Debug() && !isCoalesced {
+		s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, 1-RTT", pn, size, s.logID)
 	}
-	p.header.Log(s.logger)
-	if p.ack != nil {
-		wire.LogFrame(s.logger, p.ack, true)
+	// quic-go logging
+	if s.logger.Debug() {
+		wire.LogShortHeader(s.logger, destConnID, pn, pnLen, kp)
+		if ackFrame != nil {
+			wire.LogFrame(s.logger, ackFrame, true)
+		}
+		for _, frame := range frames {
+			wire.LogFrame(s.logger, frame.Frame, true)
+		}
 	}
-	for _, frame := range p.frames {
-		wire.LogFrame(s.logger, frame.Frame, true)
+
+	// tracing
+	if s.tracer != nil {
+		fs := make([]logging.Frame, 0, len(frames))
+		for _, f := range frames {
+			fs = append(fs, logutils.ConvertFrame(f.Frame))
+		}
+		var ack *logging.AckFrame
+		if ackFrame != nil {
+			ack = logutils.ConvertAckFrame(ackFrame)
+		}
+		s.tracer.SentShortHeaderPacket(
+			&logging.ShortHeader{
+				DestConnectionID: destConnID,
+				PacketNumber:     pn,
+				PacketNumberLen:  pnLen,
+				KeyPhase:         kp,
+			},
+			size,
+			ack,
+			fs,
+		)
 	}
 }
 
 func (s *connection) logCoalescedPacket(packet *coalescedPacket) {
 	if s.logger.Debug() {
-		if len(packet.packets) > 1 {
-			s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.packets), packet.buffer.Len(), s.logID)
+		if len(packet.longHdrPackets) > 1 {
+			s.logger.Debugf("-> Sending coalesced packet (%d parts, %d bytes) for connection %s", len(packet.longHdrPackets), packet.buffer.Len(), s.logID)
 		} else {
-			s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.packets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.packets[0].EncryptionLevel())
+			s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.longHdrPackets[0].header.PacketNumber, packet.buffer.Len(), s.logID, packet.longHdrPackets[0].EncryptionLevel())
 		}
 	}
-	for _, p := range packet.packets {
-		s.logPacketContents(p)
+	for _, p := range packet.longHdrPackets {
+		s.logLongHeaderPacket(p)
 	}
-}
-
-func (s *connection) logPacket(packet *packedPacket) {
-	if s.logger.Debug() {
-		s.logger.Debugf("-> Sending packet %d (%d bytes) for connection %s, %s", packet.header.PacketNumber, packet.buffer.Len(), s.logID, packet.EncryptionLevel())
+	if p := packet.shortHdrPacket; p != nil {
+		s.logShortHeaderPacket(p.DestConnID, p.Ack, p.Frames, p.PacketNumber, p.PacketNumberLen, p.KeyPhase, p.Length, true)
 	}
-	s.logPacketContents(packet.packetContents)
 }
 
 // AcceptStream returns the next stream openend by the peer

+ 6 - 1
vendor/github.com/Psiphon-Labs/quic-go/errors.go

@@ -46,6 +46,7 @@ const (
 type StreamError struct {
 	StreamID  StreamID
 	ErrorCode StreamErrorCode
+	Remote    bool
 }
 
 func (e *StreamError) Is(target error) bool {
@@ -54,5 +55,9 @@ func (e *StreamError) Is(target error) bool {
 }
 
 func (e *StreamError) Error() string {
-	return fmt.Sprintf("stream %d canceled with error code %d", e.StreamID, e.ErrorCode)
+	pers := "local"
+	if e.Remote {
+		pers = "remote"
+	}
+	return fmt.Sprintf("stream %d canceled by %s with error code %d", e.StreamID, pers, e.ErrorCode)
 }

+ 8 - 1
vendor/github.com/Psiphon-Labs/quic-go/frame_sorter.go

@@ -2,6 +2,7 @@ package quic
 
 import (
 	"errors"
+	"sync"
 
 	"github.com/Psiphon-Labs/quic-go/internal/protocol"
 	list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist"
@@ -13,6 +14,12 @@ type byteInterval struct {
 	End   protocol.ByteCount
 }
 
+var byteIntervalElementPool sync.Pool
+
+func init() {
+	byteIntervalElementPool = *list.NewPool[byteInterval]()
+}
+
 type frameSorterEntry struct {
 	Data   []byte
 	DoneCb func()
@@ -28,7 +35,7 @@ var errDuplicateStreamData = errors.New("duplicate stream data")
 
 func newFrameSorter() *frameSorter {
 	s := frameSorter{
-		gaps:  list.New[byteInterval](),
+		gaps:  list.NewWithPool[byteInterval](&byteIntervalElementPool),
 		queue: make(map[protocol.ByteCount]frameSorterEntry),
 	}
 	s.gaps.PushFront(byteInterval{Start: 0, End: protocol.MaxByteCount})

+ 14 - 17
vendor/github.com/Psiphon-Labs/quic-go/framer.go

@@ -14,10 +14,10 @@ type framer interface {
 	HasData() bool
 
 	QueueControlFrame(wire.Frame)
-	AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
+	AppendControlFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
 
 	AddActiveStream(protocol.StreamID)
-	AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
+	AppendStreamFrames([]*ackhandler.Frame, protocol.ByteCount, protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount)
 
 	Handle0RTTRejection() error
 }
@@ -26,7 +26,6 @@ type framerI struct {
 	mutex sync.Mutex
 
 	streamGetter streamGetter
-	version      protocol.VersionNumber
 
 	activeStreams map[protocol.StreamID]struct{}
 	streamQueue   []protocol.StreamID
@@ -37,14 +36,10 @@ type framerI struct {
 
 var _ framer = &framerI{}
 
-func newFramer(
-	streamGetter streamGetter,
-	v protocol.VersionNumber,
-) framer {
+func newFramer(streamGetter streamGetter) framer {
 	return &framerI{
 		streamGetter:  streamGetter,
 		activeStreams: make(map[protocol.StreamID]struct{}),
-		version:       v,
 	}
 }
 
@@ -67,16 +62,18 @@ func (f *framerI) QueueControlFrame(frame wire.Frame) {
 	f.controlFrameMutex.Unlock()
 }
 
-func (f *framerI) AppendControlFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) {
+func (f *framerI) AppendControlFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) {
 	var length protocol.ByteCount
 	f.controlFrameMutex.Lock()
 	for len(f.controlFrames) > 0 {
 		frame := f.controlFrames[len(f.controlFrames)-1]
-		frameLen := frame.Length(f.version)
+		frameLen := frame.Length(v)
 		if length+frameLen > maxLen {
 			break
 		}
-		frames = append(frames, ackhandler.Frame{Frame: frame})
+		af := ackhandler.GetFrame()
+		af.Frame = frame
+		frames = append(frames, af)
 		length += frameLen
 		f.controlFrames = f.controlFrames[:len(f.controlFrames)-1]
 	}
@@ -93,7 +90,7 @@ func (f *framerI) AddActiveStream(id protocol.StreamID) {
 	f.mutex.Unlock()
 }
 
-func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount) {
+func (f *framerI) AppendStreamFrames(frames []*ackhandler.Frame, maxLen protocol.ByteCount, v protocol.VersionNumber) ([]*ackhandler.Frame, protocol.ByteCount) {
 	var length protocol.ByteCount
 	var lastFrame *ackhandler.Frame
 	f.mutex.Lock()
@@ -118,7 +115,7 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.
 		// Therefore, we can pretend to have more bytes available when popping
 		// the STREAM frame (which will always have the DataLen set).
 		remainingLen += quicvarint.Len(uint64(remainingLen))
-		frame, hasMoreData := str.popStreamFrame(remainingLen)
+		frame, hasMoreData := str.popStreamFrame(remainingLen, v)
 		if hasMoreData { // put the stream back in the queue (at the end)
 			f.streamQueue = append(f.streamQueue, id)
 		} else { // no more data to send. Stream is not active any more
@@ -130,16 +127,16 @@ func (f *framerI) AppendStreamFrames(frames []ackhandler.Frame, maxLen protocol.
 		if frame == nil {
 			continue
 		}
-		frames = append(frames, *frame)
-		length += frame.Length(f.version)
+		frames = append(frames, frame)
+		length += frame.Length(v)
 		lastFrame = frame
 	}
 	f.mutex.Unlock()
 	if lastFrame != nil {
-		lastFrameLen := lastFrame.Length(f.version)
+		lastFrameLen := lastFrame.Length(v)
 		// account for the smaller size of the last STREAM frame
 		lastFrame.Frame.(*wire.StreamFrame).DataLenPresent = false
-		length += lastFrame.Length(f.version) - lastFrameLen
+		length += lastFrame.Length(v) - lastFrameLen
 	}
 	return frames, length
 }

+ 18 - 3
vendor/github.com/Psiphon-Labs/quic-go/http3/client.go

@@ -16,7 +16,8 @@ import (
 	"github.com/Psiphon-Labs/quic-go/internal/qtls"
 	"github.com/Psiphon-Labs/quic-go/internal/utils"
 	"github.com/Psiphon-Labs/quic-go/quicvarint"
-	"github.com/marten-seemann/qpack"
+
+	"github.com/quic-go/qpack"
 )
 
 // MethodGet0RTT allows a GET request to be sent using 0-RTT.
@@ -72,7 +73,9 @@ type client struct {
 	logger utils.Logger
 }
 
-func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (*client, error) {
+var _ roundTripCloser = &client{}
+
+func newClient(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) {
 	if conf == nil {
 		conf = defaultQuicConfig.Clone()
 	} else if len(conf.Versions) == 0 {
@@ -433,7 +436,7 @@ func (c *client) doRequest(req *http.Request, str quic.Stream, opt RoundTripOpt,
 	// Rules for when to set Content-Length are defined in https://tools.ietf.org/html/rfc7230#section-3.3.2.
 	_, hasTransferEncoding := res.Header["Transfer-Encoding"]
 	isInformational := res.StatusCode >= 100 && res.StatusCode < 200
-	isNoContent := res.StatusCode == 204
+	isNoContent := res.StatusCode == http.StatusNoContent
 	isSuccessfulConnect := req.Method == http.MethodConnect && res.StatusCode >= 200 && res.StatusCode < 300
 	if !hasTransferEncoding && !isInformational && !isNoContent && !isSuccessfulConnect {
 		res.ContentLength = -1
@@ -456,3 +459,15 @@ func (c *client) doRequest(req *http.Request, str quic.Stream, opt RoundTripOpt,
 
 	return res, requestError{}
 }
+
+func (c *client) HandshakeComplete() bool {
+	if c.conn == nil {
+		return false
+	}
+	select {
+	case <-c.conn.HandshakeComplete().Done():
+		return true
+	default:
+		return false
+	}
+}

+ 1 - 3
vendor/github.com/Psiphon-Labs/quic-go/http3/request.go

@@ -1,14 +1,13 @@
 package http3
 
 import (
-	"crypto/tls"
 	"errors"
 	"net/http"
 	"net/url"
 	"strconv"
 	"strings"
 
-	"github.com/marten-seemann/qpack"
+	"github.com/quic-go/qpack"
 )
 
 func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) {
@@ -101,7 +100,6 @@ func requestFromHeaders(headers []qpack.HeaderField) (*http.Request, error) {
 		ContentLength: contentLength,
 		Host:          authority,
 		RequestURI:    requestURI,
-		TLS:           &tls.ConnectionState{},
 	}, nil
 }
 

+ 4 - 3
vendor/github.com/Psiphon-Labs/quic-go/http3/request_writer.go

@@ -10,12 +10,13 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/Psiphon-Labs/quic-go"
-	"github.com/Psiphon-Labs/quic-go/internal/utils"
-	"github.com/marten-seemann/qpack"
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
 	"golang.org/x/net/idna"
+
+	"github.com/Psiphon-Labs/quic-go"
+	"github.com/Psiphon-Labs/quic-go/internal/utils"
+	"github.com/quic-go/qpack"
 )
 
 const bodyCopyBufferSize = 8 * 1024

+ 5 - 4
vendor/github.com/Psiphon-Labs/quic-go/http3/response_writer.go

@@ -9,7 +9,8 @@ import (
 
 	"github.com/Psiphon-Labs/quic-go"
 	"github.com/Psiphon-Labs/quic-go/internal/utils"
-	"github.com/marten-seemann/qpack"
+
+	"github.com/quic-go/qpack"
 )
 
 type responseWriter struct {
@@ -80,7 +81,7 @@ func (w *responseWriter) WriteHeader(status int) {
 
 func (w *responseWriter) Write(p []byte) (int, error) {
 	if !w.headerWritten {
-		w.WriteHeader(200)
+		w.WriteHeader(http.StatusOK)
 	}
 	if !bodyAllowedForStatus(w.status) {
 		return 0, http.ErrBodyNotAllowed
@@ -111,9 +112,9 @@ func bodyAllowedForStatus(status int) bool {
 	switch {
 	case status >= 100 && status <= 199:
 		return false
-	case status == 204:
+	case status == http.StatusNoContent:
 		return false
-	case status == 304:
+	case status == http.StatusNotModified:
 		return false
 	}
 	return true

+ 48 - 23
vendor/github.com/Psiphon-Labs/quic-go/http3/roundtrip.go

@@ -6,17 +6,19 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"net"
 	"net/http"
 	"strings"
 	"sync"
 
-	"github.com/Psiphon-Labs/quic-go"
-
 	"golang.org/x/net/http/httpguts"
+
+	"github.com/Psiphon-Labs/quic-go"
 )
 
 type roundTripCloser interface {
 	RoundTripOpt(*http.Request, RoundTripOpt) (*http.Response, error)
+	HandshakeComplete() bool
 	io.Closer
 }
 
@@ -75,7 +77,8 @@ type RoundTripper struct {
 	// Zero means to use a default limit.
 	MaxResponseHeaderBytes int64
 
-	clients map[string]roundTripCloser
+	newClient func(hostname string, tlsConf *tls.Config, opts *roundTripperOpts, conf *quic.Config, dialer dialFunc) (roundTripCloser, error) // so we can mock it in tests
+	clients   map[string]roundTripCloser
 }
 
 // RoundTripOpt are options for the Transport.RoundTripOpt method.
@@ -110,22 +113,20 @@ func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.
 		closeRequestBody(req)
 		return nil, errors.New("http3: nil Request.Header")
 	}
-
-	if req.URL.Scheme == "https" {
-		for k, vv := range req.Header {
-			if !httpguts.ValidHeaderFieldName(k) {
-				return nil, fmt.Errorf("http3: invalid http header field name %q", k)
-			}
-			for _, v := range vv {
-				if !httpguts.ValidHeaderFieldValue(v) {
-					return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k)
-				}
-			}
-		}
-	} else {
+	if req.URL.Scheme != "https" {
 		closeRequestBody(req)
 		return nil, fmt.Errorf("http3: unsupported protocol scheme: %s", req.URL.Scheme)
 	}
+	for k, vv := range req.Header {
+		if !httpguts.ValidHeaderFieldName(k) {
+			return nil, fmt.Errorf("http3: invalid http header field name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				return nil, fmt.Errorf("http3: invalid http header field value %q for key %v", v, k)
+			}
+		}
+	}
 
 	if req.Method != "" && !validMethod(req.Method) {
 		closeRequestBody(req)
@@ -133,11 +134,20 @@ func (r *RoundTripper) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.
 	}
 
 	hostname := authorityAddr("https", hostnameFromRequest(req))
-	cl, err := r.getClient(hostname, opt.OnlyCachedConn)
+	cl, isReused, err := r.getClient(hostname, opt.OnlyCachedConn)
 	if err != nil {
 		return nil, err
 	}
-	return cl.RoundTripOpt(req, opt)
+	rsp, err := cl.RoundTripOpt(req, opt)
+	if err != nil {
+		r.removeClient(hostname)
+		if isReused {
+			if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
+				return r.RoundTripOpt(req, opt)
+			}
+		}
+	}
+	return rsp, err
 }
 
 // RoundTrip does a round trip.
@@ -145,7 +155,7 @@ func (r *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
 	return r.RoundTripOpt(req, RoundTripOpt{})
 }
 
-func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripCloser, error) {
+func (r *RoundTripper) getClient(hostname string, onlyCached bool) (rtc roundTripCloser, isReused bool, err error) {
 	r.mutex.Lock()
 	defer r.mutex.Unlock()
 
@@ -156,10 +166,14 @@ func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripClo
 	client, ok := r.clients[hostname]
 	if !ok {
 		if onlyCached {
-			return nil, ErrNoCachedConn
+			return nil, false, ErrNoCachedConn
 		}
 		var err error
-		client, err = newClient(
+		newCl := newClient
+		if r.newClient != nil {
+			newCl = r.newClient
+		}
+		client, err = newCl(
 			hostname,
 			r.TLSClientConfig,
 			&roundTripperOpts{
@@ -173,11 +187,22 @@ func (r *RoundTripper) getClient(hostname string, onlyCached bool) (roundTripClo
 			r.Dial,
 		)
 		if err != nil {
-			return nil, err
+			return nil, false, err
 		}
 		r.clients[hostname] = client
+	} else if client.HandshakeComplete() {
+		isReused = true
+	}
+	return client, isReused, nil
+}
+
+func (r *RoundTripper) removeClient(hostname string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if r.clients == nil {
+		return
 	}
-	return client, nil
+	delete(r.clients, hostname)
 }
 
 // Close closes the QUIC connections that this RoundTripper has used

+ 11 - 24
vendor/github.com/Psiphon-Labs/quic-go/http3/server.go

@@ -18,7 +18,8 @@ import (
 	"github.com/Psiphon-Labs/quic-go/internal/protocol"
 	"github.com/Psiphon-Labs/quic-go/internal/utils"
 	"github.com/Psiphon-Labs/quic-go/quicvarint"
-	"github.com/marten-seemann/qpack"
+
+	"github.com/quic-go/qpack"
 )
 
 // allows mocking of quic.Listen and quic.ListenAddr
@@ -272,7 +273,7 @@ func (s *Server) serveConn(tlsConf *tls.Config, conn net.PacketConn) error {
 	baseConf := ConfigureTLSConfig(tlsConf)
 	quicConf := s.QuicConfig
 	if quicConf == nil {
-		quicConf = &quic.Config{}
+		quicConf = &quic.Config{Allow0RTT: func(net.Addr) bool { return true }}
 	} else {
 		quicConf = s.QuicConfig.Clone()
 	}
@@ -570,6 +571,8 @@ func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *q
 		return newStreamError(errorGeneralProtocolError, err)
 	}
 
+	connState := conn.ConnectionState().TLS.ConnectionState
+	req.TLS = &connState
 	req.RemoteAddr = conn.RemoteAddr().String()
 	body := newRequestBody(newStream(str, onFrameError))
 	req.Body = body
@@ -614,9 +617,9 @@ func (s *Server) handleRequest(conn quic.Connection, str quic.Stream, decoder *q
 	}
 
 	if panicked {
-		r.WriteHeader(500)
+		r.WriteHeader(http.StatusInternalServerError)
 	} else {
-		r.WriteHeader(200)
+		r.WriteHeader(http.StatusOK)
 	}
 	// If the EOF was read by the handler, CancelRead() is a no-op.
 	str.CancelRead(quic.StreamErrorCode(errorNoError))
@@ -717,19 +720,6 @@ func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error
 	}
 	defer udpConn.Close()
 
-	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
-	if err != nil {
-		return err
-	}
-	tcpConn, err := net.ListenTCP("tcp", tcpAddr)
-	if err != nil {
-		return err
-	}
-	defer tcpConn.Close()
-
-	tlsConn := tls.NewListener(tcpConn, config)
-	defer tlsConn.Close()
-
 	if handler == nil {
 		handler = http.DefaultServeMux
 	}
@@ -738,17 +728,14 @@ func ListenAndServe(addr, certFile, keyFile string, handler http.Handler) error
 		TLSConfig: config,
 		Handler:   handler,
 	}
-	httpServer := &http.Server{
-		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-			quicServer.SetQuicHeaders(w.Header())
-			handler.ServeHTTP(w, r)
-		}),
-	}
 
 	hErr := make(chan error)
 	qErr := make(chan error)
 	go func() {
-		hErr <- httpServer.Serve(tlsConn)
+		hErr <- http.ListenAndServeTLS(addr, certFile, keyFile, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			quicServer.SetQuicHeaders(w.Header())
+			handler.ServeHTTP(w, r)
+		}))
 	}()
 	go func() {
 		qErr <- quicServer.Serve(udpConn)

+ 5 - 1
vendor/github.com/Psiphon-Labs/quic-go/interface.go

@@ -177,7 +177,6 @@ type Connection interface {
 	// Context returns a context that is cancelled when the connection is closed.
 	Context() context.Context
 	// ConnectionState returns basic details about the QUIC connection.
-	// It blocks until the handshake completes.
 	// Warning: This API should not be considered stable and might change soon.
 	ConnectionState() ConnectionState
 
@@ -326,6 +325,11 @@ type Config struct {
 	// This can be useful if version information is exchanged out-of-band.
 	// It has no effect for a client.
 	DisableVersionNegotiationPackets bool
+	// Allow0RTT allows the application to decide if a 0-RTT connection attempt should be accepted.
+	// When set, 0-RTT is enabled. When not set, 0-RTT is disabled.
+	// Only valid for the server.
+	// Warning: This API should not be considered stable and might change soon.
+	Allow0RTT func(net.Addr) bool
 	// Enable QUIC datagram support (RFC 9221).
 	EnableDatagrams bool
 	Tracer          logging.Tracer

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ack_eliciting.go

@@ -10,7 +10,7 @@ func IsFrameAckEliciting(f wire.Frame) bool {
 }
 
 // HasAckElicitingFrames returns true if at least one frame is ack-eliciting.
-func HasAckElicitingFrames(fs []Frame) bool {
+func HasAckElicitingFrames(fs []*Frame) bool {
 	for _, f := range fs {
 		if IsFrameAckEliciting(f.Frame) {
 			return true

+ 1 - 2
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/ackhandler.go

@@ -17,8 +17,7 @@ func NewAckHandler(
 	pers protocol.Perspective,
 	tracer logging.ConnectionTracer,
 	logger utils.Logger,
-	version protocol.VersionNumber,
 ) (SentPacketHandler, ReceivedPacketHandler) {
 	sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, clientAddressValidated, pers, tracer, logger)
-	return sph, newReceivedPacketHandler(sph, rttStats, logger, version)
+	return sph, newReceivedPacketHandler(sph, rttStats, logger)
 }

+ 21 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/frame.go

@@ -1,9 +1,29 @@
 package ackhandler
 
-import "github.com/Psiphon-Labs/quic-go/internal/wire"
+import (
+	"sync"
+
+	"github.com/Psiphon-Labs/quic-go/internal/wire"
+)
 
 type Frame struct {
 	wire.Frame // nil if the frame has already been acknowledged in another packet
 	OnLost     func(wire.Frame)
 	OnAcked    func(wire.Frame)
 }
+
+var framePool = sync.Pool{New: func() any { return &Frame{} }}
+
+func GetFrame() *Frame {
+	f := framePool.Get().(*Frame)
+	f.OnLost = nil
+	f.OnAcked = nil
+	return f
+}
+
+func putFrame(f *Frame) {
+	f.Frame = nil
+	f.OnLost = nil
+	f.OnAcked = nil
+	framePool.Put(f)
+}

+ 8 - 2
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/packet.go

@@ -10,7 +10,7 @@ import (
 // A Packet is a packet
 type Packet struct {
 	PacketNumber    protocol.PacketNumber
-	Frames          []Frame
+	Frames          []*Frame
 	LargestAcked    protocol.PacketNumber // InvalidPacketNumber if the packet doesn't contain an ACK
 	Length          protocol.ByteCount
 	EncryptionLevel protocol.EncryptionLevel
@@ -46,4 +46,10 @@ func GetPacket() *Packet {
 
 // We currently only return Packets back into the pool when they're acknowledged (not when they're lost).
 // This simplifies the code, and gives the vast majority of the performance benefit we can gain from using the pool.
-func putPacket(p *Packet) { packetPool.Put(p) }
+func putPacket(p *Packet) {
+	for _, f := range p.Frames {
+		putFrame(f)
+	}
+	p.Frames = nil
+	packetPool.Put(p)
+}

+ 3 - 4
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_handler.go

@@ -25,13 +25,12 @@ func newReceivedPacketHandler(
 	sentPackets sentPacketTracker,
 	rttStats *utils.RTTStats,
 	logger utils.Logger,
-	version protocol.VersionNumber,
 ) ReceivedPacketHandler {
 	return &receivedPacketHandler{
 		sentPackets:      sentPackets,
-		initialPackets:   newReceivedPacketTracker(rttStats, logger, version),
-		handshakePackets: newReceivedPacketTracker(rttStats, logger, version),
-		appDataPackets:   newReceivedPacketTracker(rttStats, logger, version),
+		initialPackets:   newReceivedPacketTracker(rttStats, logger),
+		handshakePackets: newReceivedPacketTracker(rttStats, logger),
+		appDataPackets:   newReceivedPacketTracker(rttStats, logger),
 		lowest1RTTPacket: protocol.InvalidPacketNumber,
 	}
 }

+ 9 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_history.go

@@ -1,6 +1,8 @@
 package ackhandler
 
 import (
+	"sync"
+
 	"github.com/Psiphon-Labs/quic-go/internal/protocol"
 	list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist"
 	"github.com/Psiphon-Labs/quic-go/internal/wire"
@@ -12,6 +14,12 @@ type interval struct {
 	End   protocol.PacketNumber
 }
 
+var intervalElementPool sync.Pool
+
+func init() {
+	intervalElementPool = *list.NewPool[interval]()
+}
+
 // The receivedPacketHistory stores if a packet number has already been received.
 // It generates ACK ranges which can be used to assemble an ACK frame.
 // It does not store packet contents.
@@ -23,7 +31,7 @@ type receivedPacketHistory struct {
 
 func newReceivedPacketHistory() *receivedPacketHistory {
 	return &receivedPacketHistory{
-		ranges: list.New[interval](),
+		ranges: list.NewWithPool[interval](&intervalElementPool),
 	}
 }
 

+ 0 - 4
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/received_packet_tracker.go

@@ -31,21 +31,17 @@ type receivedPacketTracker struct {
 	lastAck                                 *wire.AckFrame
 
 	logger utils.Logger
-
-	version protocol.VersionNumber
 }
 
 func newReceivedPacketTracker(
 	rttStats *utils.RTTStats,
 	logger utils.Logger,
-	version protocol.VersionNumber,
 ) *receivedPacketTracker {
 	return &receivedPacketTracker{
 		packetHistory: newReceivedPacketHistory(),
 		maxAckDelay:   protocol.MaxAckDelay,
 		rttStats:      rttStats,
 		logger:        logger,
-		version:       version,
 	}
 }
 

+ 11 - 8
vendor/github.com/Psiphon-Labs/quic-go/internal/ackhandler/sent_packet_history.go

@@ -2,6 +2,7 @@ package ackhandler
 
 import (
 	"fmt"
+	"sync"
 	"time"
 
 	"github.com/Psiphon-Labs/quic-go/internal/protocol"
@@ -17,11 +18,17 @@ type sentPacketHistory struct {
 	highestSent           protocol.PacketNumber
 }
 
+var packetElementPool sync.Pool
+
+func init() {
+	packetElementPool = *list.NewPool[*Packet]()
+}
+
 func newSentPacketHistory(rttStats *utils.RTTStats) *sentPacketHistory {
 	return &sentPacketHistory{
 		rttStats:              rttStats,
-		outstandingPacketList: list.New[*Packet](),
-		etcPacketList:         list.New[*Packet](),
+		outstandingPacketList: list.NewWithPool[*Packet](&packetElementPool),
+		etcPacketList:         list.NewWithPool[*Packet](&packetElementPool),
 		packetMap:             make(map[protocol.PacketNumber]*list.Element[*Packet]),
 		highestSent:           protocol.InvalidPacketNumber,
 	}
@@ -108,8 +115,7 @@ func (h *sentPacketHistory) Remove(p protocol.PacketNumber) error {
 	if !ok {
 		return fmt.Errorf("packet %d not found in sent packet history", p)
 	}
-	h.outstandingPacketList.Remove(el)
-	h.etcPacketList.Remove(el)
+	el.List().Remove(el)
 	delete(h.packetMap, p)
 	return nil
 }
@@ -139,10 +145,7 @@ func (h *sentPacketHistory) DeclareLost(p *Packet) *Packet {
 	if !ok {
 		return nil
 	}
-	// try to remove it from both lists, as we don't know which one it currently belongs to.
-	// Remove is a no-op for elements that are not in the list.
-	h.outstandingPacketList.Remove(el)
-	h.etcPacketList.Remove(el)
+	el.List().Remove(el)
 	p.declaredLost = true
 	// move it to the correct position in the etc list (based on the packet number)
 	for el = h.etcPacketList.Back(); el != nil; el = el.Prev() {

+ 33 - 14
vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/crypto_setup.go

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"math"
 	"net"
 	"sync"
 	"time"
@@ -116,6 +117,7 @@ type cryptoSetup struct {
 	clientHelloWritten     bool
 	clientHelloWrittenChan chan struct{} // is closed as soon as the ClientHello is written
 	zeroRTTParametersChan  chan<- *wire.TransportParameters
+	allow0RTT              func() bool
 
 	rttStats *utils.RTTStats
 
@@ -216,7 +218,7 @@ func NewCryptoSetupServer(
 	tp *wire.TransportParameters,
 	runner handshakeRunner,
 	tlsConf *tls.Config,
-	enable0RTT bool,
+	allow0RTT func() bool,
 	rttStats *utils.RTTStats,
 	tracer logging.ConnectionTracer,
 	logger utils.Logger,
@@ -229,7 +231,7 @@ func NewCryptoSetupServer(
 		tp,
 		runner,
 		tlsConf,
-		enable0RTT,
+		allow0RTT != nil,
 		rttStats,
 		tracer,
 		logger,
@@ -239,6 +241,7 @@ func NewCryptoSetupServer(
 		// [Psiphon]
 		nil,
 	)
+	cs.allow0RTT = allow0RTT
 	cs.conn = qtls.Server(newConn(localAddr, remoteAddr, version), cs.tlsConf, cs.extraConf)
 	return cs
 }
@@ -292,7 +295,7 @@ func newCryptoSetup(
 	}
 	var maxEarlyData uint32
 	if enable0RTT {
-		maxEarlyData = 0xffffffff
+		maxEarlyData = math.MaxUint32
 	}
 	cs.extraConf = &qtls.ExtraConfig{
 		GetExtensions:              extHandler.GetExtensions,
@@ -515,13 +518,17 @@ func (h *cryptoSetup) accept0RTT(sessionTicketData []byte) bool {
 		return false
 	}
 	valid := h.ourParams.ValidFor0RTT(t.Parameters)
-	if valid {
-		h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT)
-		h.rttStats.SetInitialRTT(t.RTT)
-	} else {
+	if !valid {
 		h.logger.Debugf("Transport parameters changed. Rejecting 0-RTT.")
+		return false
+	}
+	if !h.allow0RTT() {
+		h.logger.Debugf("0-RTT not allowed. Rejecting 0-RTT.")
+		return false
 	}
-	return valid
+	h.logger.Debugf("Accepting 0-RTT. Restoring RTT from session ticket: %s", t.RTT)
+	h.rttStats.SetInitialRTT(t.RTT)
+	return true
 }
 
 // rejected0RTT is called for the client when the server rejects 0-RTT.
@@ -600,7 +607,9 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph
 			newHeaderProtector(suite, trafficSecret, true, h.version),
 		)
 		h.mutex.Unlock()
-		h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed 0-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 		if h.tracer != nil {
 			h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective.Opposite())
 		}
@@ -613,12 +622,16 @@ func (h *cryptoSetup) SetReadKey(encLevel qtls.EncryptionLevel, suite *qtls.Ciph
 			h.dropInitialKeys,
 			h.perspective,
 		)
-		h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed Handshake Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 	case qtls.EncryptionApplication:
 		h.readEncLevel = protocol.Encryption1RTT
 		h.aead.SetReadKey(suite, trafficSecret)
 		h.has1RTTOpener = true
-		h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed 1-RTT Read keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 	default:
 		panic("unexpected read encryption level")
 	}
@@ -640,7 +653,9 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip
 			newHeaderProtector(suite, trafficSecret, true, h.version),
 		)
 		h.mutex.Unlock()
-		h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed 0-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 		if h.tracer != nil {
 			h.tracer.UpdatedKeyFromTLS(protocol.Encryption0RTT, h.perspective)
 		}
@@ -653,12 +668,16 @@ func (h *cryptoSetup) SetWriteKey(encLevel qtls.EncryptionLevel, suite *qtls.Cip
 			h.dropInitialKeys,
 			h.perspective,
 		)
-		h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed Handshake Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 	case qtls.EncryptionApplication:
 		h.writeEncLevel = protocol.Encryption1RTT
 		h.aead.SetWriteKey(suite, trafficSecret)
 		h.has1RTTSealer = true
-		h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		if h.logger.Debug() {
+			h.logger.Debugf("Installed 1-RTT Write keys (using %s)", tls.CipherSuiteName(suite.ID))
+		}
 		if h.zeroRTTSealer != nil {
 			h.zeroRTTSealer = nil
 			h.logger.Debugf("Dropping 0-RTT keys.")

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/initial_aead.go

@@ -13,7 +13,7 @@ import (
 var (
 	quicSaltOld = []byte{0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1, 0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99}
 	quicSaltV1  = []byte{0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a}
-	quicSaltV2  = []byte{0xa7, 0x07, 0xc2, 0x03, 0xa5, 0x9b, 0x47, 0x18, 0x4a, 0x1d, 0x62, 0xca, 0x57, 0x04, 0x06, 0xea, 0x7a, 0xe3, 0xe5, 0xd3}
+	quicSaltV2  = []byte{0x0d, 0xed, 0xe3, 0xde, 0xf7, 0x00, 0xa6, 0xdb, 0x81, 0x93, 0x81, 0xbe, 0x6e, 0x26, 0x9d, 0xcb, 0xf9, 0xbd, 0x2e, 0xd9}
 )
 
 const (

+ 22 - 14
vendor/github.com/Psiphon-Labs/quic-go/internal/handshake/retry.go

@@ -11,13 +11,15 @@ import (
 )
 
 var (
-	oldRetryAEAD cipher.AEAD // used for QUIC draft versions up to 34
-	retryAEAD    cipher.AEAD // used for QUIC draft-34
+	retryAEADdraft29 cipher.AEAD // used for QUIC draft versions up to 34
+	retryAEADv1      cipher.AEAD // used for QUIC v1 (RFC 9000)
+	retryAEADv2      cipher.AEAD // used for QUIC v2
 )
 
 func init() {
-	oldRetryAEAD = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1})
-	retryAEAD = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
+	retryAEADdraft29 = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1})
+	retryAEADv1 = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
+	retryAEADv2 = initAEAD([16]byte{0x8f, 0xb4, 0xb0, 0x1b, 0x56, 0xac, 0x48, 0xe2, 0x60, 0xfb, 0xcb, 0xce, 0xad, 0x7c, 0xcc, 0x92})
 }
 
 func initAEAD(key [16]byte) cipher.AEAD {
@@ -33,30 +35,36 @@ func initAEAD(key [16]byte) cipher.AEAD {
 }
 
 var (
-	retryBuf      bytes.Buffer
-	retryMutex    sync.Mutex
-	oldRetryNonce = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}
-	retryNonce    = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}
+	retryBuf          bytes.Buffer
+	retryMutex        sync.Mutex
+	retryNonceDraft29 = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}
+	retryNonceV1      = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}
+	retryNonceV2      = [12]byte{0xd8, 0x69, 0x69, 0xbc, 0x2d, 0x7c, 0x6d, 0x99, 0x90, 0xef, 0xb0, 0x4a}
 )
 
 // GetRetryIntegrityTag calculates the integrity tag on a Retry packet
 func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte {
 	retryMutex.Lock()
+	defer retryMutex.Unlock()
+
 	retryBuf.WriteByte(uint8(origDestConnID.Len()))
 	retryBuf.Write(origDestConnID.Bytes())
 	retryBuf.Write(retry)
+	defer retryBuf.Reset()
 
 	var tag [16]byte
 	var sealed []byte
-	if version != protocol.Version1 {
-		sealed = oldRetryAEAD.Seal(tag[:0], oldRetryNonce[:], nil, retryBuf.Bytes())
-	} else {
-		sealed = retryAEAD.Seal(tag[:0], retryNonce[:], nil, retryBuf.Bytes())
+	//nolint:exhaustive // These are all the versions we support
+	switch version {
+	case protocol.Version1:
+		sealed = retryAEADv1.Seal(tag[:0], retryNonceV1[:], nil, retryBuf.Bytes())
+	case protocol.Version2:
+		sealed = retryAEADv2.Seal(tag[:0], retryNonceV2[:], nil, retryBuf.Bytes())
+	default:
+		sealed = retryAEADdraft29.Seal(tag[:0], retryNonceDraft29[:], nil, retryBuf.Bytes())
 	}
 	if len(sealed) != 16 {
 		panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed)))
 	}
-	retryBuf.Reset()
-	retryMutex.Unlock()
 	return &tag
 }

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/protocol/version.go

@@ -23,7 +23,7 @@ const (
 	VersionUnknown  VersionNumber = math.MaxUint32
 	VersionDraft29  VersionNumber = 0xff00001d
 	Version1        VersionNumber = 0x1
-	Version2        VersionNumber = 0x709a50c4
+	Version2        VersionNumber = 0x6b3343cf
 )
 
 // SupportedVersions lists the versions that the server supports

+ 5 - 0
vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go121.go

@@ -0,0 +1,5 @@
+//go:build go1.21
+
+package qtls
+
+var _ int = "The version of quic-go you're using can't be built on Go 1.21 yet. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions."

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/qtls/go_oldversion.go

@@ -2,4 +2,4 @@
 
 package qtls
 
-var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions."
+var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/quic-go/quic-go/wiki/quic-go-and-Go-versions."

+ 3 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/README.md

@@ -1,4 +1,6 @@
 # Usage
 
 This is the Go standard library implementation of a linked list 
-(https://golang.org/src/container/list/list.go), modified to use Go generics.
+(https://golang.org/src/container/list/list.go), with the following modifications:
+* it uses Go generics
+* it allows passing in a `sync.Pool` (via the `NewWithPool` constructor) to reduce allocations of `Element` structs

+ 31 - 2
vendor/github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist/linkedlist.go

@@ -11,6 +11,12 @@
 //	}
 package list
 
+import "sync"
+
+func NewPool[T any]() *sync.Pool {
+	return &sync.Pool{New: func() any { return &Element[T]{} }}
+}
+
 // Element is an element of a linked list.
 type Element[T any] struct {
 	// Next and previous pointers in the doubly-linked list of elements.
@@ -43,11 +49,17 @@ func (e *Element[T]) Prev() *Element[T] {
 	return nil
 }
 
+func (e *Element[T]) List() *List[T] {
+	return e.list
+}
+
 // List represents a doubly linked list.
 // The zero value for List is an empty list ready to use.
 type List[T any] struct {
 	root Element[T] // sentinel list element, only &root, root.prev, and root.next are used
 	len  int        // current list length excluding (this) sentinel element
+
+	pool *sync.Pool
 }
 
 // Init initializes or clears list l.
@@ -61,6 +73,12 @@ func (l *List[T]) Init() *List[T] {
 // New returns an initialized list.
 func New[T any]() *List[T] { return new(List[T]).Init() }
 
+// NewWithPool returns an initialized list, using a sync.Pool for list elements.
+func NewWithPool[T any](pool *sync.Pool) *List[T] {
+	l := &List[T]{pool: pool}
+	return l.Init()
+}
+
 // Len returns the number of elements of list l.
 // The complexity is O(1).
 func (l *List[T]) Len() int { return l.len }
@@ -101,7 +119,14 @@ func (l *List[T]) insert(e, at *Element[T]) *Element[T] {
 
 // insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
 func (l *List[T]) insertValue(v T, at *Element[T]) *Element[T] {
-	return l.insert(&Element[T]{Value: v}, at)
+	var e *Element[T]
+	if l.pool != nil {
+		e = l.pool.Get().(*Element[T])
+	} else {
+		e = &Element[T]{}
+	}
+	e.Value = v
+	return l.insert(e, at)
 }
 
 // remove removes e from its list, decrements l.len
@@ -111,6 +136,9 @@ func (l *List[T]) remove(e *Element[T]) {
 	e.next = nil // avoid memory leaks
 	e.prev = nil // avoid memory leaks
 	e.list = nil
+	if l.pool != nil {
+		l.pool.Put(e)
+	}
 	l.len--
 }
 
@@ -132,12 +160,13 @@ func (l *List[T]) move(e, at *Element[T]) {
 // It returns the element value e.Value.
 // The element must not be nil.
 func (l *List[T]) Remove(e *Element[T]) T {
+	v := e.Value
 	if e.list == l {
 		// if e.list == l, l must have been initialized when e was inserted
 		// in l or l == nil (e is a zero Element) and l.remove will crash
 		l.remove(e)
 	}
-	return e.Value
+	return v
 }
 
 // PushFront inserts a new element e with value v at the front of list l and returns e.

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/utils/log.go

@@ -125,7 +125,7 @@ func readLoggingEnv() LogLevel {
 	case "error":
 		return LogLevelError
 	default:
-		fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/Psiphon-Labs/quic-go/wiki/Logging")
+		fmt.Fprintln(os.Stderr, "invalid quic-go log level, see https://github.com/quic-go/quic-go/wiki/Logging")
 		return LogLevelNothing
 	}
 }

+ 57 - 96
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/extended_header.go

@@ -2,6 +2,7 @@ package wire
 
 import (
 	"bytes"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
@@ -42,12 +43,7 @@ func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool
 	if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil {
 		return false, err
 	}
-	var reservedBitsValid bool
-	if h.IsLongHeader {
-		reservedBitsValid, err = h.parseLongHeader(b, v)
-	} else {
-		reservedBitsValid, err = h.parseShortHeader(b, v)
-	}
+	reservedBitsValid, err := h.parseLongHeader(b, v)
 	if err != nil {
 		return false, err
 	}
@@ -65,21 +61,6 @@ func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumb
 	return true, nil
 }
 
-func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
-	h.KeyPhase = protocol.KeyPhaseZero
-	if h.typeByte&0x4 > 0 {
-		h.KeyPhase = protocol.KeyPhaseOne
-	}
-
-	if err := h.readPacketNumber(b); err != nil {
-		return false, err
-	}
-	if h.typeByte&0x18 != 0 {
-		return false, nil
-	}
-	return true, nil
-}
-
 func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error {
 	h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1
 	switch h.PacketNumberLen {
@@ -113,23 +94,17 @@ func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error {
 	return nil
 }
 
-// Write writes the Header.
-func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error {
+// Append appends the Header.
+func (h *ExtendedHeader) Append(b []byte, v protocol.VersionNumber) ([]byte, error) {
 	if h.DestConnectionID.Len() > protocol.MaxConnIDLen {
-		return fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len())
+		return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len())
 	}
 	if h.SrcConnectionID.Len() > protocol.MaxConnIDLen {
-		return fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len())
-	}
-	if h.IsLongHeader {
-		return h.writeLongHeader(b, ver)
+		return nil, fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len())
 	}
-	return h.writeShortHeader(b, ver)
-}
 
-func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.VersionNumber) error {
 	var packetType uint8
-	if version == protocol.Version2 {
+	if v == protocol.Version2 {
 		//nolint:exhaustive
 		switch h.Type {
 		case protocol.PacketTypeInitial:
@@ -160,51 +135,25 @@ func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.Versi
 		firstByte |= uint8(h.PacketNumberLen - 1)
 	}
 
-	b.WriteByte(firstByte)
-	utils.BigEndian.WriteUint32(b, uint32(h.Version))
-	b.WriteByte(uint8(h.DestConnectionID.Len()))
-	b.Write(h.DestConnectionID.Bytes())
-	b.WriteByte(uint8(h.SrcConnectionID.Len()))
-	b.Write(h.SrcConnectionID.Bytes())
+	b = append(b, firstByte)
+	b = append(b, make([]byte, 4)...)
+	binary.BigEndian.PutUint32(b[len(b)-4:], uint32(h.Version))
+	b = append(b, uint8(h.DestConnectionID.Len()))
+	b = append(b, h.DestConnectionID.Bytes()...)
+	b = append(b, uint8(h.SrcConnectionID.Len()))
+	b = append(b, h.SrcConnectionID.Bytes()...)
 
 	//nolint:exhaustive
 	switch h.Type {
 	case protocol.PacketTypeRetry:
-		b.Write(h.Token)
-		return nil
+		b = append(b, h.Token...)
+		return b, nil
 	case protocol.PacketTypeInitial:
-		quicvarint.Write(b, uint64(len(h.Token)))
-		b.Write(h.Token)
-	}
-	quicvarint.WriteWithLen(b, uint64(h.Length), 2)
-	return h.writePacketNumber(b)
-}
-
-func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, _ protocol.VersionNumber) error {
-	typeByte := 0x40 | uint8(h.PacketNumberLen-1)
-	if h.KeyPhase == protocol.KeyPhaseOne {
-		typeByte |= byte(1 << 2)
+		b = quicvarint.Append(b, uint64(len(h.Token)))
+		b = append(b, h.Token...)
 	}
-
-	b.WriteByte(typeByte)
-	b.Write(h.DestConnectionID.Bytes())
-	return h.writePacketNumber(b)
-}
-
-func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error {
-	switch h.PacketNumberLen {
-	case protocol.PacketNumberLen1:
-		b.WriteByte(uint8(h.PacketNumber))
-	case protocol.PacketNumberLen2:
-		utils.BigEndian.WriteUint16(b, uint16(h.PacketNumber))
-	case protocol.PacketNumberLen3:
-		utils.BigEndian.WriteUint24(b, uint32(h.PacketNumber))
-	case protocol.PacketNumberLen4:
-		utils.BigEndian.WriteUint32(b, uint32(h.PacketNumber))
-	default:
-		return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
-	}
-	return nil
+	b = quicvarint.AppendWithLen(b, uint64(h.Length), 2)
+	return appendPacketNumber(b, h.PacketNumber, h.PacketNumberLen)
 }
 
 // ParsedLen returns the number of bytes that were consumed when parsing the header
@@ -213,37 +162,49 @@ func (h *ExtendedHeader) ParsedLen() protocol.ByteCount {
 }
 
 // GetLength determines the length of the Header.
-func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount {
-	if h.IsLongHeader {
-		length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */
-		if h.Type == protocol.PacketTypeInitial {
-			length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))
-		}
-		return length
+func (h *ExtendedHeader) GetLength(_ protocol.VersionNumber) protocol.ByteCount {
+	length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */
+	if h.Type == protocol.PacketTypeInitial {
+		length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))
 	}
-
-	length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len())
-	length += protocol.ByteCount(h.PacketNumberLen)
 	return length
 }
 
 // Log logs the Header
 func (h *ExtendedHeader) Log(logger utils.Logger) {
-	if h.IsLongHeader {
-		var token string
-		if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry {
-			if len(h.Token) == 0 {
-				token = "Token: (empty), "
-			} else {
-				token = fmt.Sprintf("Token: %#x, ", h.Token)
-			}
-			if h.Type == protocol.PacketTypeRetry {
-				logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version)
-				return
-			}
+	var token string
+	if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry {
+		if len(h.Token) == 0 {
+			token = "Token: (empty), "
+		} else {
+			token = fmt.Sprintf("Token: %#x, ", h.Token)
 		}
-		logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version)
-	} else {
-		logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase)
+		if h.Type == protocol.PacketTypeRetry {
+			logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version)
+			return
+		}
+	}
+	logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version)
+}
+
+func appendPacketNumber(b []byte, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen) ([]byte, error) {
+	switch pnLen {
+	case protocol.PacketNumberLen1:
+		b = append(b, uint8(pn))
+	case protocol.PacketNumberLen2:
+		buf := make([]byte, 2)
+		binary.BigEndian.PutUint16(buf, uint16(pn))
+		b = append(b, buf...)
+	case protocol.PacketNumberLen3:
+		buf := make([]byte, 4)
+		binary.BigEndian.PutUint32(buf, uint32(pn))
+		b = append(b, buf[1:]...)
+	case protocol.PacketNumberLen4:
+		buf := make([]byte, 4)
+		binary.BigEndian.PutUint32(buf, uint32(pn))
+		b = append(b, buf...)
+	default:
+		return nil, fmt.Errorf("invalid packet number length: %d", pnLen)
 	}
+	return b, nil
 }

+ 28 - 29
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/frame_parser.go

@@ -16,31 +16,30 @@ type frameParser struct {
 	ackDelayExponent uint8
 
 	supportsDatagrams bool
-
-	version protocol.VersionNumber
 }
 
+var _ FrameParser = &frameParser{}
+
 // NewFrameParser creates a new frame parser.
-func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser {
+func NewFrameParser(supportsDatagrams bool) *frameParser {
 	return &frameParser{
 		r:                 *bytes.NewReader(nil),
 		supportsDatagrams: supportsDatagrams,
-		version:           v,
 	}
 }
 
 // ParseNext parses the next frame.
 // It skips PADDING frames.
-func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel) (int, Frame, error) {
+func (p *frameParser) ParseNext(data []byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (int, Frame, error) {
 	startLen := len(data)
 	p.r.Reset(data)
-	frame, err := p.parseNext(&p.r, encLevel)
+	frame, err := p.parseNext(&p.r, encLevel, v)
 	n := startLen - p.r.Len()
 	p.r.Reset(nil)
 	return n, frame, err
 }
 
-func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) {
+func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) {
 	for r.Len() != 0 {
 		typeByte, _ := p.r.ReadByte()
 		if typeByte == 0x0 { // PADDING frame
@@ -48,7 +47,7 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev
 		}
 		r.UnreadByte()
 
-		f, err := p.parseFrame(r, typeByte, encLevel)
+		f, err := p.parseFrame(r, typeByte, encLevel, v)
 		if err != nil {
 			return nil, &qerr.TransportError{
 				FrameType:    uint64(typeByte),
@@ -61,56 +60,56 @@ func (p *frameParser) parseNext(r *bytes.Reader, encLevel protocol.EncryptionLev
 	return nil, nil
 }
 
-func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) {
+func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel, v protocol.VersionNumber) (Frame, error) {
 	var frame Frame
 	var err error
 	if typeByte&0xf8 == 0x8 {
-		frame, err = parseStreamFrame(r, p.version)
+		frame, err = parseStreamFrame(r, v)
 	} else {
 		switch typeByte {
 		case 0x1:
-			frame, err = parsePingFrame(r, p.version)
+			frame, err = parsePingFrame(r, v)
 		case 0x2, 0x3:
 			ackDelayExponent := p.ackDelayExponent
 			if encLevel != protocol.Encryption1RTT {
 				ackDelayExponent = protocol.DefaultAckDelayExponent
 			}
-			frame, err = parseAckFrame(r, ackDelayExponent, p.version)
+			frame, err = parseAckFrame(r, ackDelayExponent, v)
 		case 0x4:
-			frame, err = parseResetStreamFrame(r, p.version)
+			frame, err = parseResetStreamFrame(r, v)
 		case 0x5:
-			frame, err = parseStopSendingFrame(r, p.version)
+			frame, err = parseStopSendingFrame(r, v)
 		case 0x6:
-			frame, err = parseCryptoFrame(r, p.version)
+			frame, err = parseCryptoFrame(r, v)
 		case 0x7:
-			frame, err = parseNewTokenFrame(r, p.version)
+			frame, err = parseNewTokenFrame(r, v)
 		case 0x10:
-			frame, err = parseMaxDataFrame(r, p.version)
+			frame, err = parseMaxDataFrame(r, v)
 		case 0x11:
-			frame, err = parseMaxStreamDataFrame(r, p.version)
+			frame, err = parseMaxStreamDataFrame(r, v)
 		case 0x12, 0x13:
-			frame, err = parseMaxStreamsFrame(r, p.version)
+			frame, err = parseMaxStreamsFrame(r, v)
 		case 0x14:
-			frame, err = parseDataBlockedFrame(r, p.version)
+			frame, err = parseDataBlockedFrame(r, v)
 		case 0x15:
-			frame, err = parseStreamDataBlockedFrame(r, p.version)
+			frame, err = parseStreamDataBlockedFrame(r, v)
 		case 0x16, 0x17:
-			frame, err = parseStreamsBlockedFrame(r, p.version)
+			frame, err = parseStreamsBlockedFrame(r, v)
 		case 0x18:
-			frame, err = parseNewConnectionIDFrame(r, p.version)
+			frame, err = parseNewConnectionIDFrame(r, v)
 		case 0x19:
-			frame, err = parseRetireConnectionIDFrame(r, p.version)
+			frame, err = parseRetireConnectionIDFrame(r, v)
 		case 0x1a:
-			frame, err = parsePathChallengeFrame(r, p.version)
+			frame, err = parsePathChallengeFrame(r, v)
 		case 0x1b:
-			frame, err = parsePathResponseFrame(r, p.version)
+			frame, err = parsePathResponseFrame(r, v)
 		case 0x1c, 0x1d:
-			frame, err = parseConnectionCloseFrame(r, p.version)
+			frame, err = parseConnectionCloseFrame(r, v)
 		case 0x1e:
-			frame, err = parseHandshakeDoneFrame(r, p.version)
+			frame, err = parseHandshakeDoneFrame(r, v)
 		case 0x30, 0x31:
 			if p.supportsDatagrams {
-				frame, err = parseDatagramFrame(r, p.version)
+				frame, err = parseDatagramFrame(r, v)
 				break
 			}
 			fallthrough

+ 17 - 49
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/header.go

@@ -121,9 +121,8 @@ var ErrUnsupportedVersion = errors.New("unsupported version")
 
 // The Header is the version independent part of the header
 type Header struct {
-	IsLongHeader bool
-	typeByte     byte
-	Type         protocol.PacketType
+	typeByte byte
+	Type     protocol.PacketType
 
 	Version          protocol.VersionNumber
 	SrcConnectionID  protocol.ConnectionID
@@ -140,24 +139,22 @@ type Header struct {
 // If the packet has a long header, the packet is cut according to the length field.
 // If we understand the version, the header is parsed up to the packet number.
 // Otherwise, only the invariant part of the header is parsed.
-func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* packet data */, []byte /* rest */, error) {
-	hdr, err := parseHeader(bytes.NewReader(data), shortHeaderConnIDLen)
+func ParsePacket(data []byte) (*Header, []byte, []byte, error) {
+	if len(data) == 0 || !IsLongHeaderPacket(data[0]) {
+		return nil, nil, nil, errors.New("not a long header packet")
+	}
+	hdr, err := parseHeader(bytes.NewReader(data))
 	if err != nil {
 		if err == ErrUnsupportedVersion {
 			return hdr, nil, nil, ErrUnsupportedVersion
 		}
 		return nil, nil, nil, err
 	}
-	var rest []byte
-	if hdr.IsLongHeader {
-		if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length {
-			return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length)
-		}
-		packetLen := int(hdr.ParsedLen() + hdr.Length)
-		rest = data[packetLen:]
-		data = data[:packetLen]
+	if protocol.ByteCount(len(data)) < hdr.ParsedLen()+hdr.Length {
+		return nil, nil, nil, fmt.Errorf("packet length (%d bytes) is smaller than the expected length (%d bytes)", len(data)-int(hdr.ParsedLen()), hdr.Length)
 	}
-	return hdr, data, rest, nil
+	packetLen := int(hdr.ParsedLen() + hdr.Length)
+	return hdr, data[:packetLen], data[packetLen:], nil
 }
 
 // ParseHeader parses the header.
@@ -165,43 +162,17 @@ func ParsePacket(data []byte, shortHeaderConnIDLen int) (*Header, []byte /* pack
 // For long header packets:
 // * if we understand the version: up to the packet number
 // * if not, only the invariant part of the header
-func parseHeader(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) {
+func parseHeader(b *bytes.Reader) (*Header, error) {
 	startLen := b.Len()
-	h, err := parseHeaderImpl(b, shortHeaderConnIDLen)
-	if err != nil {
-		return h, err
-	}
-	h.parsedLen = protocol.ByteCount(startLen - b.Len())
-	return h, err
-}
-
-func parseHeaderImpl(b *bytes.Reader, shortHeaderConnIDLen int) (*Header, error) {
 	typeByte, err := b.ReadByte()
 	if err != nil {
 		return nil, err
 	}
 
-	h := &Header{
-		typeByte:     typeByte,
-		IsLongHeader: IsLongHeaderPacket(typeByte),
-	}
-
-	if !h.IsLongHeader {
-		if h.typeByte&0x40 == 0 {
-			return nil, errors.New("not a QUIC packet")
-		}
-		if err := h.parseShortHeader(b, shortHeaderConnIDLen); err != nil {
-			return nil, err
-		}
-		return h, nil
-	}
-	return h, h.parseLongHeader(b)
-}
-
-func (h *Header) parseShortHeader(b *bytes.Reader, shortHeaderConnIDLen int) error {
-	var err error
-	h.DestConnectionID, err = protocol.ReadConnectionID(b, shortHeaderConnIDLen)
-	return err
+	h := &Header{typeByte: typeByte}
+	err = h.parseLongHeader(b)
+	h.parsedLen = protocol.ByteCount(startLen - b.Len())
+	return h, err
 }
 
 func (h *Header) parseLongHeader(b *bytes.Reader) error {
@@ -321,8 +292,5 @@ func (h *Header) toExtendedHeader() *ExtendedHeader {
 
 // PacketType is the type of the packet, for logging purposes
 func (h *Header) PacketType() string {
-	if h.IsLongHeader {
-		return h.Type.String()
-	}
-	return "1-RTT"
+	return h.Type.String()
 }

+ 1 - 1
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/interface.go

@@ -12,6 +12,6 @@ type Frame interface {
 
 // A FrameParser parses QUIC frames, one by one.
 type FrameParser interface {
-	ParseNext([]byte, protocol.EncryptionLevel) (int, Frame, error)
+	ParseNext([]byte, protocol.EncryptionLevel, protocol.VersionNumber) (int, Frame, error)
 	SetAckDelayExponent(uint8)
 }

+ 18 - 0
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/short_header.go

@@ -9,6 +9,9 @@ import (
 	"github.com/Psiphon-Labs/quic-go/internal/utils"
 )
 
+// ParseShortHeader parses a short header packet.
+// It must be called after header protection was removed.
+// Otherwise, the check for the reserved bits will (most likely) fail.
 func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.PacketNumber, _ protocol.PacketNumberLen, _ protocol.KeyPhaseBit, _ error) {
 	if len(data) == 0 {
 		return 0, 0, 0, 0, io.EOF
@@ -50,6 +53,21 @@ func ParseShortHeader(data []byte, connIDLen int) (length int, _ protocol.Packet
 	return 1 + connIDLen + int(pnLen), pn, pnLen, kp, err
 }
 
+// AppendShortHeader writes a short header.
+func AppendShortHeader(b []byte, connID protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) ([]byte, error) {
+	typeByte := 0x40 | uint8(pnLen-1)
+	if kp == protocol.KeyPhaseOne {
+		typeByte |= byte(1 << 2)
+	}
+	b = append(b, typeByte)
+	b = append(b, connID.Bytes()...)
+	return appendPacketNumber(b, pn, pnLen)
+}
+
+func ShortHeaderLen(dest protocol.ConnectionID, pnLen protocol.PacketNumberLen) protocol.ByteCount {
+	return 1 + protocol.ByteCount(dest.Len()) + protocol.ByteCount(pnLen)
+}
+
 func LogShortHeader(logger utils.Logger, dest protocol.ConnectionID, pn protocol.PacketNumber, pnLen protocol.PacketNumberLen, kp protocol.KeyPhaseBit) {
 	logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", dest, pn, pnLen, kp)
 }

+ 3 - 0
vendor/github.com/Psiphon-Labs/quic-go/internal/wire/transport_parameters.go

@@ -304,6 +304,9 @@ func (p *TransportParameters) readNumericTransportParameter(
 		}
 		p.MaxAckDelay = time.Duration(val) * time.Millisecond
 	case activeConnectionIDLimitParameterID:
+		if val < 2 {
+			return fmt.Errorf("invalid value for active_connection_id_limit: %d (minimum 2)", val)
+		}
 		p.ActiveConnectionIDLimit = val
 	case maxDatagramFrameSizeParameterID:
 		p.MaxDatagramFrameSize = protocol.ByteCount(val)

+ 3 - 3
vendor/github.com/Psiphon-Labs/quic-go/logging/interface.go

@@ -7,10 +7,9 @@ import (
 	"net"
 	"time"
 
-	"github.com/Psiphon-Labs/quic-go/internal/utils"
-
 	"github.com/Psiphon-Labs/quic-go/internal/protocol"
 	"github.com/Psiphon-Labs/quic-go/internal/qerr"
+	"github.com/Psiphon-Labs/quic-go/internal/utils"
 	"github.com/Psiphon-Labs/quic-go/internal/wire"
 )
 
@@ -121,7 +120,8 @@ type ConnectionTracer interface {
 	SentTransportParameters(*TransportParameters)
 	ReceivedTransportParameters(*TransportParameters)
 	RestoredTransportParameters(parameters *TransportParameters) // for 0-RTT
-	SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame)
+	SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame)
+	SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame)
 	ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber)
 	ReceivedRetry(*Header)
 	ReceivedLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, frames []Frame)

+ 8 - 2
vendor/github.com/Psiphon-Labs/quic-go/logging/multiplex.go

@@ -104,9 +104,15 @@ func (m *connTracerMultiplexer) RestoredTransportParameters(tp *TransportParamet
 	}
 }
 
-func (m *connTracerMultiplexer) SentPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) {
+func (m *connTracerMultiplexer) SentLongHeaderPacket(hdr *ExtendedHeader, size ByteCount, ack *AckFrame, frames []Frame) {
 	for _, t := range m.tracers {
-		t.SentPacket(hdr, size, ack, frames)
+		t.SentLongHeaderPacket(hdr, size, ack, frames)
+	}
+}
+
+func (m *connTracerMultiplexer) SentShortHeaderPacket(hdr *ShortHeader, size ByteCount, ack *AckFrame, frames []Frame) {
+	for _, t := range m.tracers {
+		t.SentShortHeaderPacket(hdr, size, ack, frames)
 	}
 }
 

+ 6 - 5
vendor/github.com/Psiphon-Labs/quic-go/logging/null_tracer.go

@@ -31,11 +31,12 @@ func (n NullConnectionTracer) StartedConnection(local, remote net.Addr, srcConnI
 
 func (n NullConnectionTracer) NegotiatedVersion(chosen VersionNumber, clientVersions, serverVersions []VersionNumber) {
 }
-func (n NullConnectionTracer) ClosedConnection(err error)                                {}
-func (n NullConnectionTracer) SentTransportParameters(*TransportParameters)              {}
-func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters)          {}
-func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters)          {}
-func (n NullConnectionTracer) SentPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {}
+func (n NullConnectionTracer) ClosedConnection(err error)                                          {}
+func (n NullConnectionTracer) SentTransportParameters(*TransportParameters)                        {}
+func (n NullConnectionTracer) ReceivedTransportParameters(*TransportParameters)                    {}
+func (n NullConnectionTracer) RestoredTransportParameters(*TransportParameters)                    {}
+func (n NullConnectionTracer) SentLongHeaderPacket(*ExtendedHeader, ByteCount, *AckFrame, []Frame) {}
+func (n NullConnectionTracer) SentShortHeaderPacket(*ShortHeader, ByteCount, *AckFrame, []Frame)   {}
 func (n NullConnectionTracer) ReceivedVersionNegotiationPacket(dest, src ArbitraryLenConnectionID, _ []VersionNumber) {
 }
 func (n NullConnectionTracer) ReceivedRetry(*Header)                                        {}

+ 0 - 3
vendor/github.com/Psiphon-Labs/quic-go/logging/packet_header.go

@@ -6,9 +6,6 @@ import (
 
 // PacketTypeFromHeader determines the packet type from a *wire.Header.
 func PacketTypeFromHeader(hdr *Header) PacketType {
-	if !hdr.IsLongHeader {
-		return PacketType1RTT
-	}
 	if hdr.Version == 0 {
 		return PacketTypeVersionNegotiation
 	}

+ 2 - 2
vendor/github.com/Psiphon-Labs/quic-go/packet_handler_map.go

@@ -117,7 +117,7 @@ func newPacketHandlerMap(
 
 				// [Psiphon]
 				// Do not emit alert to stderr (was log.Printf).
-				logger.Errorf("%s. See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err)
+				logger.Errorf("%s. See https://github.com/quic-go/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err)
 			})
 		}
 	}
@@ -367,7 +367,7 @@ func (h *packetHandlerMap) listen() {
 		//nolint:staticcheck // SA1019 ignore this!
 		// TODO: This code is used to ignore wsa errors on Windows.
 		// Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution.
-		// See https://github.com/lucas-clemente/quic-go/issues/1737 for details.
+		// See https://github.com/quic-go/quic-go/issues/1737 for details.
 		if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
 			h.logger.Debugf("Temporary error reading from conn: %w", err)
 			continue

File diff suppressed because it is too large
+ 412 - 244
vendor/github.com/Psiphon-Labs/quic-go/packet_packer.go


+ 11 - 13
vendor/github.com/Psiphon-Labs/quic-go/packet_unpacker.go

@@ -38,16 +38,14 @@ type packetUnpacker struct {
 	cs handshake.CryptoSetup
 
 	shortHdrConnIDLen int
-	version           protocol.VersionNumber
 }
 
 var _ unpacker = &packetUnpacker{}
 
-func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version protocol.VersionNumber) unpacker {
+func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int) *packetUnpacker {
 	return &packetUnpacker{
 		cs:                cs,
 		shortHdrConnIDLen: shortHdrConnIDLen,
-		version:           version,
 	}
 }
 
@@ -55,7 +53,7 @@ func newPacketUnpacker(cs handshake.CryptoSetup, shortHdrConnIDLen int, version
 // If the reserved bits are invalid, the error is wire.ErrInvalidReservedBits.
 // If any other error occurred when parsing the header, the error is of type headerParseError.
 // If decrypting the payload fails for any reason, the error is the error returned by the AEAD.
-func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte) (*unpackedPacket, error) {
+func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, data []byte, v protocol.VersionNumber) (*unpackedPacket, error) {
 	var encLevel protocol.EncryptionLevel
 	var extHdr *wire.ExtendedHeader
 	var decrypted []byte
@@ -67,7 +65,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d
 		if err != nil {
 			return nil, err
 		}
-		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
 		if err != nil {
 			return nil, err
 		}
@@ -77,7 +75,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d
 		if err != nil {
 			return nil, err
 		}
-		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
 		if err != nil {
 			return nil, err
 		}
@@ -87,7 +85,7 @@ func (u *packetUnpacker) UnpackLongHeader(hdr *wire.Header, rcvTime time.Time, d
 		if err != nil {
 			return nil, err
 		}
-		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data)
+		extHdr, decrypted, err = u.unpackLongHeaderPacket(opener, hdr, data, v)
 		if err != nil {
 			return nil, err
 		}
@@ -127,8 +125,8 @@ func (u *packetUnpacker) UnpackShortHeader(rcvTime time.Time, data []byte) (prot
 	return pn, pnLen, kp, decrypted, nil
 }
 
-func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, []byte, error) {
-	extHdr, parseErr := u.unpackLongHeader(opener, hdr, data)
+func (u *packetUnpacker) unpackLongHeaderPacket(opener handshake.LongHeaderOpener, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, []byte, error) {
+	extHdr, parseErr := u.unpackLongHeader(opener, hdr, data, v)
 	// If the reserved bits are set incorrectly, we still need to continue unpacking.
 	// This avoids a timing side-channel, which otherwise might allow an attacker
 	// to gain information about the header encryption.
@@ -189,15 +187,15 @@ func (u *packetUnpacker) unpackShortHeader(hd headerDecryptor, data []byte) (int
 }
 
 // The error is either nil, a wire.ErrInvalidReservedBits or of type headerParseError.
-func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte) (*wire.ExtendedHeader, error) {
-	extHdr, err := unpackLongHeader(hd, hdr, data, u.version)
+func (u *packetUnpacker) unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) {
+	extHdr, err := unpackLongHeader(hd, hdr, data, v)
 	if err != nil && err != wire.ErrInvalidReservedBits {
 		return nil, &headerParseError{err: err}
 	}
 	return extHdr, err
 }
 
-func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version protocol.VersionNumber) (*wire.ExtendedHeader, error) {
+func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, v protocol.VersionNumber) (*wire.ExtendedHeader, error) {
 	r := bytes.NewReader(data)
 
 	hdrLen := hdr.ParsedLen()
@@ -216,7 +214,7 @@ func unpackLongHeader(hd headerDecryptor, hdr *wire.Header, data []byte, version
 		data[hdrLen:hdrLen+4],
 	)
 	// 3. parse the header (and learn the actual length of the packet number)
-	extHdr, parseErr := hdr.ParseExtended(r, version)
+	extHdr, parseErr := hdr.ParseExtended(r, v)
 	if parseErr != nil && parseErr != wire.ErrInvalidReservedBits {
 		return nil, parseErr
 	}

+ 9 - 9
vendor/github.com/Psiphon-Labs/quic-go/quicvarint/varint.go

@@ -107,32 +107,32 @@ func Append(b []byte, i uint64) []byte {
 	panic(fmt.Sprintf("%#x doesn't fit into 62 bits", i))
 }
 
-// WriteWithLen writes i in the QUIC varint format with the desired length to w.
-func WriteWithLen(w Writer, i uint64, length protocol.ByteCount) {
+// AppendWithLen appends i in the QUIC varint format with the desired length to b.
+func AppendWithLen(b []byte, i uint64, length protocol.ByteCount) []byte {
 	if length != 1 && length != 2 && length != 4 && length != 8 {
 		panic("invalid varint length")
 	}
 	l := Len(i)
 	if l == length {
-		Write(w, i)
-		return
+		return Append(b, i)
 	}
 	if l > length {
 		panic(fmt.Sprintf("cannot encode %d in %d bytes", i, length))
 	}
 	if length == 2 {
-		w.WriteByte(0b01000000)
+		b = append(b, 0b01000000)
 	} else if length == 4 {
-		w.WriteByte(0b10000000)
+		b = append(b, 0b10000000)
 	} else if length == 8 {
-		w.WriteByte(0b11000000)
+		b = append(b, 0b11000000)
 	}
 	for j := protocol.ByteCount(1); j < length-l; j++ {
-		w.WriteByte(0)
+		b = append(b, 0)
 	}
 	for j := protocol.ByteCount(0); j < l; j++ {
-		w.WriteByte(uint8(i >> (8 * (l - 1 - j))))
+		b = append(b, uint8(i>>(8*(l-1-j))))
 	}
+	return b
 }
 
 // Len determines the number of bytes that will be needed to write the number i.

+ 3 - 5
vendor/github.com/Psiphon-Labs/quic-go/receive_stream.go

@@ -44,14 +44,13 @@ type receiveStream struct {
 	closedForShutdown bool // set when CloseForShutdown() is called
 	finRead           bool // set once we read a frame with a Fin
 	canceledRead      bool // set when CancelRead() is called
-	resetRemotely     bool // set when HandleResetStreamFrame() is called
+	resetRemotely     bool // set when handleResetStreamFrame() is called
 
 	readChan chan struct{}
 	readOnce chan struct{} // cap: 1, to protect against concurrent use of Read
 	deadline time.Time
 
 	flowController flowcontrol.StreamFlowController
-	version        protocol.VersionNumber
 }
 
 var (
@@ -63,7 +62,6 @@ func newReceiveStream(
 	streamID protocol.StreamID,
 	sender streamSender,
 	flowController flowcontrol.StreamFlowController,
-	version protocol.VersionNumber,
 ) *receiveStream {
 	return &receiveStream{
 		streamID:       streamID,
@@ -73,7 +71,6 @@ func newReceiveStream(
 		readChan:       make(chan struct{}, 1),
 		readOnce:       make(chan struct{}, 1),
 		finalOffset:    protocol.MaxByteCount,
-		version:        version,
 	}
 }
 
@@ -218,7 +215,7 @@ func (s *receiveStream) cancelReadImpl(errorCode qerr.StreamErrorCode) bool /* c
 		return false
 	}
 	s.canceledRead = true
-	s.cancelReadErr = fmt.Errorf("Read on stream %d canceled with error code %d", s.streamID, errorCode)
+	s.cancelReadErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: false}
 	s.signalRead()
 	s.sender.queueControlFrame(&wire.StopSendingFrame{
 		StreamID:  s.streamID,
@@ -290,6 +287,7 @@ func (s *receiveStream) handleResetStreamFrameImpl(frame *wire.ResetStreamFrame)
 	s.resetRemotelyErr = &StreamError{
 		StreamID:  s.streamID,
 		ErrorCode: frame.ErrorCode,
+		Remote:    true,
 	}
 	s.signalRead()
 	return newlyRcvdFinalOffset, nil

+ 10 - 12
vendor/github.com/Psiphon-Labs/quic-go/retransmission_queue.go

@@ -15,12 +15,10 @@ type retransmissionQueue struct {
 	handshakeCryptoData []*wire.CryptoFrame
 
 	appData []wire.Frame
-
-	version protocol.VersionNumber
 }
 
-func newRetransmissionQueue(ver protocol.VersionNumber) *retransmissionQueue {
-	return &retransmissionQueue{version: ver}
+func newRetransmissionQueue() *retransmissionQueue {
+	return &retransmissionQueue{}
 }
 
 func (q *retransmissionQueue) AddInitial(f wire.Frame) {
@@ -58,10 +56,10 @@ func (q *retransmissionQueue) AddAppData(f wire.Frame) {
 	q.appData = append(q.appData, f)
 }
 
-func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
 	if len(q.initialCryptoData) > 0 {
 		f := q.initialCryptoData[0]
-		newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version)
+		newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v)
 		if newFrame == nil && !needsSplit { // the whole frame fits
 			q.initialCryptoData = q.initialCryptoData[1:]
 			return f
@@ -74,17 +72,17 @@ func (q *retransmissionQueue) GetInitialFrame(maxLen protocol.ByteCount) wire.Fr
 		return nil
 	}
 	f := q.initial[0]
-	if f.Length(q.version) > maxLen {
+	if f.Length(v) > maxLen {
 		return nil
 	}
 	q.initial = q.initial[1:]
 	return f
 }
 
-func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
 	if len(q.handshakeCryptoData) > 0 {
 		f := q.handshakeCryptoData[0]
-		newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, q.version)
+		newFrame, needsSplit := f.MaybeSplitOffFrame(maxLen, v)
 		if newFrame == nil && !needsSplit { // the whole frame fits
 			q.handshakeCryptoData = q.handshakeCryptoData[1:]
 			return f
@@ -97,19 +95,19 @@ func (q *retransmissionQueue) GetHandshakeFrame(maxLen protocol.ByteCount) wire.
 		return nil
 	}
 	f := q.handshake[0]
-	if f.Length(q.version) > maxLen {
+	if f.Length(v) > maxLen {
 		return nil
 	}
 	q.handshake = q.handshake[1:]
 	return f
 }
 
-func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount) wire.Frame {
+func (q *retransmissionQueue) GetAppDataFrame(maxLen protocol.ByteCount, v protocol.VersionNumber) wire.Frame {
 	if len(q.appData) == 0 {
 		return nil
 	}
 	f := q.appData[0]
-	if f.Length(q.version) > maxLen {
+	if f.Length(v) > maxLen {
 		return nil
 	}
 	q.appData = q.appData[1:]

+ 22 - 25
vendor/github.com/Psiphon-Labs/quic-go/send_stream.go

@@ -18,7 +18,7 @@ type sendStreamI interface {
 	SendStream
 	handleStopSendingFrame(*wire.StopSendingFrame)
 	hasData() bool
-	popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool)
+	popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool)
 	closeForShutdown(error)
 	updateSendWindow(protocol.ByteCount)
 }
@@ -54,8 +54,6 @@ type sendStream struct {
 	deadline  time.Time
 
 	flowController flowcontrol.StreamFlowController
-
-	version protocol.VersionNumber
 }
 
 var (
@@ -67,7 +65,6 @@ func newSendStream(
 	streamID protocol.StreamID,
 	sender streamSender,
 	flowController flowcontrol.StreamFlowController,
-	version protocol.VersionNumber,
 ) *sendStream {
 	s := &sendStream{
 		streamID:       streamID,
@@ -75,7 +72,6 @@ func newSendStream(
 		flowController: flowController,
 		writeChan:      make(chan struct{}, 1),
 		writeOnce:      make(chan struct{}, 1), // cap: 1, to protect against concurrent use of Write
-		version:        version,
 	}
 	s.ctx, s.ctxCancel = context.WithCancel(context.Background())
 	return s
@@ -204,9 +200,9 @@ func (s *sendStream) canBufferStreamFrame() bool {
 
 // popStreamFrame returns the next STREAM frame that is supposed to be sent on this stream
 // maxBytes is the maximum length this frame (including frame header) will have.
-func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool /* has more data to send */) {
+func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool /* has more data to send */) {
 	s.mutex.Lock()
-	f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes)
+	f, hasMoreData := s.popNewOrRetransmittedStreamFrame(maxBytes, v)
 	if f != nil {
 		s.numOutstandingFrames++
 	}
@@ -215,16 +211,20 @@ func (s *sendStream) popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Fr
 	if f == nil {
 		return nil, hasMoreData
 	}
-	return &ackhandler.Frame{Frame: f, OnLost: s.queueRetransmission, OnAcked: s.frameAcked}, hasMoreData
+	af := ackhandler.GetFrame()
+	af.Frame = f
+	af.OnLost = s.queueRetransmission
+	af.OnAcked = s.frameAcked
+	return af, hasMoreData
 }
 
-func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more data to send */) {
+func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more data to send */) {
 	if s.canceledWrite || s.closeForShutdownErr != nil {
 		return nil, false
 	}
 
 	if len(s.retransmissionQueue) > 0 {
-		f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes)
+		f, hasMoreRetransmissions := s.maybeGetRetransmission(maxBytes, v)
 		if f != nil || hasMoreRetransmissions {
 			if f == nil {
 				return nil, true
@@ -260,7 +260,7 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun
 		return nil, true
 	}
 
-	f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow)
+	f, hasMoreData := s.popNewStreamFrame(maxBytes, sendWindow, v)
 	if dataLen := f.DataLen(); dataLen > 0 {
 		s.writeOffset += f.DataLen()
 		s.flowController.AddBytesSent(f.DataLen())
@@ -272,12 +272,12 @@ func (s *sendStream) popNewOrRetransmittedStreamFrame(maxBytes protocol.ByteCoun
 	return f, hasMoreData
 }
 
-func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount) (*wire.StreamFrame, bool) {
+func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool) {
 	if s.nextFrame != nil {
 		nextFrame := s.nextFrame
 		s.nextFrame = nil
 
-		maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, s.version))
+		maxDataLen := utils.Min(sendWindow, nextFrame.MaxDataLen(maxBytes, v))
 		if nextFrame.DataLen() > maxDataLen {
 			s.nextFrame = wire.GetStreamFrame()
 			s.nextFrame.StreamID = s.streamID
@@ -299,7 +299,7 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount)
 	f.DataLenPresent = true
 	f.Data = f.Data[:0]
 
-	hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow)
+	hasMoreData := s.popNewStreamFrameWithoutBuffer(f, maxBytes, sendWindow, v)
 	if len(f.Data) == 0 && !f.Fin {
 		f.PutBack()
 		return nil, hasMoreData
@@ -307,8 +307,8 @@ func (s *sendStream) popNewStreamFrame(maxBytes, sendWindow protocol.ByteCount)
 	return f, hasMoreData
 }
 
-func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount) bool {
-	maxDataLen := f.MaxDataLen(maxBytes, s.version)
+func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxBytes, sendWindow protocol.ByteCount, v protocol.VersionNumber) bool {
+	maxDataLen := f.MaxDataLen(maxBytes, v)
 	if maxDataLen == 0 { // a STREAM frame must have at least one byte of data
 		return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting
 	}
@@ -317,9 +317,9 @@ func (s *sendStream) popNewStreamFrameWithoutBuffer(f *wire.StreamFrame, maxByte
 	return s.dataForWriting != nil || s.nextFrame != nil || s.finishedWriting
 }
 
-func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount) (*wire.StreamFrame, bool /* has more retransmissions */) {
+func (s *sendStream) maybeGetRetransmission(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*wire.StreamFrame, bool /* has more retransmissions */) {
 	f := s.retransmissionQueue[0]
-	newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, s.version)
+	newFrame, needsSplit := f.MaybeSplitOffFrame(maxBytes, v)
 	if needsSplit {
 		return newFrame, true
 	}
@@ -416,11 +416,11 @@ func (s *sendStream) Close() error {
 }
 
 func (s *sendStream) CancelWrite(errorCode StreamErrorCode) {
-	s.cancelWriteImpl(errorCode, fmt.Errorf("Write on stream %d canceled with error code %d", s.streamID, errorCode))
+	s.cancelWriteImpl(errorCode, false)
 }
 
 // must be called after locking the mutex
-func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr error) {
+func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, remote bool) {
 	s.mutex.Lock()
 	if s.canceledWrite {
 		s.mutex.Unlock()
@@ -428,7 +428,7 @@ func (s *sendStream) cancelWriteImpl(errorCode qerr.StreamErrorCode, writeErr er
 	}
 	s.ctxCancel()
 	s.canceledWrite = true
-	s.cancelWriteErr = writeErr
+	s.cancelWriteErr = &StreamError{StreamID: s.streamID, ErrorCode: errorCode, Remote: remote}
 	s.numOutstandingFrames = 0
 	s.retransmissionQueue = nil
 	newlyCompleted := s.isNewlyCompleted()
@@ -457,10 +457,7 @@ func (s *sendStream) updateSendWindow(limit protocol.ByteCount) {
 }
 
 func (s *sendStream) handleStopSendingFrame(frame *wire.StopSendingFrame) {
-	s.cancelWriteImpl(frame.ErrorCode, &StreamError{
-		StreamID:  s.streamID,
-		ErrorCode: frame.ErrorCode,
-	})
+	s.cancelWriteImpl(frame.ErrorCode, true)
 }
 
 func (s *sendStream) Context() context.Context {

+ 32 - 38
vendor/github.com/Psiphon-Labs/quic-go/server.go

@@ -1,7 +1,6 @@
 package quic
 
 import (
-	"bytes"
 	"context"
 	"crypto/rand"
 	"crypto/tls"
@@ -89,7 +88,6 @@ type baseServer struct {
 		*Config,
 		*tls.Config,
 		*handshake.TokenGenerator,
-		bool, /* enable 0-RTT */
 		bool, /* client address validated by an address validation token */
 		logging.ConnectionTracer,
 		uint64,
@@ -350,7 +348,7 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
 	}
 	// If we're creating a new connection, the packet will be passed to the connection.
 	// The header will then be parsed again.
-	hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen())
+	hdr, _, _, err := wire.ParsePacket(p.data)
 	if err != nil {
 		if s.config.Tracer != nil {
 			s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
@@ -358,10 +356,6 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
 		s.logger.Debugf("Error parsing packet: %s", err)
 		return false
 	}
-	// Short header packets should never end up here in the first place
-	if !hdr.IsLongHeader {
-		panic(fmt.Sprintf("misrouted packet: %#v", hdr))
-	}
 
 	// [Psiphon]
 	// To accommodate additional messages, obfuscated QUIC packets may reserve
@@ -380,7 +374,7 @@ func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer s
 		return false
 	}
 
-	if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial {
+	if hdr.Type != protocol.PacketTypeInitial {
 		// Drop long header packets.
 		// There's little point in sending a Stateless Reset, since the client
 		// might not have received the token yet.
@@ -474,8 +468,11 @@ func (s *stubCryptoSetup) Get1RTTSealer() (handshake.ShortHeaderSealer, error) {
 // frame, and calls Config.VerifyClientHelloRandom.
 func (s *baseServer) verifyClientHelloRandom(p *receivedPacket, hdr *wire.Header) error {
 
+	// TODO: support QUICv2
+	versionNumber := protocol.Version1
+
 	_, initialOpener := handshake.NewInitialAEAD(
-		hdr.DestConnectionID, protocol.PerspectiveServer, protocol.Version1)
+		hdr.DestConnectionID, protocol.PerspectiveServer, versionNumber)
 
 	cs := &stubCryptoSetup{
 		initialOpener: initialOpener,
@@ -485,17 +482,17 @@ func (s *baseServer) verifyClientHelloRandom(p *receivedPacket, hdr *wire.Header
 	// original packet data must be retained for subsequent processing.
 	data := append([]byte(nil), p.data...)
 
-	unpacker := newPacketUnpacker(cs, 0, protocol.Version1)
-	unpacked, err := unpacker.UnpackLongHeader(hdr, p.rcvTime, data)
+	unpacker := newPacketUnpacker(cs, 0)
+	unpacked, err := unpacker.UnpackLongHeader(hdr, p.rcvTime, data, versionNumber)
 	if err != nil {
 		return fmt.Errorf("verifyClientHelloRandom: UnpackLongHeader: %w", err)
 	}
 
-	parser := wire.NewFrameParser(s.config.EnableDatagrams, protocol.Version1)
+	parser := wire.NewFrameParser(s.config.EnableDatagrams)
 
 	d := unpacked.data
 	for len(d) > 0 {
-		l, frame, err := parser.ParseNext(d, protocol.EncryptionInitial)
+		l, frame, err := parser.ParseNext(d, protocol.EncryptionInitial, versionNumber)
 		if err != nil {
 			return fmt.Errorf("verifyClientHelloRandom: ParseNext: %w", err)
 		}
@@ -653,7 +650,6 @@ func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) erro
 			s.config,
 			s.tlsConf,
 			s.tokenGenerator,
-			s.acceptEarlyConns,
 			clientAddrIsValid,
 			tracer,
 			tracingID,
@@ -715,7 +711,6 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack
 		return err
 	}
 	replyHdr := &wire.ExtendedHeader{}
-	replyHdr.IsLongHeader = true
 	replyHdr.Type = protocol.PacketTypeRetry
 	replyHdr.Version = hdr.Version
 	replyHdr.SrcConnectionID = srcConnID
@@ -727,19 +722,19 @@ func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *pack
 		replyHdr.Log(s.logger)
 	}
 
-	packetBuffer := getPacketBuffer()
-	defer packetBuffer.Release()
-	buf := bytes.NewBuffer(packetBuffer.Data)
-	if err := replyHdr.Write(buf, hdr.Version); err != nil {
+	buf := getPacketBuffer()
+	defer buf.Release()
+	buf.Data, err = replyHdr.Append(buf.Data, hdr.Version)
+	if err != nil {
 		return err
 	}
 	// append the Retry integrity tag
-	tag := handshake.GetRetryIntegrityTag(buf.Bytes(), hdr.DestConnectionID, hdr.Version)
-	buf.Write(tag[:])
+	tag := handshake.GetRetryIntegrityTag(buf.Data, hdr.DestConnectionID, hdr.Version)
+	buf.Data = append(buf.Data, tag[:]...)
 	if s.config.Tracer != nil {
-		s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(buf.Len()), nil)
+		s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(buf.Data)), nil)
 	}
-	_, err = s.conn.WritePacket(buf.Bytes(), remoteAddr, info.OOB())
+	_, err = s.conn.WritePacket(buf.Data, remoteAddr, info.OOB())
 	return err
 }
 
@@ -777,47 +772,46 @@ func (s *baseServer) sendConnectionRefused(remoteAddr net.Addr, hdr *wire.Header
 
 // sendError sends the error as a response to the packet received with header hdr
 func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info *packetInfo) error {
-	packetBuffer := getPacketBuffer()
-	defer packetBuffer.Release()
-	buf := bytes.NewBuffer(packetBuffer.Data)
+	b := getPacketBuffer()
+	defer b.Release()
 
 	ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)}
 
 	replyHdr := &wire.ExtendedHeader{}
-	replyHdr.IsLongHeader = true
 	replyHdr.Type = protocol.PacketTypeInitial
 	replyHdr.Version = hdr.Version
 	replyHdr.SrcConnectionID = hdr.DestConnectionID
 	replyHdr.DestConnectionID = hdr.SrcConnectionID
 	replyHdr.PacketNumberLen = protocol.PacketNumberLen4
 	replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead())
-	if err := replyHdr.Write(buf, hdr.Version); err != nil {
+	var err error
+	b.Data, err = replyHdr.Append(b.Data, hdr.Version)
+	if err != nil {
 		return err
 	}
-	payloadOffset := buf.Len()
+	payloadOffset := len(b.Data)
 
-	raw := buf.Bytes()
-	raw, err := ccf.Append(raw, hdr.Version)
+	b.Data, err = ccf.Append(b.Data, hdr.Version)
 	if err != nil {
 		return err
 	}
 
-	_ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset])
-	raw = raw[0 : len(raw)+sealer.Overhead()]
+	_ = sealer.Seal(b.Data[payloadOffset:payloadOffset], b.Data[payloadOffset:], replyHdr.PacketNumber, b.Data[:payloadOffset])
+	b.Data = b.Data[0 : len(b.Data)+sealer.Overhead()]
 
 	pnOffset := payloadOffset - int(replyHdr.PacketNumberLen)
 	sealer.EncryptHeader(
-		raw[pnOffset+4:pnOffset+4+16],
-		&raw[0],
-		raw[pnOffset:payloadOffset],
+		b.Data[pnOffset+4:pnOffset+4+16],
+		&b.Data[0],
+		b.Data[pnOffset:payloadOffset],
 	)
 
 	replyHdr.Log(s.logger)
 	wire.LogFrame(s.logger, ccf, true)
 	if s.config.Tracer != nil {
-		s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf})
+		s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(b.Data)), []logging.Frame{ccf})
 	}
-	_, err = s.conn.WritePacket(raw, remoteAddr, info.OOB())
+	_, err = s.conn.WritePacket(b.Data, remoteAddr, info.OOB())
 	return err
 }
 

+ 4 - 7
vendor/github.com/Psiphon-Labs/quic-go/stream.go

@@ -60,7 +60,7 @@ type streamI interface {
 	// for sending
 	hasData() bool
 	handleStopSendingFrame(*wire.StopSendingFrame)
-	popStreamFrame(maxBytes protocol.ByteCount) (*ackhandler.Frame, bool)
+	popStreamFrame(maxBytes protocol.ByteCount, v protocol.VersionNumber) (*ackhandler.Frame, bool)
 	updateSendWindow(protocol.ByteCount)
 }
 
@@ -80,8 +80,6 @@ type stream struct {
 	sender                 streamSender
 	receiveStreamCompleted bool
 	sendStreamCompleted    bool
-
-	version protocol.VersionNumber
 }
 
 var _ Stream = &stream{}
@@ -90,9 +88,8 @@ var _ Stream = &stream{}
 func newStream(streamID protocol.StreamID,
 	sender streamSender,
 	flowController flowcontrol.StreamFlowController,
-	version protocol.VersionNumber,
 ) *stream {
-	s := &stream{sender: sender, version: version}
+	s := &stream{sender: sender}
 	senderForSendStream := &uniStreamSender{
 		streamSender: sender,
 		onStreamCompletedImpl: func() {
@@ -102,7 +99,7 @@ func newStream(streamID protocol.StreamID,
 			s.completedMutex.Unlock()
 		},
 	}
-	s.sendStream = *newSendStream(streamID, senderForSendStream, flowController, version)
+	s.sendStream = *newSendStream(streamID, senderForSendStream, flowController)
 	senderForReceiveStream := &uniStreamSender{
 		streamSender: sender,
 		onStreamCompletedImpl: func() {
@@ -112,7 +109,7 @@ func newStream(streamID protocol.StreamID,
 			s.completedMutex.Unlock()
 		},
 	}
-	s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController, version)
+	s.receiveStream = *newReceiveStream(streamID, senderForReceiveStream, flowController)
 	return s
 }
 

+ 4 - 7
vendor/github.com/Psiphon-Labs/quic-go/streams_map.go

@@ -46,7 +46,6 @@ var errTooManyOpenStreams = errors.New("too many open streams")
 
 type streamsMap struct {
 	perspective protocol.Perspective
-	version     protocol.VersionNumber
 
 	maxIncomingBidiStreams uint64
 	maxIncomingUniStreams  uint64
@@ -70,7 +69,6 @@ func newStreamsMap(
 	maxIncomingBidiStreams uint64,
 	maxIncomingUniStreams uint64,
 	perspective protocol.Perspective,
-	version protocol.VersionNumber,
 ) streamManager {
 	m := &streamsMap{
 		perspective:            perspective,
@@ -78,7 +76,6 @@ func newStreamsMap(
 		maxIncomingBidiStreams: maxIncomingBidiStreams,
 		maxIncomingUniStreams:  maxIncomingUniStreams,
 		sender:                 sender,
-		version:                version,
 	}
 	m.initMaps()
 	return m
@@ -89,7 +86,7 @@ func (m *streamsMap) initMaps() {
 		protocol.StreamTypeBidi,
 		func(num protocol.StreamNum) streamI {
 			id := num.StreamID(protocol.StreamTypeBidi, m.perspective)
-			return newStream(id, m.sender, m.newFlowController(id), m.version)
+			return newStream(id, m.sender, m.newFlowController(id))
 		},
 		m.sender.queueControlFrame,
 	)
@@ -97,7 +94,7 @@ func (m *streamsMap) initMaps() {
 		protocol.StreamTypeBidi,
 		func(num protocol.StreamNum) streamI {
 			id := num.StreamID(protocol.StreamTypeBidi, m.perspective.Opposite())
-			return newStream(id, m.sender, m.newFlowController(id), m.version)
+			return newStream(id, m.sender, m.newFlowController(id))
 		},
 		m.maxIncomingBidiStreams,
 		m.sender.queueControlFrame,
@@ -106,7 +103,7 @@ func (m *streamsMap) initMaps() {
 		protocol.StreamTypeUni,
 		func(num protocol.StreamNum) sendStreamI {
 			id := num.StreamID(protocol.StreamTypeUni, m.perspective)
-			return newSendStream(id, m.sender, m.newFlowController(id), m.version)
+			return newSendStream(id, m.sender, m.newFlowController(id))
 		},
 		m.sender.queueControlFrame,
 	)
@@ -114,7 +111,7 @@ func (m *streamsMap) initMaps() {
 		protocol.StreamTypeUni,
 		func(num protocol.StreamNum) receiveStreamI {
 			id := num.StreamID(protocol.StreamTypeUni, m.perspective.Opposite())
-			return newReceiveStream(id, m.sender, m.newFlowController(id), m.version)
+			return newReceiveStream(id, m.sender, m.newFlowController(id))
 		},
 		m.maxIncomingUniStreams,
 		m.sender.queueControlFrame,

+ 2 - 1
vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_linux.go

@@ -6,8 +6,9 @@ import (
 	"errors"
 	"syscall"
 
-	"github.com/Psiphon-Labs/quic-go/internal/utils"
 	"golang.org/x/sys/unix"
+
+	"github.com/Psiphon-Labs/quic-go/internal/utils"
 )
 
 func setDF(rawConn syscall.RawConn) error {

+ 2 - 1
vendor/github.com/Psiphon-Labs/quic-go/sys_conn_df_windows.go

@@ -6,8 +6,9 @@ import (
 	"errors"
 	"syscall"
 
-	"github.com/Psiphon-Labs/quic-go/internal/utils"
 	"golang.org/x/sys/windows"
+
+	"github.com/Psiphon-Labs/quic-go/internal/utils"
 )
 
 const (

+ 8 - 8
vendor/github.com/Psiphon-Labs/quic-go/token_store.go

@@ -1,10 +1,10 @@
 package quic
 
 import (
-	"container/list"
 	"sync"
 
 	"github.com/Psiphon-Labs/quic-go/internal/utils"
+	list "github.com/Psiphon-Labs/quic-go/internal/utils/linkedlist"
 )
 
 type singleOriginTokenStore struct {
@@ -48,8 +48,8 @@ type lruTokenStoreEntry struct {
 type lruTokenStore struct {
 	mutex sync.Mutex
 
-	m                map[string]*list.Element
-	q                *list.List
+	m                map[string]*list.Element[*lruTokenStoreEntry]
+	q                *list.List[*lruTokenStoreEntry]
 	capacity         int
 	singleOriginSize int
 }
@@ -61,8 +61,8 @@ var _ TokenStore = &lruTokenStore{}
 // tokensPerOrigin specifies the maximum number of tokens per origin.
 func NewLRUTokenStore(maxOrigins, tokensPerOrigin int) TokenStore {
 	return &lruTokenStore{
-		m:                make(map[string]*list.Element),
-		q:                list.New(),
+		m:                make(map[string]*list.Element[*lruTokenStoreEntry]),
+		q:                list.New[*lruTokenStoreEntry](),
 		capacity:         maxOrigins,
 		singleOriginSize: tokensPerOrigin,
 	}
@@ -73,7 +73,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) {
 	defer s.mutex.Unlock()
 
 	if el, ok := s.m[key]; ok {
-		entry := el.Value.(*lruTokenStoreEntry)
+		entry := el.Value
 		entry.cache.Add(token)
 		s.q.MoveToFront(el)
 		return
@@ -90,7 +90,7 @@ func (s *lruTokenStore) Put(key string, token *ClientToken) {
 	}
 
 	elem := s.q.Back()
-	entry := elem.Value.(*lruTokenStoreEntry)
+	entry := elem.Value
 	delete(s.m, entry.key)
 	entry.key = key
 	entry.cache = newSingleOriginTokenStore(s.singleOriginSize)
@@ -106,7 +106,7 @@ func (s *lruTokenStore) Pop(key string) *ClientToken {
 	var token *ClientToken
 	if el, ok := s.m[key]; ok {
 		s.q.MoveToFront(el)
-		cache := el.Value.(*lruTokenStoreEntry).cache
+		cache := el.Value.cache
 		token = cache.Pop()
 		if cache.Len() == 0 {
 			s.q.Remove(el)

+ 14 - 0
vendor/github.com/go-task/slim-sprig/.editorconfig

@@ -0,0 +1,14 @@
+# editorconfig.org
+
+root = true
+
+[*]
+insert_final_newline = true
+charset = utf-8
+trim_trailing_whitespace = true
+indent_style = tab
+indent_size = 8
+
+[*.{md,yml,yaml,json}]
+indent_style = space
+indent_size = 2

+ 1 - 0
vendor/github.com/go-task/slim-sprig/.gitattributes

@@ -0,0 +1 @@
+* text=auto

+ 2 - 0
vendor/github.com/go-task/slim-sprig/.gitignore

@@ -0,0 +1,2 @@
+vendor/
+/.glide

+ 364 - 0
vendor/github.com/go-task/slim-sprig/CHANGELOG.md

@@ -0,0 +1,364 @@
+# Changelog
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+  (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+  ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+        as it causes a breaking change for sprig. That issue is tracked at
+        https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+  use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list concat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160,  #153, #158, #156,  #155,  #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` 
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+  switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release

+ 19 - 0
vendor/github.com/go-task/slim-sprig/LICENSE.txt

@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 73 - 0
vendor/github.com/go-task/slim-sprig/README.md

@@ -0,0 +1,73 @@
+# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig)
+
+Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with
+all functions that depend on external (non standard library) or crypto packages
+removed.
+The reason for this is to make this library more lightweight. Most of these
+functions (especially crypto ones) are not needed in most apps, but cost a lot
+in terms of binary size and compilation time.
+
+## Usage
+
+**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Slim-Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig).
+
+For standard usage, read on.
+
+### Load the Slim-Sprig library
+
+To load the Slim-Sprig `FuncMap`:
+
+```go
+
+import (
+  "html/template"
+
+  "github.com/go-task/slim-sprig"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+  template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
+)
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+  types of operations are within the domain of template functions:
+  - Formatting
+  - Layout
+  - Simple type conversions
+  - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+  a sensible value. For example, converting a string to an integer should not
+  produce an error if conversion fails. Instead, it should display a default
+  value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+  (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+  data from a source.
+- Finally, do not override core Go template functions.

+ 12 - 0
vendor/github.com/go-task/slim-sprig/Taskfile.yml

@@ -0,0 +1,12 @@
+# https://taskfile.dev
+
+version: '2'
+
+tasks:
+  default:
+    cmds:
+      - task: test
+
+  test:
+    cmds:
+      - go test -v .

+ 24 - 0
vendor/github.com/go-task/slim-sprig/crypto.go

@@ -0,0 +1,24 @@
+package sprig
+
+import (
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"hash/adler32"
+)
+
+func sha256sum(input string) string {
+	hash := sha256.Sum256([]byte(input))
+	return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+	hash := sha1.Sum([]byte(input))
+	return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+	hash := adler32.Checksum([]byte(input))
+	return fmt.Sprintf("%d", hash)
+}

+ 152 - 0
vendor/github.com/go-task/slim-sprig/date.go

@@ -0,0 +1,152 @@
+package sprig
+
+import (
+	"strconv"
+	"time"
+)
+
// date formats the given date according to fmt, in the local time zone.
//
// Date can be a `time.Time` or an `int, int32, int64`.
// In the latter case, it is treated as seconds since UNIX
// epoch.
func date(fmt string, date interface{}) string {
	return dateInZone(fmt, date, "Local")
}

// htmlDate formats date as an HTML5-style YYYY-MM-DD string in the
// local time zone.
func htmlDate(date interface{}) string {
	return dateInZone("2006-01-02", date, "Local")
}

// htmlDateInZone formats date as YYYY-MM-DD in the named time zone.
func htmlDateInZone(date interface{}, zone string) string {
	return dateInZone("2006-01-02", date, zone)
}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+	var t time.Time
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case *time.Time:
+		t = *date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	case int32:
+		t = time.Unix(int64(date), 0)
+	}
+
+	loc, err := time.LoadLocation(zone)
+	if err != nil {
+		loc, _ = time.LoadLocation("UTC")
+	}
+
+	return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return date
+	}
+	return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+	d, err := time.ParseDuration(fmt)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+	var t time.Time
+
+	switch date := date.(type) {
+	default:
+		t = time.Now()
+	case time.Time:
+		t = date
+	case int64:
+		t = time.Unix(date, 0)
+	case int:
+		t = time.Unix(int64(date), 0)
+	}
+	// Drop resolution to seconds
+	duration := time.Since(t).Round(time.Second)
+	return duration.String()
+}
+
// duration renders sec — a count of seconds given as a string, int,
// int32, or int64 — as a time.Duration string (e.g. "95s" -> "1m35s").
//
// Previously plain int and int32 fell through to the default case and
// rendered as "0s", inconsistent with the other date helpers that
// accept all three integer widths; they are now handled. Anything
// unrecognized still renders as "0s".
func duration(sec interface{}) string {
	var n int64
	switch value := sec.(type) {
	default:
		n = 0
	case string:
		// Unparseable strings fall back to 0 rather than erroring.
		n, _ = strconv.ParseInt(value, 10, 64)
	case int64:
		n = value
	case int:
		n = int64(value)
	case int32:
		n = int64(value)
	}
	return (time.Duration(n) * time.Second).String()
}
+
// durationRound renders a duration using only its most significant
// unit: "y", "mo", "d", "h", "m", or "s" (e.g. 2h10m -> "2h"). Input
// may be a duration string, an int64 of nanoseconds, or a time.Time
// (rounded as the span since that instant); any other type renders as
// "0s". The sign of a negative duration is dropped.
func durationRound(duration interface{}) string {
	var d time.Duration
	switch duration := duration.(type) {
	default:
		d = 0
	case string:
		d, _ = time.ParseDuration(duration)
	case int64:
		d = time.Duration(duration)
	case time.Time:
		d = time.Since(duration)
	}

	u := uint64(d)
	neg := d < 0
	if neg {
		// Unsigned negation wraps around to the absolute value.
		u = -u
	}

	// Calendar-style approximations: a month is 30 days, a year 365.
	var (
		year   = uint64(time.Hour) * 24 * 365
		month  = uint64(time.Hour) * 24 * 30
		day    = uint64(time.Hour) * 24
		hour   = uint64(time.Hour)
		minute = uint64(time.Minute)
		second = uint64(time.Second)
	)
	// Strictly-greater comparisons: exactly one unit (e.g. 60s) still
	// reports the smaller unit ("60s" -> "1m" only above a minute).
	switch {
	case u > year:
		return strconv.FormatUint(u/year, 10) + "y"
	case u > month:
		return strconv.FormatUint(u/month, 10) + "mo"
	case u > day:
		return strconv.FormatUint(u/day, 10) + "d"
	case u > hour:
		return strconv.FormatUint(u/hour, 10) + "h"
	case u > minute:
		return strconv.FormatUint(u/minute, 10) + "m"
	case u > second:
		return strconv.FormatUint(u/second, 10) + "s"
	}
	return "0s"
}
+
+func toDate(fmt, str string) time.Time {
+	t, _ := time.ParseInLocation(fmt, str, time.Local)
+	return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+	return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+	return strconv.FormatInt(date.Unix(), 10)
+}

+ 163 - 0
vendor/github.com/go-task/slim-sprig/defaults.go

@@ -0,0 +1,163 @@
+package sprig
+
+import (
+	"bytes"
+	"encoding/json"
+	"math/rand"
+	"reflect"
+	"strings"
+	"time"
+)
+
// init seeds math/rand so template functions that draw random values
// (e.g. randInt, getHostByName) differ between process runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
+
// dfault returns given[0] when it is present and set, and d otherwise.
//
// "Set" follows empty(): zero numbers, false, and zero-length strings,
// maps, arrays, and slices count as unset; structs are always set; for
// everything else (including pointers) nil is unset.
func dfault(d interface{}, given ...interface{}) interface{} {
	if len(given) == 0 || empty(given[0]) {
		return d
	}
	return given[0]
}

// empty reports whether given holds the zero value for its type.
// Adapted from text/template.isTrue.
func empty(given interface{}) bool {
	v := reflect.ValueOf(given)
	if !v.IsValid() {
		return true
	}

	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Struct:
		return false
	default:
		// Chan, Func, Interface, Ptr, UnsafePointer: unset means nil.
		return v.IsNil()
	}
}

// coalesce returns the first non-empty value, or nil when all are empty.
func coalesce(v ...interface{}) interface{} {
	for _, candidate := range v {
		if !empty(candidate) {
			return candidate
		}
	}
	return nil
}

// all reports whether every value in the list is non-empty.
// An empty list yields true.
func all(v ...interface{}) bool {
	for _, candidate := range v {
		if empty(candidate) {
			return false
		}
	}
	return true
}

// any reports whether at least one value in the list is non-empty.
// An empty list yields false.
func any(v ...interface{}) bool {
	for _, candidate := range v {
		if !empty(candidate) {
			return true
		}
	}
	return false
}
+
// fromJson decodes JSON into a structured value, ignoring errors.
func fromJson(v string) interface{} {
	decoded, _ := mustFromJson(v)
	return decoded
}

// mustFromJson decodes JSON into a structured value, returning errors.
func mustFromJson(v string) (interface{}, error) {
	var decoded interface{}
	err := json.Unmarshal([]byte(v), &decoded)
	return decoded, err
}

// toJson encodes an item into a JSON string, ignoring errors.
func toJson(v interface{}) string {
	encoded, _ := json.Marshal(v)
	return string(encoded)
}

// mustToJson encodes an item into a JSON string, returning errors.
func mustToJson(v interface{}) (string, error) {
	encoded, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}

// toPrettyJson encodes an item into an indented JSON string,
// ignoring errors.
func toPrettyJson(v interface{}) string {
	encoded, _ := json.MarshalIndent(v, "", "  ")
	return string(encoded)
}

// mustToPrettyJson encodes an item into an indented JSON string,
// returning errors.
func mustToPrettyJson(v interface{}) (string, error) {
	encoded, err := json.MarshalIndent(v, "", "  ")
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}

// toRawJson encodes an item into a JSON string with no escaping of
// HTML characters, panicking on encode failure.
func toRawJson(v interface{}) string {
	encoded, err := mustToRawJson(v)
	if err != nil {
		panic(err)
	}
	return encoded
}

// mustToRawJson encodes an item into a JSON string with no escaping of
// HTML characters.
func mustToRawJson(v interface{}) (string, error) {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(&v); err != nil {
		return "", err
	}
	// Encoder.Encode appends a newline; strip it.
	return strings.TrimSuffix(buf.String(), "\n"), nil
}

// ternary returns vt when v is true and vf otherwise.
func ternary(vt interface{}, vf interface{}, v bool) interface{} {
	if !v {
		return vf
	}
	return vt
}

+ 118 - 0
vendor/github.com/go-task/slim-sprig/dict.go

@@ -0,0 +1,118 @@
+package sprig
+
// get returns d[key], or "" when the key is absent.
func get(d map[string]interface{}, key string) interface{} {
	val, ok := d[key]
	if !ok {
		return ""
	}
	return val
}

// set stores value under key in d (mutating d) and returns d.
func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
	d[key] = value
	return d
}

// unset removes key from d (mutating d) and returns d.
func unset(d map[string]interface{}, key string) map[string]interface{} {
	delete(d, key)
	return d
}

// hasKey reports whether key is present in d.
func hasKey(d map[string]interface{}, key string) bool {
	_, found := d[key]
	return found
}

// pluck collects the value of key from each dict that defines it.
func pluck(key string, d ...map[string]interface{}) []interface{} {
	found := []interface{}{}
	for _, m := range d {
		if v, ok := m[key]; ok {
			found = append(found, v)
		}
	}
	return found
}

// keys gathers every key of every dict (unordered, duplicates kept).
func keys(dicts ...map[string]interface{}) []string {
	names := []string{}
	for _, m := range dicts {
		for name := range m {
			names = append(names, name)
		}
	}
	return names
}

// pick builds a new map holding only the named keys present in dict.
func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
	picked := map[string]interface{}{}
	for _, name := range keys {
		if v, ok := dict[name]; ok {
			picked[name] = v
		}
	}
	return picked
}

// omit builds a new map holding every entry of dict except the named keys.
func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
	drop := make(map[string]bool, len(keys))
	for _, name := range keys {
		drop[name] = true
	}

	kept := map[string]interface{}{}
	for name, v := range dict {
		if !drop[name] {
			kept[name] = v
		}
	}
	return kept
}
+
// dict builds a map from alternating key/value arguments. Keys are
// stringified via strval; a trailing key with no value maps to "".
func dict(v ...interface{}) map[string]interface{} {
	dict := map[string]interface{}{}
	lenv := len(v)
	for i := 0; i < lenv; i += 2 {
		key := strval(v[i])
		// Odd argument count: the final key gets an empty value.
		if i+1 >= lenv {
			dict[key] = ""
			continue
		}
		dict[key] = v[i+1]
	}
	return dict
}
+
// values collects every value of dict, in map-iteration (unspecified)
// order.
func values(dict map[string]interface{}) []interface{} {
	collected := []interface{}{}
	for _, v := range dict {
		collected = append(collected, v)
	}
	return collected
}
+
// dig walks nested maps: `dig "k1" "k2" default dict`. The last
// argument is the dict, the second-to-last the default returned when
// any key along the path is absent. Fewer than three arguments is a
// programmer error and panics; non-string keys or non-map intermediate
// values panic via the type assertions below.
func dig(ps ...interface{}) (interface{}, error) {
	if len(ps) < 3 {
		panic("dig needs at least three arguments")
	}
	dict := ps[len(ps)-1].(map[string]interface{})
	def := ps[len(ps)-2]
	ks := make([]string, len(ps)-2)
	for i := 0; i < len(ks); i++ {
		ks[i] = ps[i].(string)
	}

	return digFromDict(dict, def, ks)
}

// digFromDict recursively descends dict along the keys in ks,
// returning d as soon as a key is missing.
func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
	k, ns := ks[0], ks[1:len(ks)]
	step, has := dict[k]
	if !has {
		return d, nil
	}
	if len(ns) == 0 {
		return step, nil
	}
	return digFromDict(step.(map[string]interface{}), d, ns)
}

+ 19 - 0
vendor/github.com/go-task/slim-sprig/doc.go

@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+	t := templates.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+	In several cases, Sprig reverses the order of arguments from the way they
+	appear in the standard library. This is to make it easier to pipe
+	arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig

+ 317 - 0
vendor/github.com/go-task/slim-sprig/functions.go

@@ -0,0 +1,317 @@
+package sprig
+
+import (
+	"errors"
+	"html/template"
+	"math/rand"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"strconv"
+	"strings"
+	ttemplate "text/template"
+	"time"
+)
+
// FuncMap produces the function map.
//
// Use this to pass the functions into the template engine:
//
// 	tpl := template.New("foo").Funcs(sprig.FuncMap()))
//
func FuncMap() template.FuncMap {
	return HtmlFuncMap()
}

// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
func HermeticTxtFuncMap() ttemplate.FuncMap {
	r := TxtFuncMap()
	for _, name := range nonhermeticFunctions {
		delete(r, name)
	}
	return r
}

// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
func HermeticHtmlFuncMap() template.FuncMap {
	r := HtmlFuncMap()
	for _, name := range nonhermeticFunctions {
		delete(r, name)
	}
	return r
}

// TxtFuncMap returns a 'text/template'.FuncMap
func TxtFuncMap() ttemplate.FuncMap {
	return ttemplate.FuncMap(GenericFuncMap())
}

// HtmlFuncMap returns an 'html/template'.Funcmap
func HtmlFuncMap() template.FuncMap {
	return template.FuncMap(GenericFuncMap())
}

// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
// The copy lets callers add or delete entries without mutating the
// shared genericMap table.
func GenericFuncMap() map[string]interface{} {
	gfm := make(map[string]interface{}, len(genericMap))
	for k, v := range genericMap {
		gfm[k] = v
	}
	return gfm
}
+
// These functions are not guaranteed to evaluate to the same result for given input, because they
// refer to the environment or global state. The Hermetic*FuncMap
// constructors strip these names from the full function map.
var nonhermeticFunctions = []string{
	// Date functions
	"date",
	"date_in_zone",
	"date_modify",
	"now",
	"htmlDate",
	"htmlDateInZone",
	"dateInZone",
	"dateModify",

	// Strings
	"randAlphaNum",
	"randAlpha",
	"randAscii",
	"randNumeric",
	"randBytes",
	"uuidv4",

	// OS
	"env",
	"expandenv",

	// Network
	"getHostByName",
}
+
+var genericMap = map[string]interface{}{
+	"hello": func() string { return "Hello!" },
+
+	// Date functions
+	"ago":              dateAgo,
+	"date":             date,
+	"date_in_zone":     dateInZone,
+	"date_modify":      dateModify,
+	"dateInZone":       dateInZone,
+	"dateModify":       dateModify,
+	"duration":         duration,
+	"durationRound":    durationRound,
+	"htmlDate":         htmlDate,
+	"htmlDateInZone":   htmlDateInZone,
+	"must_date_modify": mustDateModify,
+	"mustDateModify":   mustDateModify,
+	"mustToDate":       mustToDate,
+	"now":              time.Now,
+	"toDate":           toDate,
+	"unixEpoch":        unixEpoch,
+
+	// Strings
+	"trunc":  trunc,
+	"trim":   strings.TrimSpace,
+	"upper":  strings.ToUpper,
+	"lower":  strings.ToLower,
+	"title":  strings.Title,
+	"substr": substring,
+	// Switch order so that "foo" | repeat 5
+	"repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+	// Deprecated: Use trimAll.
+	"trimall": func(a, b string) string { return strings.Trim(b, a) },
+	// Switch order so that "$foo" | trimall "$"
+	"trimAll":    func(a, b string) string { return strings.Trim(b, a) },
+	"trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+	"trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+	// Switch order so that "foobar" | contains "foo"
+	"contains":   func(substr string, str string) bool { return strings.Contains(str, substr) },
+	"hasPrefix":  func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+	"hasSuffix":  func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+	"quote":      quote,
+	"squote":     squote,
+	"cat":        cat,
+	"indent":     indent,
+	"nindent":    nindent,
+	"replace":    replace,
+	"plural":     plural,
+	"sha1sum":    sha1sum,
+	"sha256sum":  sha256sum,
+	"adler32sum": adler32sum,
+	"toString":   strval,
+
+	// Wrap Atoi to stop errors.
+	"atoi":      func(a string) int { i, _ := strconv.Atoi(a); return i },
+	"int64":     toInt64,
+	"int":       toInt,
+	"float64":   toFloat64,
+	"seq":       seq,
+	"toDecimal": toDecimal,
+
+	//"gt": func(a, b int) bool {return a > b},
+	//"gte": func(a, b int) bool {return a >= b},
+	//"lt": func(a, b int) bool {return a < b},
+	//"lte": func(a, b int) bool {return a <= b},
+
+	// split "/" foo/bar returns map[int]string{0: foo, 1: bar}
+	"split":     split,
+	"splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+	// splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
+	"splitn":    splitn,
+	"toStrings": strslice,
+
+	"until":     until,
+	"untilStep": untilStep,
+
+	// VERY basic arithmetic.
+	"add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+	"add": func(i ...interface{}) int64 {
+		var a int64 = 0
+		for _, b := range i {
+			a += toInt64(b)
+		}
+		return a
+	},
+	"sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+	"div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+	"mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+	"mul": func(a interface{}, v ...interface{}) int64 {
+		val := toInt64(a)
+		for _, b := range v {
+			val = val * toInt64(b)
+		}
+		return val
+	},
+	"randInt": func(min, max int) int { return rand.Intn(max-min) + min },
+	"biggest": max,
+	"max":     max,
+	"min":     min,
+	"maxf":    maxf,
+	"minf":    minf,
+	"ceil":    ceil,
+	"floor":   floor,
+	"round":   round,
+
+	// string slices. Note that we reverse the order b/c that's better
+	// for template processing.
+	"join":      join,
+	"sortAlpha": sortAlpha,
+
+	// Defaults
+	"default":          dfault,
+	"empty":            empty,
+	"coalesce":         coalesce,
+	"all":              all,
+	"any":              any,
+	"compact":          compact,
+	"mustCompact":      mustCompact,
+	"fromJson":         fromJson,
+	"toJson":           toJson,
+	"toPrettyJson":     toPrettyJson,
+	"toRawJson":        toRawJson,
+	"mustFromJson":     mustFromJson,
+	"mustToJson":       mustToJson,
+	"mustToPrettyJson": mustToPrettyJson,
+	"mustToRawJson":    mustToRawJson,
+	"ternary":          ternary,
+
+	// Reflection
+	"typeOf":     typeOf,
+	"typeIs":     typeIs,
+	"typeIsLike": typeIsLike,
+	"kindOf":     kindOf,
+	"kindIs":     kindIs,
+	"deepEqual":  reflect.DeepEqual,
+
+	// OS:
+	"env":       os.Getenv,
+	"expandenv": os.ExpandEnv,
+
+	// Network:
+	"getHostByName": getHostByName,
+
+	// Paths:
+	"base":  path.Base,
+	"dir":   path.Dir,
+	"clean": path.Clean,
+	"ext":   path.Ext,
+	"isAbs": path.IsAbs,
+
+	// Filepaths:
+	"osBase":  filepath.Base,
+	"osClean": filepath.Clean,
+	"osDir":   filepath.Dir,
+	"osExt":   filepath.Ext,
+	"osIsAbs": filepath.IsAbs,
+
+	// Encoding:
+	"b64enc": base64encode,
+	"b64dec": base64decode,
+	"b32enc": base32encode,
+	"b32dec": base32decode,
+
+	// Data Structures:
+	"tuple":  list, // FIXME: with the addition of append/prepend these are no longer immutable.
+	"list":   list,
+	"dict":   dict,
+	"get":    get,
+	"set":    set,
+	"unset":  unset,
+	"hasKey": hasKey,
+	"pluck":  pluck,
+	"keys":   keys,
+	"pick":   pick,
+	"omit":   omit,
+	"values": values,
+
+	"append": push, "push": push,
+	"mustAppend": mustPush, "mustPush": mustPush,
+	"prepend":     prepend,
+	"mustPrepend": mustPrepend,
+	"first":       first,
+	"mustFirst":   mustFirst,
+	"rest":        rest,
+	"mustRest":    mustRest,
+	"last":        last,
+	"mustLast":    mustLast,
+	"initial":     initial,
+	"mustInitial": mustInitial,
+	"reverse":     reverse,
+	"mustReverse": mustReverse,
+	"uniq":        uniq,
+	"mustUniq":    mustUniq,
+	"without":     without,
+	"mustWithout": mustWithout,
+	"has":         has,
+	"mustHas":     mustHas,
+	"slice":       slice,
+	"mustSlice":   mustSlice,
+	"concat":      concat,
+	"dig":         dig,
+	"chunk":       chunk,
+	"mustChunk":   mustChunk,
+
+	// Flow Control:
+	"fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+	// Regex
+	"regexMatch":                 regexMatch,
+	"mustRegexMatch":             mustRegexMatch,
+	"regexFindAll":               regexFindAll,
+	"mustRegexFindAll":           mustRegexFindAll,
+	"regexFind":                  regexFind,
+	"mustRegexFind":              mustRegexFind,
+	"regexReplaceAll":            regexReplaceAll,
+	"mustRegexReplaceAll":        mustRegexReplaceAll,
+	"regexReplaceAllLiteral":     regexReplaceAllLiteral,
+	"mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
+	"regexSplit":                 regexSplit,
+	"mustRegexSplit":             mustRegexSplit,
+	"regexQuoteMeta":             regexQuoteMeta,
+
+	// URLs:
+	"urlParse": urlParse,
+	"urlJoin":  urlJoin,
+}

+ 464 - 0
vendor/github.com/go-task/slim-sprig/list.go

@@ -0,0 +1,464 @@
+package sprig
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+)
+
+// Reflection is used in these functions so that slices and arrays of strings,
+// ints, and other types not implementing []interface{} can be worked with.
+// For example, this is useful if you need to work on the output of regexs.
+
// list wraps its arguments in a []interface{}.
func list(v ...interface{}) []interface{} {
	return v
}

// push appends v to a copy of list; panics on non-list input.
func push(list interface{}, v interface{}) []interface{} {
	out, err := mustPush(list, v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustPush appends v to a copy of list, erroring when list is not a
// slice or array.
func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		out := make([]interface{}, src.Len())
		for i := range out {
			out[i] = src.Index(i).Interface()
		}
		return append(out, v), nil
	default:
		return nil, fmt.Errorf("Cannot push on type %s", kind)
	}
}

// prepend inserts v at the front of a copy of list; panics on non-list
// input.
func prepend(list interface{}, v interface{}) []interface{} {
	out, err := mustPrepend(list, v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustPrepend inserts v at the front of a copy of list, erroring when
// list is not a slice or array.
func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		out := make([]interface{}, 0, src.Len()+1)
		out = append(out, v)
		for i, n := 0, src.Len(); i < n; i++ {
			out = append(out, src.Index(i).Interface())
		}
		return out, nil
	default:
		return nil, fmt.Errorf("Cannot prepend on type %s", kind)
	}
}
+
// chunk splits list into sub-slices of at most size elements; panics
// on non-list input.
func chunk(size int, list interface{}) [][]interface{} {
	l, err := mustChunk(size, list)
	if err != nil {
		panic(err)
	}

	return l
}

// mustChunk splits list into ceil(len/size) groups of size elements,
// the final group holding the remainder; errors when list is not a
// slice or array.
func mustChunk(size int, list interface{}) ([][]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()

		// Number of chunks, rounding up.
		cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
		nl := make([][]interface{}, cs)

		for i := 0; i < cs; i++ {
			clen := size
			if i == cs-1 {
				// The last chunk takes the remainder — or a full chunk
				// when the length divides evenly.
				clen = int(math.Floor(math.Mod(float64(l), float64(size))))
				if clen == 0 {
					clen = size
				}
			}

			nl[i] = make([]interface{}, clen)

			for j := 0; j < clen; j++ {
				ix := i*size + j
				nl[i][j] = l2.Index(ix).Interface()
			}
		}

		return nl, nil

	default:
		return nil, fmt.Errorf("Cannot chunk type %s", tp)
	}
}
+
// last returns the final element of list, or nil for an empty list;
// panics on non-list input.
func last(list interface{}) interface{} {
	v, err := mustLast(list)
	if err != nil {
		panic(err)
	}
	return v
}

// mustLast returns the final element of list, nil when the list is
// empty, or an error when list is not a slice or array.
func mustLast(list interface{}) (interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		n := src.Len()
		if n == 0 {
			return nil, nil
		}
		return src.Index(n - 1).Interface(), nil
	default:
		return nil, fmt.Errorf("Cannot find last on type %s", kind)
	}
}

// first returns the initial element of list, or nil for an empty list;
// panics on non-list input.
func first(list interface{}) interface{} {
	v, err := mustFirst(list)
	if err != nil {
		panic(err)
	}
	return v
}

// mustFirst returns the initial element of list, nil when the list is
// empty, or an error when list is not a slice or array.
func mustFirst(list interface{}) (interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		if src.Len() == 0 {
			return nil, nil
		}
		return src.Index(0).Interface(), nil
	default:
		return nil, fmt.Errorf("Cannot find first on type %s", kind)
	}
}

// rest returns a copy of list without its first element; panics on
// non-list input.
func rest(list interface{}) []interface{} {
	out, err := mustRest(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustRest returns everything after the first element (nil for an
// empty list), or an error when list is not a slice or array.
func mustRest(list interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		n := src.Len()
		if n == 0 {
			return nil, nil
		}
		out := make([]interface{}, 0, n-1)
		for i := 1; i < n; i++ {
			out = append(out, src.Index(i).Interface())
		}
		return out, nil
	default:
		return nil, fmt.Errorf("Cannot find rest on type %s", kind)
	}
}

// initial returns a copy of list without its last element; panics on
// non-list input.
func initial(list interface{}) []interface{} {
	out, err := mustInitial(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustInitial returns everything before the last element (nil for an
// empty list), or an error when list is not a slice or array.
func mustInitial(list interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		n := src.Len()
		if n == 0 {
			return nil, nil
		}
		out := make([]interface{}, 0, n-1)
		for i := 0; i < n-1; i++ {
			out = append(out, src.Index(i).Interface())
		}
		return out, nil
	default:
		return nil, fmt.Errorf("Cannot find initial on type %s", kind)
	}
}
+
// sortAlpha returns the list's elements, stringified and sorted
// lexically. Non-list input is stringified into a single-element
// result.
func sortAlpha(list interface{}) []string {
	k := reflect.Indirect(reflect.ValueOf(list)).Kind()
	switch k {
	case reflect.Slice, reflect.Array:
		a := strslice(list)
		s := sort.StringSlice(a)
		s.Sort()
		return s
	}
	return []string{strval(list)}
}
+
// reverse returns the elements of v in reverse order; panics on
// non-list input.
func reverse(v interface{}) []interface{} {
	out, err := mustReverse(v)
	if err != nil {
		panic(err)
	}
	return out
}

// mustReverse returns the elements of v in reverse order, erroring
// when v is not a slice or array.
func mustReverse(v interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(v).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(v)
		n := src.Len()
		// Build a fresh slice so the caller's data is left untouched.
		out := make([]interface{}, n)
		for i := 0; i < n; i++ {
			out[n-1-i] = src.Index(i).Interface()
		}
		return out, nil
	default:
		return nil, fmt.Errorf("Cannot find reverse on type %s", kind)
	}
}
+
// compact returns list without its empty elements (per empty());
// panics on non-list input.
func compact(list interface{}) []interface{} {
	l, err := mustCompact(list)
	if err != nil {
		panic(err)
	}

	return l
}

// mustCompact filters out elements that empty() considers unset,
// erroring when list is not a slice or array.
func mustCompact(list interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		nl := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !empty(item) {
				nl = append(nl, item)
			}
		}

		return nl, nil
	default:
		return nil, fmt.Errorf("Cannot compact on type %s", tp)
	}
}
+
// uniq returns list with duplicates removed, keeping first occurrences;
// panics on non-list input.
func uniq(list interface{}) []interface{} {
	out, err := mustUniq(list)
	if err != nil {
		panic(err)
	}
	return out
}

// mustUniq deduplicates list by reflect.DeepEqual, erroring when list
// is not a slice or array.
func mustUniq(list interface{}) ([]interface{}, error) {
	switch kind := reflect.TypeOf(list).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(list)
		seen := []interface{}{}
		for i, n := 0, src.Len(); i < n; i++ {
			v := src.Index(i).Interface()
			if !inList(seen, v) {
				seen = append(seen, v)
			}
		}
		return seen, nil
	default:
		return nil, fmt.Errorf("Cannot find uniq on type %s", kind)
	}
}

// inList reports whether needle deep-equals any element of haystack.
func inList(haystack []interface{}, needle interface{}) bool {
	for _, candidate := range haystack {
		if reflect.DeepEqual(needle, candidate) {
			return true
		}
	}
	return false
}
+
// without returns list minus every element equal to one of omit;
// panics on non-list input.
func without(list interface{}, omit ...interface{}) []interface{} {
	l, err := mustWithout(list, omit...)
	if err != nil {
		panic(err)
	}

	return l
}

// mustWithout filters out elements deep-equal (via inList) to any omit
// value, erroring when list is not a slice or array.
func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		res := []interface{}{}
		var item interface{}
		for i := 0; i < l; i++ {
			item = l2.Index(i).Interface()
			if !inList(omit, item) {
				res = append(res, item)
			}
		}

		return res, nil
	default:
		return nil, fmt.Errorf("Cannot find without on type %s", tp)
	}
}
+
// has reports whether haystack contains needle; panics on non-list
// input.
func has(needle interface{}, haystack interface{}) bool {
	found, err := mustHas(needle, haystack)
	if err != nil {
		panic(err)
	}
	return found
}

// mustHas reports whether haystack contains an element deep-equal to
// needle. A nil haystack contains nothing; non-list input errors.
func mustHas(needle interface{}, haystack interface{}) (bool, error) {
	if haystack == nil {
		return false, nil
	}
	switch kind := reflect.TypeOf(haystack).Kind(); kind {
	case reflect.Slice, reflect.Array:
		src := reflect.ValueOf(haystack)
		for i, n := 0, src.Len(); i < n; i++ {
			if reflect.DeepEqual(needle, src.Index(i).Interface()) {
				return true, nil
			}
		}
		return false, nil
	default:
		return false, fmt.Errorf("Cannot find has on type %s", kind)
	}
}
+
// $list := [1, 2, 3, 4, 5]
// slice $list     -> list[0:5] = list[:]
// slice $list 0 3 -> list[0:3] = list[:3]
// slice $list 3 5 -> list[3:5]
// slice $list 3   -> list[3:5] = list[3:]

// slice re-slices list by the given start/end indices (see examples
// above); panics on non-list input or out-of-range indices.
func slice(list interface{}, indices ...interface{}) interface{} {
	l, err := mustSlice(list, indices...)
	if err != nil {
		panic(err)
	}

	return l
}

// mustSlice implements slice: indices are coerced via toInt, a missing
// start defaults to 0 and a missing end to len(list). An empty list
// yields nil; non slice/array input errors.
func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
	tp := reflect.TypeOf(list).Kind()
	switch tp {
	case reflect.Slice, reflect.Array:
		l2 := reflect.ValueOf(list)

		l := l2.Len()
		if l == 0 {
			return nil, nil
		}

		var start, end int
		if len(indices) > 0 {
			start = toInt(indices[0])
		}
		if len(indices) < 2 {
			end = l
		} else {
			end = toInt(indices[1])
		}

		return l2.Slice(start, end).Interface(), nil
	default:
		return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
	}
}
+
// concat flattens any number of slices/arrays into one
// []interface{}; a non-list argument panics.
func concat(lists ...interface{}) interface{} {
	var merged []interface{}
	for _, l := range lists {
		switch kind := reflect.TypeOf(l).Kind(); kind {
		case reflect.Slice, reflect.Array:
			src := reflect.ValueOf(l)
			for i, n := 0, src.Len(); i < n; i++ {
				merged = append(merged, src.Index(i).Interface())
			}
		default:
			panic(fmt.Sprintf("Cannot concat type %s as list", kind))
		}
	}
	return merged
}

+ 12 - 0
vendor/github.com/go-task/slim-sprig/network.go

@@ -0,0 +1,12 @@
+package sprig
+
+import (
+	"math/rand"
+	"net"
+)
+
// getHostByName resolves name and returns one of its addresses,
// chosen at random. When the lookup fails or yields no addresses it
// returns "" — previously rand.Intn(0) would panic in that case.
func getHostByName(name string) string {
	addrs, _ := net.LookupHost(name)
	//TODO: add error handing when release v3 comes out
	if len(addrs) == 0 {
		return ""
	}
	return addrs[rand.Intn(len(addrs))]
}

+ 228 - 0
vendor/github.com/go-task/slim-sprig/numeric.go

@@ -0,0 +1,228 @@
+package sprig
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
// toFloat64 converts 64-bit floats
//
// Accepts numeric strings, all int/uint/float kinds (through
// pointers, via Indirect), and bool (true -> 1). Anything
// unconvertible — including an unparseable string — yields 0.
func toFloat64(v interface{}) float64 {
	if str, ok := v.(string); ok {
		iv, err := strconv.ParseFloat(str, 64)
		if err != nil {
			return 0
		}
		return iv
	}

	val := reflect.Indirect(reflect.ValueOf(v))
	switch val.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return float64(val.Int())
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return float64(val.Uint())
	case reflect.Uint, reflect.Uint64:
		return float64(val.Uint())
	case reflect.Float32, reflect.Float64:
		return val.Float()
	case reflect.Bool:
		if val.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}

// toInt converts v to an int by way of toInt64.
func toInt(v interface{}) int {
	//It's not optimal. But I don't want duplicate toInt64 code.
	return int(toInt64(v))
}

// toInt64 converts integer types to 64-bit integers
//
// Accepts the same inputs as toFloat64; floats are truncated and
// uint64 values above math.MaxInt64 are clamped.
func toInt64(v interface{}) int64 {
	if str, ok := v.(string); ok {
		iv, err := strconv.ParseInt(str, 10, 64)
		if err != nil {
			return 0
		}
		return iv
	}

	val := reflect.Indirect(reflect.ValueOf(v))
	switch val.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return val.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return int64(val.Uint())
	case reflect.Uint, reflect.Uint64:
		tv := val.Uint()
		if tv <= math.MaxInt64 {
			return int64(tv)
		}
		// TODO: What is the sensible thing to do here?
		return math.MaxInt64
	case reflect.Float32, reflect.Float64:
		return int64(val.Float())
	case reflect.Bool:
		if val.Bool() {
			return 1
		}
		return 0
	default:
		return 0
	}
}
+
+func max(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb > aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Max(aa, bb)
+	}
+	return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+	aa := toInt64(a)
+	for _, b := range i {
+		bb := toInt64(b)
+		if bb < aa {
+			aa = bb
+		}
+	}
+	return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+	aa := toFloat64(a)
+	for _, b := range i {
+		bb := toFloat64(b)
+		aa = math.Min(aa, bb)
+	}
+	return aa
+}
+
// until counts from 0 toward count (exclusive), stepping -1 when
// count is negative.
func until(count int) []int {
	if count < 0 {
		return untilStep(0, count, -1)
	}
	return untilStep(0, count, 1)
}

// untilStep counts from start toward stop (exclusive) in increments of
// step. A step moving away from stop yields an empty slice.
func untilStep(start, stop, step int) []int {
	out := []int{}
	switch {
	case stop < start:
		if step >= 0 {
			return out
		}
		for i := start; i > stop; i += step {
			out = append(out, i)
		}
	case step > 0:
		for i := start; i < stop; i += step {
			out = append(out, i)
		}
	}
	return out
}
+
// floor rounds a down to the nearest integer after numeric coercion.
func floor(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Floor(aa)
}

// ceil rounds a up to the nearest integer after numeric coercion.
func ceil(a interface{}) float64 {
	aa := toFloat64(a)
	return math.Ceil(aa)
}

// round rounds a to p decimal places. The optional rOpt overrides the
// default .5 threshold at which a fractional digit rounds up.
func round(a interface{}, p int, rOpt ...float64) float64 {
	roundOn := .5
	if len(rOpt) > 0 {
		roundOn = rOpt[0]
	}
	val := toFloat64(a)
	places := toFloat64(p)

	var round float64
	// Shift the target digit to the integer position, round there,
	// then shift back.
	pow := math.Pow(10, places)
	digit := pow * val
	_, div := math.Modf(digit)
	if div >= roundOn {
		round = math.Ceil(digit)
	} else {
		round = math.Floor(digit)
	}
	return round / pow
}
+
// toDecimal interprets v (stringified) as a unix octal number and
// returns its decimal value, or 0 when v is not valid octal.
func toDecimal(v interface{}) int64 {
	parsed, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
	if err != nil {
		return 0
	}
	return parsed
}
+
// seq mimics GNU seq: `seq end`, `seq start end`, or
// `seq start step end` (note the step sits in the middle). Bounds are
// inclusive and the result is space-joined; a step pointing away from
// end, or an unsupported argument count, yields "".
func seq(params ...int) string {
	increment := 1
	switch len(params) {
	case 0:
		return ""
	case 1:
		start := 1
		end := params[0]
		// Count down when the end lies below the implicit start of 1.
		if end < start {
			increment = -1
		}
		return intArrayToString(untilStep(start, end+increment, increment), " ")
	case 3:
		start := params[0]
		end := params[2]
		step := params[1]
		if end < start {
			increment = -1
			if step > 0 {
				return ""
			}
		}
		return intArrayToString(untilStep(start, end+increment, step), " ")
	case 2:
		start := params[0]
		end := params[1]
		step := 1
		if end < start {
			step = -1
		}
		return intArrayToString(untilStep(start, end+step, step), " ")
	default:
		return ""
	}
}
+
// intArrayToString renders slice as delimiter-separated values with
// the surrounding brackets stripped, e.g. [1 2] -> "1 2".
func intArrayToString(slice []int, delimiter string) string {
	joined := strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter)
	return strings.Trim(joined, "[]")
}

+ 28 - 0
vendor/github.com/go-task/slim-sprig/reflect.go

@@ -0,0 +1,28 @@
+package sprig
+
+import (
+	"fmt"
+	"reflect"
+)
+
// typeIs reports whether src's dynamic type renders exactly as target.
func typeIs(target string, src interface{}) bool {
	return typeOf(src) == target
}

// typeIsLike reports whether src's dynamic type is target itself or a
// pointer to target.
func typeIsLike(target string, src interface{}) bool {
	actual := typeOf(src)
	return actual == target || actual == "*"+target
}

// typeOf renders src's dynamic type name using fmt's %T verb.
func typeOf(src interface{}) string {
	return fmt.Sprintf("%T", src)
}
+
// kindIs reports whether src's reflect.Kind name equals target.
func kindIs(target string, src interface{}) bool {
	return kindOf(src) == target
}

// kindOf returns the name of src's reflect.Kind (e.g. "slice", "string").
func kindOf(src interface{}) string {
	return reflect.ValueOf(src).Kind().String()
}

+ 83 - 0
vendor/github.com/go-task/slim-sprig/regex.go

@@ -0,0 +1,83 @@
+package sprig
+
+import (
+	"regexp"
+)
+
// regexMatch reports whether s contains any match of regex; a pattern
// that fails to compile simply reports false.
func regexMatch(regex string, s string) bool {
	matched, _ := regexp.MatchString(regex, s)
	return matched
}

// mustRegexMatch is the error-propagating variant of regexMatch.
func mustRegexMatch(regex string, s string) (bool, error) {
	return regexp.MatchString(regex, s)
}
+
// regexFindAll returns up to n matches of regex in s (all matches when
// n < 0); an invalid pattern panics via MustCompile.
func regexFindAll(regex string, s string, n int) []string {
	return regexp.MustCompile(regex).FindAllString(s, n)
}

// mustRegexFindAll is the error-returning variant of regexFindAll: an
// invalid pattern yields an empty slice plus the compile error.
func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return re.FindAllString(s, n), nil
}
+
// regexFind returns the leftmost match of regex in s ("" when there is
// none); an invalid pattern panics via MustCompile.
func regexFind(regex string, s string) string {
	return regexp.MustCompile(regex).FindString(s)
}

// mustRegexFind is the error-returning variant of regexFind.
func mustRegexFind(regex string, s string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.FindString(s), nil
}
+
// regexReplaceAll substitutes every match of regex in s with repl
// (capture-group references like $1 are expanded); an invalid pattern
// panics via MustCompile.
func regexReplaceAll(regex string, s string, repl string) string {
	return regexp.MustCompile(regex).ReplaceAllString(s, repl)
}

// mustRegexReplaceAll is the error-returning variant of regexReplaceAll.
func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.ReplaceAllString(s, repl), nil
}
+
// regexReplaceAllLiteral substitutes every match of regex in s with repl
// taken literally ($ is not expanded); an invalid pattern panics via
// MustCompile.
func regexReplaceAllLiteral(regex string, s string, repl string) string {
	return regexp.MustCompile(regex).ReplaceAllLiteralString(s, repl)
}

// mustRegexReplaceAllLiteral is the error-returning variant of
// regexReplaceAllLiteral.
func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return "", err
	}
	return re.ReplaceAllLiteralString(s, repl), nil
}
+
// regexSplit splits s around matches of regex into at most n pieces (all
// pieces when n < 0); an invalid pattern panics via MustCompile.
func regexSplit(regex string, s string, n int) []string {
	return regexp.MustCompile(regex).Split(s, n)
}

// mustRegexSplit is the error-returning variant of regexSplit: an invalid
// pattern yields an empty slice plus the compile error.
func mustRegexSplit(regex string, s string, n int) ([]string, error) {
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	return re.Split(s, n), nil
}
+
// regexQuoteMeta escapes every regular-expression metacharacter in s so
// the result matches s literally.
func regexQuoteMeta(s string) string {
	return regexp.QuoteMeta(s)
}

+ 189 - 0
vendor/github.com/go-task/slim-sprig/strings.go

@@ -0,0 +1,189 @@
+package sprig
+
+import (
+	"encoding/base32"
+	"encoding/base64"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
// base64encode returns v encoded with the standard base64 alphabet.
func base64encode(v string) string {
	return base64.StdEncoding.EncodeToString([]byte(v))
}

// base64decode decodes standard base64; when v is not valid base64 the
// error's own text is returned in place of the decoded data.
func base64decode(v string) string {
	decoded, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(decoded)
}
+
// base32encode returns v encoded with the standard base32 alphabet.
func base32encode(v string) string {
	return base32.StdEncoding.EncodeToString([]byte(v))
}

// base32decode decodes standard base32; when v is not valid base32 the
// error's own text is returned in place of the decoded data.
func base32decode(v string) string {
	decoded, err := base32.StdEncoding.DecodeString(v)
	if err != nil {
		return err.Error()
	}
	return string(decoded)
}
+
+func quote(str ...interface{}) string {
+	out := make([]string, 0, len(str))
+	for _, s := range str {
+		if s != nil {
+			out = append(out, fmt.Sprintf("%q", strval(s)))
+		}
+	}
+	return strings.Join(out, " ")
+}
+
// squote renders each non-nil argument inside single quotes and joins
// the results with single spaces.
func squote(str ...interface{}) string {
	parts := make([]string, 0, len(str))
	for _, item := range str {
		if item == nil {
			continue
		}
		parts = append(parts, fmt.Sprintf("'%v'", item))
	}
	return strings.Join(parts, " ")
}
+
+func cat(v ...interface{}) string {
+	v = removeNilElements(v)
+	r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+	return fmt.Sprintf(r, v...)
+}
+
// indent prefixes every line of v with the given number of spaces.
func indent(spaces int, v string) string {
	pad := strings.Repeat(" ", spaces)
	return pad + strings.ReplaceAll(v, "\n", "\n"+pad)
}

// nindent is indent preceded by a newline, convenient in templates.
func nindent(spaces int, v string) string {
	return "\n" + indent(spaces, v)
}
+
// replace substitutes every occurrence of old in src with new.
func replace(old, new, src string) string {
	return strings.ReplaceAll(src, old, new)
}
+
// plural picks the singular form when count is exactly 1 and the plural
// form otherwise (including 0 and negative counts).
func plural(one, many string, count int) string {
	if count != 1 {
		return many
	}
	return one
}
+
+func strslice(v interface{}) []string {
+	switch v := v.(type) {
+	case []string:
+		return v
+	case []interface{}:
+		b := make([]string, 0, len(v))
+		for _, s := range v {
+			if s != nil {
+				b = append(b, strval(s))
+			}
+		}
+		return b
+	default:
+		val := reflect.ValueOf(v)
+		switch val.Kind() {
+		case reflect.Array, reflect.Slice:
+			l := val.Len()
+			b := make([]string, 0, l)
+			for i := 0; i < l; i++ {
+				value := val.Index(i).Interface()
+				if value != nil {
+					b = append(b, strval(value))
+				}
+			}
+			return b
+		default:
+			if v == nil {
+				return []string{}
+			}
+
+			return []string{strval(v)}
+		}
+	}
+}
+
// removeNilElements returns a copy of v with every nil element dropped.
func removeNilElements(v []interface{}) []interface{} {
	filtered := make([]interface{}, 0, len(v))
	for _, item := range v {
		if item == nil {
			continue
		}
		filtered = append(filtered, item)
	}
	return filtered
}
+
// strval converts v to a string, preferring cheap direct conversions
// (string, []byte) and the error/Stringer interfaces before falling back
// to fmt's default %v formatting.
func strval(v interface{}) string {
	switch value := v.(type) {
	case string:
		return value
	case []byte:
		return string(value)
	case error:
		return value.Error()
	case fmt.Stringer:
		return value.String()
	}
	return fmt.Sprintf("%v", v)
}
+
// trunc shortens s to c bytes; a negative c keeps the last -c bytes
// instead. Strings already short enough are returned unchanged. Note the
// cut is byte-based, not rune-based.
func trunc(c int, s string) string {
	switch {
	case c < 0 && len(s)+c > 0:
		return s[len(s)+c:]
	case c >= 0 && len(s) > c:
		return s[:c]
	}
	return s
}
+
// join concatenates the string forms of v's elements (as produced by
// strslice, which accepts any slice/array or single value) with sep
// between them.
func join(sep string, v interface{}) string {
	return strings.Join(strslice(v), sep)
}
+
// split breaks orig around every occurrence of sep and returns the pieces
// in a map keyed "_0", "_1", ... (template-friendly indexing).
func split(sep, orig string) map[string]string {
	pieces := strings.Split(orig, sep)
	res := make(map[string]string, len(pieces))
	for i, piece := range pieces {
		res["_"+strconv.Itoa(i)] = piece
	}
	return res
}
+
// splitn is like split but produces at most n pieces; the final piece
// keeps the unsplit remainder. Keys are "_0", "_1", ...
func splitn(sep string, n int, orig string) map[string]string {
	pieces := strings.SplitN(orig, sep, n)
	res := make(map[string]string, len(pieces))
	for i, piece := range pieces {
		res["_"+strconv.Itoa(i)] = piece
	}
	return res
}
+
// substring creates a substring of the given string by byte offsets.
//
// A negative start means "from the beginning of s"; a negative end, or an
// end beyond len(s), means "through the end of s".
//
// The previous implementation sliced s[:end] whenever start < 0 without
// validating end, so substring(-1, -1, s) — and any negative start paired
// with an out-of-range end — panicked. Normalizing end first fixes that
// while returning exactly the same result for every input that did not
// panic before (including preserving the panic for start > len(s) or
// start > end, which remain programmer errors).
func substring(start, end int, s string) string {
	// Clamp an out-of-range end to "whole remaining string".
	if end < 0 || end > len(s) {
		end = len(s)
	}
	if start < 0 {
		return s[:end]
	}
	return s[start:end]
}

+ 66 - 0
vendor/github.com/go-task/slim-sprig/url.go

@@ -0,0 +1,66 @@
+package sprig
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+)
+
// dictGetOrEmpty returns the string stored in dict under key, or "" when
// the key is absent or holds nil. A non-nil, non-string value panics with
// a descriptive message.
//
// The nil check is new: previously a nil value reached
// reflect.TypeOf(value).Kind(), and since reflect.TypeOf(nil) returns a
// nil Type, that crashed with an opaque nil-pointer panic instead of the
// intended diagnostic.
func dictGetOrEmpty(dict map[string]interface{}, key string) string {
	value, ok := dict[key]
	if !ok || value == nil {
		return ""
	}
	tp := reflect.TypeOf(value).Kind()
	if tp != reflect.String {
		panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
	}
	return reflect.ValueOf(value).String()
}
+
+// parses given URL to return dict object
+func urlParse(v string) map[string]interface{} {
+	dict := map[string]interface{}{}
+	parsedURL, err := url.Parse(v)
+	if err != nil {
+		panic(fmt.Sprintf("unable to parse url: %s", err))
+	}
+	dict["scheme"] = parsedURL.Scheme
+	dict["host"] = parsedURL.Host
+	dict["hostname"] = parsedURL.Hostname()
+	dict["path"] = parsedURL.Path
+	dict["query"] = parsedURL.RawQuery
+	dict["opaque"] = parsedURL.Opaque
+	dict["fragment"] = parsedURL.Fragment
+	if parsedURL.User != nil {
+		dict["userinfo"] = parsedURL.User.String()
+	} else {
+		dict["userinfo"] = ""
+	}
+
+	return dict
+}
+
+// join given dict to URL string
+func urlJoin(d map[string]interface{}) string {
+	resURL := url.URL{
+		Scheme:   dictGetOrEmpty(d, "scheme"),
+		Host:     dictGetOrEmpty(d, "host"),
+		Path:     dictGetOrEmpty(d, "path"),
+		RawQuery: dictGetOrEmpty(d, "query"),
+		Opaque:   dictGetOrEmpty(d, "opaque"),
+		Fragment: dictGetOrEmpty(d, "fragment"),
+	}
+	userinfo := dictGetOrEmpty(d, "userinfo")
+	var user *url.Userinfo
+	if userinfo != "" {
+		tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+		if err != nil {
+			panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+		}
+		user = tempURL.User
+	}
+
+	resURL.User = user
+	return resURL.String()
+}

+ 7 - 0
vendor/github.com/google/pprof/AUTHORS

@@ -0,0 +1,7 @@
+# This is the official list of pprof authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+Google Inc.

+ 16 - 0
vendor/github.com/google/pprof/CONTRIBUTORS

@@ -0,0 +1,16 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+#     Name <email address>
+Raul Silvera <[email protected]>
+Tipp Moseley <[email protected]>
+Hyoun Kyu Cho <[email protected]>
+Martin Spier <[email protected]>
+Taco de Wolff <[email protected]>
+Andrew Hunter <[email protected]>

+ 202 - 0
vendor/github.com/google/pprof/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 567 - 0
vendor/github.com/google/pprof/profile/encode.go

@@ -0,0 +1,567 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+	"errors"
+	"sort"
+)
+
// decoder returns the per-field decoder table used when unmarshaling a
// serialized Profile message; the slice is indexed by proto field number.
func (p *Profile) decoder() []decoder {
	return profileDecoder
}
+
// preEncode populates the unexported fields to be used by encode
// (with suffix X) from the corresponding exported fields. The
// exported fields are cleared up to facilitate testing.
func (p *Profile) preEncode() {
	// Intern every string into a string->index map; "" is interned first
	// so index 0 is always the empty string (the decoder enforces this).
	strings := make(map[string]int)
	addString(strings, "")

	for _, st := range p.SampleType {
		st.typeX = addString(strings, st.Type)
		st.unitX = addString(strings, st.Unit)
	}

	for _, s := range p.Sample {
		s.labelX = nil
		// String-valued labels; keys are sorted so the encoded output is
		// deterministic regardless of map iteration order.
		var keys []string
		for k := range s.Label {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		for _, k := range keys {
			vs := s.Label[k]
			for _, v := range vs {
				s.labelX = append(s.labelX,
					label{
						keyX: addString(strings, k),
						strX: addString(strings, v),
					},
				)
			}
		}
		// Numeric labels with optional per-value units, likewise sorted.
		var numKeys []string
		for k := range s.NumLabel {
			numKeys = append(numKeys, k)
		}
		sort.Strings(numKeys)
		for _, k := range numKeys {
			keyX := addString(strings, k)
			vs := s.NumLabel[k]
			units := s.NumUnit[k]
			for i, v := range vs {
				var unitX int64
				// An empty units slice means no unit for any value of k.
				if len(units) != 0 {
					unitX = addString(strings, units[i])
				}
				s.labelX = append(s.labelX,
					label{
						keyX:  keyX,
						numX:  v,
						unitX: unitX,
					},
				)
			}
		}
		// On the wire a sample references its locations by ID.
		s.locationIDX = make([]uint64, len(s.Location))
		for i, loc := range s.Location {
			s.locationIDX[i] = loc.ID
		}
	}

	for _, m := range p.Mapping {
		m.fileX = addString(strings, m.File)
		m.buildIDX = addString(strings, m.BuildID)
	}

	// Locations reference their mapping and line functions by ID; a nil
	// pointer encodes as ID 0 ("none").
	for _, l := range p.Location {
		for i, ln := range l.Line {
			if ln.Function != nil {
				l.Line[i].functionIDX = ln.Function.ID
			} else {
				l.Line[i].functionIDX = 0
			}
		}
		if l.Mapping != nil {
			l.mappingIDX = l.Mapping.ID
		} else {
			l.mappingIDX = 0
		}
	}
	for _, f := range p.Function {
		f.nameX = addString(strings, f.Name)
		f.systemNameX = addString(strings, f.SystemName)
		f.filenameX = addString(strings, f.Filename)
	}

	p.dropFramesX = addString(strings, p.DropFrames)
	p.keepFramesX = addString(strings, p.KeepFrames)

	if pt := p.PeriodType; pt != nil {
		pt.typeX = addString(strings, pt.Type)
		pt.unitX = addString(strings, pt.Unit)
	}

	p.commentX = nil
	for _, c := range p.Comments {
		p.commentX = append(p.commentX, addString(strings, c))
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)

	// Flatten the interning map into the positional string table that
	// encode emits as field 6.
	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
		p.stringTable[i] = s
	}
}
+
// encode serializes p into b using the Profile message's proto field
// numbers. The *X fields written here are the interned-string indices
// populated by preEncode, which must run first.
func (p *Profile) encode(b *buffer) {
	for _, x := range p.SampleType {
		encodeMessage(b, 1, x)
	}
	for _, x := range p.Sample {
		encodeMessage(b, 2, x)
	}
	for _, x := range p.Mapping {
		encodeMessage(b, 3, x)
	}
	for _, x := range p.Location {
		encodeMessage(b, 4, x)
	}
	for _, x := range p.Function {
		encodeMessage(b, 5, x)
	}
	encodeStrings(b, 6, p.stringTable)
	encodeInt64Opt(b, 7, p.dropFramesX)
	encodeInt64Opt(b, 8, p.keepFramesX)
	encodeInt64Opt(b, 9, p.TimeNanos)
	encodeInt64Opt(b, 10, p.DurationNanos)
	// Emit the period type only when it carries non-default content.
	if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
		encodeMessage(b, 11, p.PeriodType)
	}
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
}
+
// profileDecoder maps each Profile proto field number (the slice index)
// to a handler that decodes that field into a *Profile. Index 0 is
// unused: proto field numbers start at 1.
var profileDecoder = []decoder{
	nil, // 0
	// repeated ValueType sample_type = 1
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.SampleType = append(pp.SampleType, x)
		return decodeMessage(b, x)
	},
	// repeated Sample sample = 2
	func(b *buffer, m message) error {
		x := new(Sample)
		pp := m.(*Profile)
		pp.Sample = append(pp.Sample, x)
		return decodeMessage(b, x)
	},
	// repeated Mapping mapping = 3
	func(b *buffer, m message) error {
		x := new(Mapping)
		pp := m.(*Profile)
		pp.Mapping = append(pp.Mapping, x)
		return decodeMessage(b, x)
	},
	// repeated Location location = 4
	func(b *buffer, m message) error {
		x := new(Location)
		x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
		pp := m.(*Profile)
		pp.Location = append(pp.Location, x)
		err := decodeMessage(b, x)
		// Re-append into a fresh slice so the unused portion of the
		// 8-element pre-allocation is not retained after decoding.
		var tmp []Line
		x.Line = append(tmp, x.Line...) // Shrink to allocated size
		return err
	},
	// repeated Function function = 5
	func(b *buffer, m message) error {
		x := new(Function)
		pp := m.(*Profile)
		pp.Function = append(pp.Function, x)
		return decodeMessage(b, x)
	},
	// repeated string string_table = 6
	func(b *buffer, m message) error {
		err := decodeStrings(b, &m.(*Profile).stringTable)
		if err != nil {
			return err
		}
		// The proto format requires the first table entry to be "".
		if m.(*Profile).stringTable[0] != "" {
			return errors.New("string_table[0] must be ''")
		}
		return nil
	},
	// int64 drop_frames = 7
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
	// int64 keep_frames = 8
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
	// int64 time_nanos = 9
	func(b *buffer, m message) error {
		// A second time_nanos field indicates two concatenated profiles,
		// which this decoder rejects.
		if m.(*Profile).TimeNanos != 0 {
			return errConcatProfile
		}
		return decodeInt64(b, &m.(*Profile).TimeNanos)
	},
	// int64 duration_nanos = 10
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
	// ValueType period_type = 11
	func(b *buffer, m message) error {
		x := new(ValueType)
		pp := m.(*Profile)
		pp.PeriodType = x
		return decodeMessage(b, x)
	},
	// int64 period = 12
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
	// repeated int64 comment = 13
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
}
+
// postDecode takes the unexported fields populated by decode (with
// suffix X) and populates the corresponding exported fields.
// The unexported fields are cleared up to facilitate testing.
//
// ID references are resolved with a two-tier lookup: IDs smaller than
// len+1 index a dense slice (the common case of sequential IDs), and
// larger/sparse IDs fall back to a map. err is threaded through every
// getString call and reported once at the end.
func (p *Profile) postDecode() error {
	var err error
	mappings := make(map[uint64]*Mapping, len(p.Mapping))
	mappingIds := make([]*Mapping, len(p.Mapping)+1)
	for _, m := range p.Mapping {
		m.File, err = getString(p.stringTable, &m.fileX, err)
		m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
		if m.ID < uint64(len(mappingIds)) {
			mappingIds[m.ID] = m
		} else {
			mappings[m.ID] = m
		}
	}

	functions := make(map[uint64]*Function, len(p.Function))
	functionIds := make([]*Function, len(p.Function)+1)
	for _, f := range p.Function {
		f.Name, err = getString(p.stringTable, &f.nameX, err)
		f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
		f.Filename, err = getString(p.stringTable, &f.filenameX, err)
		if f.ID < uint64(len(functionIds)) {
			functionIds[f.ID] = f
		} else {
			functions[f.ID] = f
		}
	}

	locations := make(map[uint64]*Location, len(p.Location))
	locationIds := make([]*Location, len(p.Location)+1)
	for _, l := range p.Location {
		// Resolve the location's mapping and per-line function pointers
		// from the IDs recorded on the wire.
		if id := l.mappingIDX; id < uint64(len(mappingIds)) {
			l.Mapping = mappingIds[id]
		} else {
			l.Mapping = mappings[id]
		}
		l.mappingIDX = 0
		for i, ln := range l.Line {
			if id := ln.functionIDX; id != 0 {
				l.Line[i].functionIDX = 0
				if id < uint64(len(functionIds)) {
					l.Line[i].Function = functionIds[id]
				} else {
					l.Line[i].Function = functions[id]
				}
			}
		}
		if l.ID < uint64(len(locationIds)) {
			locationIds[l.ID] = l
		} else {
			locations[l.ID] = l
		}
	}

	for _, st := range p.SampleType {
		st.Type, err = getString(p.stringTable, &st.typeX, err)
		st.Unit, err = getString(p.stringTable, &st.unitX, err)
	}

	for _, s := range p.Sample {
		// Split the flat labelX list back into string labels and numeric
		// labels (with optional units).
		labels := make(map[string][]string, len(s.labelX))
		numLabels := make(map[string][]int64, len(s.labelX))
		numUnits := make(map[string][]string, len(s.labelX))
		for _, l := range s.labelX {
			var key, value string
			key, err = getString(p.stringTable, &l.keyX, err)
			if l.strX != 0 {
				value, err = getString(p.stringTable, &l.strX, err)
				labels[key] = append(labels[key], value)
			} else if l.numX != 0 || l.unitX != 0 {
				numValues := numLabels[key]
				units := numUnits[key]
				if l.unitX != 0 {
					var unit string
					unit, err = getString(p.stringTable, &l.unitX, err)
					// Pad so the unit lands at the same index as its value.
					units = padStringArray(units, len(numValues))
					numUnits[key] = append(units, unit)
				}
				numLabels[key] = append(numLabels[key], l.numX)
			}
		}
		if len(labels) > 0 {
			s.Label = labels
		}
		if len(numLabels) > 0 {
			s.NumLabel = numLabels
			// Pad trailing unitless values so units stay index-aligned.
			for key, units := range numUnits {
				if len(units) > 0 {
					numUnits[key] = padStringArray(units, len(numLabels[key]))
				}
			}
			s.NumUnit = numUnits
		}
		s.Location = make([]*Location, len(s.locationIDX))
		for i, lid := range s.locationIDX {
			if lid < uint64(len(locationIds)) {
				s.Location[i] = locationIds[lid]
			} else {
				s.Location[i] = locations[lid]
			}
		}
		s.locationIDX = nil
	}

	p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
	p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)

	// Guarantee a non-nil PeriodType for downstream consumers.
	if pt := p.PeriodType; pt == nil {
		p.PeriodType = &ValueType{}
	}

	if pt := p.PeriodType; pt != nil {
		pt.Type, err = getString(p.stringTable, &pt.typeX, err)
		pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
	}

	for _, i := range p.commentX {
		var c string
		c, err = getString(p.stringTable, &i, err)
		p.Comments = append(p.Comments, c)
	}

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.stringTable = nil
	return err
}
+
// padStringArray pads arr with enough empty strings to make arr
// length l when arr's length is less than l; otherwise arr is returned
// unchanged.
func padStringArray(arr []string, l int) []string {
	for len(arr) < l {
		arr = append(arr, "")
	}
	return arr
}
+
// decoder returns the field decoder table for the ValueType message.
func (p *ValueType) decoder() []decoder {
	return valueTypeDecoder
}

// encode serializes p's interned type/unit string-table indices.
func (p *ValueType) encode(b *buffer) {
	encodeInt64Opt(b, 1, p.typeX)
	encodeInt64Opt(b, 2, p.unitX)
}

// valueTypeDecoder maps ValueType proto field numbers (the slice index)
// to their decode handlers; index 0 is unused.
var valueTypeDecoder = []decoder{
	nil, // 0
	// optional int64 type = 1
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
	// optional int64 unit = 2
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
}
+
+// decoder returns the protobuf field decoders for Sample.
+func (p *Sample) decoder() []decoder {
+	return sampleDecoder
+}
+
+// encode serializes the sample's location indices, values and labels into b.
+func (p *Sample) encode(b *buffer) {
+	encodeUint64s(b, 1, p.locationIDX)
+	encodeInt64s(b, 2, p.Value)
+	for _, x := range p.labelX {
+		encodeMessage(b, 3, x)
+	}
+}
+
+// sampleDecoder maps Sample proto field numbers to decode functions.
+var sampleDecoder = []decoder{
+	nil, // 0
+	// repeated uint64 location = 1
+	func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
+	// repeated int64 value = 2
+	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
+	// repeated Label label = 3
+	func(b *buffer, m message) error {
+		s := m.(*Sample)
+		// Append a zero label first so the decoder can write into it in place.
+		n := len(s.labelX)
+		s.labelX = append(s.labelX, label{})
+		return decodeMessage(b, &s.labelX[n])
+	},
+}
+
+// decoder returns the protobuf field decoders for label.
+func (p label) decoder() []decoder {
+	return labelDecoder
+}
+
+// encode serializes the label's string-table indices and numeric value into b.
+func (p label) encode(b *buffer) {
+	encodeInt64Opt(b, 1, p.keyX)
+	encodeInt64Opt(b, 2, p.strX)
+	encodeInt64Opt(b, 3, p.numX)
+	encodeInt64Opt(b, 4, p.unitX)
+}
+
+// labelDecoder maps Label proto field numbers to decode functions.
+var labelDecoder = []decoder{
+	nil, // 0
+	// optional int64 key = 1
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
+	// optional int64 str = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
+	// optional int64 num = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
+	// optional int64 num_unit = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
+}
+
+// decoder returns the protobuf field decoders for Mapping.
+func (p *Mapping) decoder() []decoder {
+	return mappingDecoder
+}
+
+// encode serializes the mapping's fields into b.
+func (p *Mapping) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.Start)
+	encodeUint64Opt(b, 3, p.Limit)
+	encodeUint64Opt(b, 4, p.Offset)
+	encodeInt64Opt(b, 5, p.fileX)
+	encodeInt64Opt(b, 6, p.buildIDX)
+	encodeBoolOpt(b, 7, p.HasFunctions)
+	encodeBoolOpt(b, 8, p.HasFilenames)
+	encodeBoolOpt(b, 9, p.HasLineNumbers)
+	encodeBoolOpt(b, 10, p.HasInlineFrames)
+}
+
+// mappingDecoder maps Mapping proto field numbers to decode functions.
+var mappingDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) },            // optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) },         // optional uint64 memory_start = 2
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) },         // optional uint64 memory_limit = 3
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) },        // optional uint64 file_offset = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) },          // optional int64 filename = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) },       // optional int64 build_id = 6
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) },    // optional bool has_functions = 7
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) },    // optional bool has_filenames = 8
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) },  // optional bool has_line_numbers = 9
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
+}
+
+// decoder returns the protobuf field decoders for Location.
+func (p *Location) decoder() []decoder {
+	return locationDecoder
+}
+
+// encode serializes the location, including its nested Line messages, into b.
+func (p *Location) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeUint64Opt(b, 2, p.mappingIDX)
+	encodeUint64Opt(b, 3, p.Address)
+	for i := range p.Line {
+		encodeMessage(b, 4, &p.Line[i])
+	}
+	encodeBoolOpt(b, 5, p.IsFolded)
+}
+
+// locationDecoder maps Location proto field numbers to decode functions.
+var locationDecoder = []decoder{
+	nil, // 0
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) },         // optional uint64 id = 1;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) },    // optional uint64 address = 3;
+	func(b *buffer, m message) error { // repeated Line line = 4
+		pp := m.(*Location)
+		// Append a zero Line first so the decoder can write into it in place.
+		n := len(pp.Line)
+		pp.Line = append(pp.Line, Line{})
+		return decodeMessage(b, &pp.Line[n])
+	},
+	func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
+}
+
+// decoder returns the protobuf field decoders for Line.
+func (p *Line) decoder() []decoder {
+	return lineDecoder
+}
+
+// encode serializes the line's function index and line number into b.
+func (p *Line) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.functionIDX)
+	encodeInt64Opt(b, 2, p.Line)
+}
+
+// lineDecoder maps Line proto field numbers to decode functions.
+var lineDecoder = []decoder{
+	nil, // 0
+	// optional uint64 function_id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
+	// optional int64 line = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
+}
+
+// decoder returns the protobuf field decoders for Function.
+func (p *Function) decoder() []decoder {
+	return functionDecoder
+}
+
+// encode serializes the function's fields into b.
+func (p *Function) encode(b *buffer) {
+	encodeUint64Opt(b, 1, p.ID)
+	encodeInt64Opt(b, 2, p.nameX)
+	encodeInt64Opt(b, 3, p.systemNameX)
+	encodeInt64Opt(b, 4, p.filenameX)
+	encodeInt64Opt(b, 5, p.StartLine)
+}
+
+// functionDecoder maps Function proto field numbers to decode functions.
+var functionDecoder = []decoder{
+	nil, // 0
+	// optional uint64 id = 1
+	func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
+	// optional int64 function_name = 2
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
+	// optional int64 function_system_name = 3
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
+	// optional int64 filename = 4
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
+	// optional int64 start_line = 5
+	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
+}
+
+// addString returns the string-table index of s, inserting s with the
+// next available index (the current table size) if it is not already
+// present.
+func addString(strings map[string]int, s string) int64 {
+	i, ok := strings[s]
+	if !ok {
+		i = len(strings)
+		strings[s] = i
+	}
+	return int64(i)
+}
+
+// getString resolves a string-table index *strng to its string and resets
+// the index to 0. The err parameter is threaded through so call sites can
+// chain several lookups and check a single error at the end: a non-nil
+// incoming err is returned unchanged, and an out-of-range index yields
+// errMalformed.
+func getString(strings []string, strng *int64, err error) (string, error) {
+	if err != nil {
+		return "", err
+	}
+	s := int(*strng)
+	if s < 0 || s >= len(strings) {
+		return "", errMalformed
+	}
+	*strng = 0
+	return strings[s], nil
+}

+ 270 - 0
vendor/github.com/google/pprof/profile/filter.go

@@ -0,0 +1,270 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+// Implements methods to filter samples from profiles.
+
+import "regexp"
+
+// FilterSamplesByName filters the samples in a profile and only keeps
+// samples where at least one frame matches focus but none match ignore.
+// Returns true if the corresponding regexp matched at least one sample.
+// Any of the regexps may be nil, which disables that particular filter.
+func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
+	// focusOrIgnore records, per location ID, true for focused and false
+	// for ignored; IDs absent from the map are neutral.
+	focusOrIgnore := make(map[uint64]bool)
+	// hidden records location IDs whose lines were all removed by hide/show.
+	hidden := make(map[uint64]bool)
+	for _, l := range p.Location {
+		if ignore != nil && l.matchesName(ignore) {
+			im = true
+			focusOrIgnore[l.ID] = false
+		} else if focus == nil || l.matchesName(focus) {
+			fm = true
+			focusOrIgnore[l.ID] = true
+		}
+
+		if hide != nil && l.matchesName(hide) {
+			hm = true
+			l.Line = l.unmatchedLines(hide)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			}
+		}
+		if show != nil {
+			l.Line = l.matchedLines(show)
+			if len(l.Line) == 0 {
+				hidden[l.ID] = true
+			} else {
+				hnm = true
+			}
+		}
+	}
+
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
+			if len(hidden) > 0 {
+				// Strip hidden locations from the sample's stack.
+				var locs []*Location
+				for _, loc := range sample.Location {
+					if !hidden[loc.ID] {
+						locs = append(locs, loc)
+					}
+				}
+				if len(locs) == 0 {
+					// Remove sample with no locations (by not adding it to s).
+					continue
+				}
+				sample.Location = locs
+			}
+			s = append(s, sample)
+		}
+	}
+	p.Sample = s
+
+	return
+}
+
+// ShowFrom drops all stack frames above the highest matching frame and returns
+// whether a match was found. If showFrom is nil it returns false and does not
+// modify the profile.
+//
+// Example: consider a sample with frames [A, B, C, B], where A is the root.
+// ShowFrom(nil) returns false and has frames [A, B, C, B].
+// ShowFrom(A) returns true and has frames [A, B, C, B].
+// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B].
+// ShowFrom(D) returns false and drops the sample because no frames remain.
+func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
+	if showFrom == nil {
+		return false
+	}
+	// showFromLocs stores location IDs that matched ShowFrom.
+	showFromLocs := make(map[uint64]bool)
+	// Apply to locations.
+	for _, loc := range p.Location {
+		if filterShowFromLocation(loc, showFrom) {
+			showFromLocs[loc.ID] = true
+			matched = true
+		}
+	}
+	// For all samples, strip locations after the highest matching one.
+	// Note: sample.Location is ordered leaf-first, so the scan runs from the
+	// root end of the stack back toward the leaf.
+	s := make([]*Sample, 0, len(p.Sample))
+	for _, sample := range p.Sample {
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			if showFromLocs[sample.Location[i].ID] {
+				sample.Location = sample.Location[:i+1]
+				s = append(s, sample)
+				break
+			}
+		}
+	}
+	// Samples with no matching frame are dropped (never appended to s).
+	p.Sample = s
+	return matched
+}
+
+// filterShowFromLocation tests a showFrom regex against a location, removes
+// lines after the last match and returns whether a match was found. If the
+// mapping is matched, then all lines are kept.
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
+	// A match on the mapping's object file keeps the location intact.
+	if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
+		return true
+	}
+	if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
+		loc.Line = loc.Line[:i+1]
+		return true
+	}
+	return false
+}
+
+// lastMatchedLineIndex returns the index of the last line that matches a regex,
+// or -1 if no match is found. A line matches when the regex matches its
+// function name or filename; lines without function info never match.
+func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
+	for i := len(loc.Line) - 1; i >= 0; i-- {
+		if fn := loc.Line[i].Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				return i
+			}
+		}
+	}
+	return -1
+}
+
+// FilterTagsByName filters the tags in a profile and only keeps
+// tags that match show and not hide. Either regexp may be nil, which
+// disables that filter. sm and hm report whether show/hide matched
+// at least one tag name.
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
+	// matchRemove reports whether the tag named name should be deleted,
+	// recording in sm/hm whether show/hide matched anything.
+	matchRemove := func(name string) bool {
+		matchShow := show == nil || show.MatchString(name)
+		matchHide := hide != nil && hide.MatchString(name)
+
+		if matchShow {
+			sm = true
+		}
+		if matchHide {
+			hm = true
+		}
+		return !matchShow || matchHide
+	}
+	for _, s := range p.Sample {
+		for lab := range s.Label {
+			if matchRemove(lab) {
+				delete(s.Label, lab)
+			}
+		}
+		for lab := range s.NumLabel {
+			if matchRemove(lab) {
+				delete(s.NumLabel, lab)
+			}
+		}
+	}
+	return
+}
+
+// matchesName returns whether the location matches the regular
+// expression. It checks any available function names, file names, and
+// mapping object filename.
+func (loc *Location) matchesName(re *regexp.Regexp) bool {
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				return true
+			}
+		}
+	}
+	// Fall back to the mapping's object file name.
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return true
+	}
+	return false
+}
+
+// unmatchedLines returns the lines in the location that do not match
+// the regular expression. If the regexp matches the mapping's object
+// file, the whole location counts as matched and no lines are returned.
+func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return nil
+	}
+	var lines []Line
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
+				continue
+			}
+		}
+		lines = append(lines, ln)
+	}
+	return lines
+}
+
+// matchedLines returns the lines in the location that match
+// the regular expression. If the regexp matches the mapping's object
+// file, all lines are kept.
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
+	if m := loc.Mapping; m != nil && re.MatchString(m.File) {
+		return loc.Line
+	}
+	var lines []Line
+	for _, ln := range loc.Line {
+		if fn := ln.Function; fn != nil {
+			if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
+				continue
+			}
+		}
+		// Note: lines without function info are kept.
+		lines = append(lines, ln)
+	}
+	return lines
+}
+
+// focusedAndNotIgnored looks up a slice of ids against a map of
+// focused/ignored locations. The map only contains locations that are
+// explicitly focused or ignored: true means focused, false means
+// ignored, and absent IDs are neutral. Returns whether there is at least
+// one focused location but no ignored locations.
+func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
+	var f bool
+	for _, loc := range locs {
+		if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
+			if focus {
+				// Found focused location. Must keep searching in case there
+				// is an ignored one as well.
+				f = true
+			} else {
+				// Found ignored location. Can return false right away.
+				return false
+			}
+		}
+	}
+	return f
+}
+
+// TagMatch selects tags for filtering.
+type TagMatch func(s *Sample) bool
+
+// FilterSamplesByTag removes all samples from the profile, except
+// those that match focus and do not match the ignore regular
+// expression. A nil focus keeps every sample; a nil ignore rejects
+// none. fm and im report whether focus/ignore matched at least one
+// sample.
+func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
+	samples := make([]*Sample, 0, len(p.Sample))
+	for _, s := range p.Sample {
+		focused, ignored := true, false
+		if focus != nil {
+			focused = focus(s)
+		}
+		if ignore != nil {
+			ignored = ignore(s)
+		}
+		fm = fm || focused
+		im = im || ignored
+		if focused && !ignored {
+			samples = append(samples, s)
+		}
+	}
+	p.Sample = samples
+	return
+}

+ 64 - 0
vendor/github.com/google/pprof/profile/index.go

@@ -0,0 +1,64 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// SampleIndexByName returns the appropriate index for a value of sample index.
+// If numeric, it returns the number, otherwise it looks up the text in the
+// profile sample types.
+func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
+	if sampleIndex == "" {
+		// Empty string: prefer the profile's DefaultSampleType if it names
+		// a known sample type.
+		if dst := p.DefaultSampleType; dst != "" {
+			for i, t := range sampleTypes(p) {
+				if t == dst {
+					return i, nil
+				}
+			}
+		}
+		// By default select the last sample value
+		return len(p.SampleType) - 1, nil
+	}
+	// A numeric index selects that sample type directly, after a range check.
+	if i, err := strconv.Atoi(sampleIndex); err == nil {
+		if i < 0 || i >= len(p.SampleType) {
+			return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
+		}
+		return i, nil
+	}
+
+	// Remove the inuse_ prefix to support legacy pprof options
+	// "inuse_space" and "inuse_objects" for profiles containing types
+	// "space" and "objects".
+	noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
+	for i, t := range p.SampleType {
+		if t.Type == sampleIndex || t.Type == noInuse {
+			return i, nil
+		}
+	}
+
+	return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
+}
+
+// sampleTypes returns the type names of p's sample value dimensions,
+// in SampleType order.
+func sampleTypes(p *Profile) []string {
+	types := make([]string, len(p.SampleType))
+	for i, t := range p.SampleType {
+		types[i] = t.Type
+	}
+	return types
+}

+ 315 - 0
vendor/github.com/google/pprof/profile/legacy_java_profile.go

@@ -0,0 +1,315 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert java legacy profiles into
+// the profile.proto format.
+
+package profile
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Regular expressions recognizing the pieces of java legacy profiles:
+// header attributes, sample lines, and the several location line formats.
+var (
+	attributeRx            = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
+	javaSampleRx           = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
+	javaLocationRx         = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
+	javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
+	javaLocationPathRx     = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
+)
+
+// javaCPUProfile returns a new Profile from profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+	p := &Profile{
+		// period is in microseconds; the profile records nanoseconds.
+		Period:     period * 1000,
+		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+		SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
+	}
+	var err error
+	var locs map[uint64]*Location
+	// The binary sample section comes first; the remaining bytes hold the
+	// textual location list.
+	if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
+		return nil, err
+	}
+
+	if err = parseJavaLocations(b, locs, p); err != nil {
+		return nil, err
+	}
+
+	// Strip out addresses for better merge.
+	if err = p.Aggregate(true, true, true, true, false); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseJavaProfile returns a new profile from heapz or contentionz
+// data. b is the profile bytes after the header.
+func parseJavaProfile(b []byte) (*Profile, error) {
+	// Split off the first line, which identifies the profile type.
+	h := bytes.SplitAfterN(b, []byte("\n"), 2)
+	if len(h) < 2 {
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		PeriodType: &ValueType{},
+	}
+	header := string(bytes.TrimSpace(h[0]))
+
+	var err error
+	var pType string
+	switch header {
+	case "--- heapz 1 ---":
+		pType = "heap"
+	case "--- contentionz 1 ---":
+		pType = "contention"
+	default:
+		return nil, errUnrecognized
+	}
+
+	// Parse the attribute header, then the samples, then the locations.
+	if b, err = parseJavaHeader(pType, h[1], p); err != nil {
+		return nil, err
+	}
+	var locs map[uint64]*Location
+	if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
+		return nil, err
+	}
+	if err = parseJavaLocations(b, locs, p); err != nil {
+		return nil, err
+	}
+
+	// Strip out addresses for better merge.
+	if err = p.Aggregate(true, true, true, true, false); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseJavaHeader parses the attribute section on a java profile and
+// populates a profile. Returns the remainder of the buffer after all
+// attributes.
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			h := attributeRx.FindStringSubmatch(line)
+			if h == nil {
+				// Not a valid attribute, exit.
+				// The first non-attribute line marks the end of the header.
+				return b, nil
+			}
+
+			attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+			var err error
+			// Attributes are interpreted relative to the profile type.
+			switch pType + "/" + attribute {
+			case "heap/format", "cpu/format", "contention/format":
+				if value != "java" {
+					return nil, errUnrecognized
+				}
+			case "heap/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "inuse_objects", Unit: "count"},
+					{Type: "inuse_space", Unit: value},
+				}
+			case "contention/resolution":
+				p.SampleType = []*ValueType{
+					{Type: "contentions", Unit: "count"},
+					{Type: "delay", Unit: value},
+				}
+			case "contention/sampling period":
+				p.PeriodType = &ValueType{
+					Type: "contentions", Unit: "count",
+				}
+				if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+			case "contention/ms since reset":
+				millis, err := strconv.ParseInt(value, 0, 64)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+				}
+				p.DurationNanos = millis * 1000 * 1000
+			default:
+				return nil, errUnrecognized
+			}
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples, together with a map from address to the
+// (not yet symbolized) Location created for it.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+	nextNewLine := bytes.IndexByte(b, byte('\n'))
+	locs := make(map[uint64]*Location)
+	for nextNewLine != -1 {
+		line := string(bytes.TrimSpace(b[0:nextNewLine]))
+		if line != "" {
+			sample := javaSampleRx.FindStringSubmatch(line)
+			if sample == nil {
+				// Not a valid sample, exit.
+				// The first non-sample line marks the end of the section.
+				return b, locs, nil
+			}
+
+			// Java profiles have data/fields inverted compared to other
+			// profile types.
+			var err error
+			value1, value2, value3 := sample[2], sample[1], sample[3]
+			addrs, err := parseHexAddresses(value3)
+			if err != nil {
+				return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+			}
+
+			// Create (or reuse) one Location per address in the stack.
+			var sloc []*Location
+			for _, addr := range addrs {
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			s := &Sample{
+				Value:    make([]int64, 2),
+				Location: sloc,
+			}
+
+			if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+			if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+				return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+			}
+
+			// Post-process values according to the profile type.
+			switch pType {
+			case "heap":
+				const javaHeapzSamplingRate = 524288 // 512K
+				if s.Value[0] == 0 {
+					return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+				}
+				s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+				s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+			case "contention":
+				// Scale contention counts/delays by the sampling period.
+				if period := p.Period; period != 0 {
+					s.Value[0] = s.Value[0] * p.Period
+					s.Value[1] = s.Value[1] * p.Period
+				}
+			}
+			p.Sample = append(p.Sample, s)
+		}
+		// Grab next line.
+		b = b[nextNewLine+1:]
+		nextNewLine = bytes.IndexByte(b, byte('\n'))
+	}
+	return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location.
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
+	r := bytes.NewBuffer(b)
+	// fns deduplicates Functions by name across locations.
+	fns := make(map[string]*Function)
+	for {
+		line, err := r.ReadString('\n')
+		if err != nil {
+			if err != io.EOF {
+				return err
+			}
+			// io.EOF with a non-empty line means a final line without a
+			// trailing newline; process it before exiting.
+			if line == "" {
+				break
+			}
+		}
+
+		if line = strings.TrimSpace(line); line == "" {
+			continue
+		}
+
+		jloc := javaLocationRx.FindStringSubmatch(line)
+		if len(jloc) != 3 {
+			continue
+		}
+		addr, err := strconv.ParseUint(jloc[1], 16, 64)
+		if err != nil {
+			return fmt.Errorf("parsing sample %s: %v", line, err)
+		}
+		loc := locs[addr]
+		if loc == nil {
+			// Unused/unseen
+			continue
+		}
+		var lineFunc, lineFile string
+		var lineNo int64
+
+		if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
+			// Found a line of the form: "function (file:line)"
+			lineFunc, lineFile = fileLine[1], fileLine[2]
+			if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
+				lineNo = n
+			}
+		} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
+			// If there's not a file:line, it's a shared library path.
+			// The path isn't interesting, so just give the .so.
+			lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
+		} else if strings.Contains(jloc[2], "generated stub/JIT") {
+			lineFunc = "STUB"
+		} else {
+			// Treat whole line as the function name. This is used by the
+			// java agent for internal states such as "GC" or "VM".
+			lineFunc = jloc[2]
+		}
+		fn := fns[lineFunc]
+
+		if fn == nil {
+			fn = &Function{
+				Name:       lineFunc,
+				SystemName: lineFunc,
+				Filename:   lineFile,
+			}
+			fns[lineFunc] = fn
+			p.Function = append(p.Function, fn)
+		}
+		loc.Line = []Line{
+			{
+				Function: fn,
+				Line:     lineNo,
+			},
+		}
+		// The address has been symbolized; drop it so locations merge by
+		// function/line instead.
+		loc.Address = 0
+	}
+
+	// Assign dense, stable IDs now that all entities are known.
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+
+	return nil
+}

+ 1225 - 0
vendor/github.com/google/pprof/profile/legacy_profile.go

@@ -0,0 +1,1225 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file implements parsers to convert legacy profiles into the
+// profile.proto format.
+
+package profile
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Regular expressions and pattern fragments used to recognize and parse
+// the various legacy text profile formats handled in this file.
+var (
+	countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`)
+	countRE      = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`)
+
+	heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`)
+	heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`)
+
+	contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`)
+
+	hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`)
+
+	growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`)
+
+	fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`)
+
+	threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`)
+	threadStartRE  = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`)
+
+	// Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools.
+	// Recommended format:
+	// Start   End     object file name     offset(optional)   linker build id
+	// 0x40000-0x80000 /path/to/binary      (@FF00)            abc123456
+	spaceDigits = `\s+[[:digit:]]+`
+	hexPair     = `\s+[[:xdigit:]]+:[[:xdigit:]]+`
+	oSpace      = `\s*`
+	// Capturing expressions.
+	cHex           = `(?:0x)?([[:xdigit:]]+)`
+	cHexRange      = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?`
+	cSpaceString   = `(?:\s+(\S+))?`
+	cSpaceHex      = `(?:\s+([[:xdigit:]]+))?`
+	cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?`
+	cPerm          = `(?:\s+([-rwxp]+))?`
+
+	procMapsRE  = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString)
+	briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex)
+
+	// Regular expression to parse log data, of the form:
+	// ... file:line] msg...
+	logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`)
+)
+
+// isSpaceOrComment reports whether line is blank or is a '#' comment line.
+func isSpaceOrComment(line string) bool {
+	trimmed := strings.TrimSpace(line)
+	return len(trimmed) == 0 || trimmed[0] == '#'
+}
+
+// parseGoCount parses a Go count profile (e.g., threadcreate or
+// goroutine) and returns a new Profile.
+func parseGoCount(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	// Skip comments at the beginning of the file.
+	for s.Scan() && isSpaceOrComment(s.Text()) {
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	// The first real line must be the header, e.g. "goroutine profile: total 15".
+	m := countStartRE.FindStringSubmatch(s.Text())
+	if m == nil {
+		return nil, errUnrecognized
+	}
+	profileType := m[1]
+	p := &Profile{
+		PeriodType: &ValueType{Type: profileType, Unit: "count"},
+		Period:     1,
+		SampleType: []*ValueType{{Type: profileType, Unit: "count"}},
+	}
+	locations := make(map[uint64]*Location)
+	for s.Scan() {
+		line := s.Text()
+		if isSpaceOrComment(line) {
+			continue
+		}
+		// A "---" line marks the start of additional sections (e.g. memory map).
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		m := countRE.FindStringSubmatch(line)
+		if m == nil {
+			return nil, errMalformed
+		}
+		n, err := strconv.ParseInt(m[1], 0, 64)
+		if err != nil {
+			return nil, errMalformed
+		}
+		fields := strings.Fields(m[2])
+		locs := make([]*Location, 0, len(fields))
+		for _, stk := range fields {
+			addr, err := strconv.ParseUint(stk, 0, 64)
+			if err != nil {
+				return nil, errMalformed
+			}
+			// Adjust all frames by -1 to land on top of the call instruction.
+			addr--
+			loc := locations[addr]
+			if loc == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				locations[addr] = loc
+				p.Location = append(p.Location, loc)
+			}
+			locs = append(locs, loc)
+		}
+		p.Sample = append(p.Sample, &Sample{
+			Location: locs,
+			Value:    []int64{n},
+		})
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// remapLocationIDs ensures there is a location for each address
+// referenced by a sample, and remaps the samples to point to the new
+// location ids.
+func (p *Profile) remapLocationIDs() {
+	seen := make(map[*Location]bool, len(p.Location))
+	var locs []*Location
+
+	// Assign dense 1-based IDs in order of first reference by a sample;
+	// locations referenced by no sample are dropped.
+	for _, s := range p.Sample {
+		for _, l := range s.Location {
+			if seen[l] {
+				continue
+			}
+			l.ID = uint64(len(locs) + 1)
+			locs = append(locs, l)
+			seen[l] = true
+		}
+	}
+	p.Location = locs
+}
+
+// remapFunctionIDs keeps only the functions referenced by some location
+// line, assigning dense 1-based IDs in order of first reference.
+func (p *Profile) remapFunctionIDs() {
+	seen := make(map[*Function]bool, len(p.Function))
+	var fns []*Function
+
+	for _, l := range p.Location {
+		for _, ln := range l.Line {
+			fn := ln.Function
+			if fn == nil || seen[fn] {
+				continue
+			}
+			fn.ID = uint64(len(fns) + 1)
+			fns = append(fns, fn)
+			seen[fn] = true
+		}
+	}
+	p.Function = fns
+}
+
+// remapMappingIDs matches location addresses with existing mappings
+// and updates them appropriately. This is O(N*M), if this ever shows
+// up as a bottleneck, evaluate sorting the mappings and doing a
+// binary search, which would make it O(N*log(M)).
+func (p *Profile) remapMappingIDs() {
+	// Some profile handlers will incorrectly set regions for the main
+	// executable if its section is remapped. Fix them through heuristics.
+
+	if len(p.Mapping) > 0 {
+		// Remove the initial mapping if named '/anon_hugepage' and has a
+		// consecutive adjacent mapping.
+		if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") {
+			if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start {
+				p.Mapping = p.Mapping[1:]
+			}
+		}
+	}
+
+	// Subtract the offset from the start of the main mapping if it
+	// ends up at a recognizable start address.
+	if len(p.Mapping) > 0 {
+		// 0x400000 is the conventional text-segment start for non-PIE
+		// x86-64 binaries.
+		const expectedStart = 0x400000
+		if m := p.Mapping[0]; m.Start-m.Offset == expectedStart {
+			m.Start = expectedStart
+			m.Offset = 0
+		}
+	}
+
+	// Associate each location with an address to the corresponding
+	// mapping. Create fake mapping if a suitable one isn't found.
+	var fake *Mapping
+nextLocation:
+	for _, l := range p.Location {
+		a := l.Address
+		if l.Mapping != nil || a == 0 {
+			continue
+		}
+		for _, m := range p.Mapping {
+			if m.Start <= a && a < m.Limit {
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// Work around legacy handlers failing to encode the first
+		// part of mappings split into adjacent ranges.
+		for _, m := range p.Mapping {
+			if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start {
+				m.Start -= m.Offset
+				m.Offset = 0
+				l.Mapping = m
+				continue nextLocation
+			}
+		}
+		// If there is still no mapping, create a fake one.
+		// This is important for the Go legacy handler, which produced
+		// no mappings.
+		if fake == nil {
+			fake = &Mapping{
+				ID:    1,
+				Limit: ^uint64(0),
+			}
+			p.Mapping = append(p.Mapping, fake)
+		}
+		l.Mapping = fake
+	}
+
+	// Reset all mapping IDs.
+	for i, m := range p.Mapping {
+		m.ID = uint64(i + 1)
+	}
+}
+
+// cpuInts lists candidate word decoders (32/64 bit, little/big endian);
+// parseCPU tries each in turn until the profile header parses.
+var cpuInts = []func([]byte) (uint64, []byte){
+	get32l,
+	get32b,
+	get64l,
+	get64b,
+}
+
+// get32l decodes a little-endian 32-bit word from b into a uint64 and
+// returns the remaining bytes, or (0, nil) if b is too short.
+func get32l(b []byte) (uint64, []byte) {
+	if len(b) < 4 {
+		return 0, nil
+	}
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:]
+}
+
+// get32b decodes a big-endian 32-bit word from b into a uint64 and
+// returns the remaining bytes, or (0, nil) if b is too short.
+func get32b(b []byte) (uint64, []byte) {
+	if len(b) < 4 {
+		return 0, nil
+	}
+	return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:]
+}
+
+// get64l decodes a little-endian 64-bit word from b and returns the
+// remaining bytes, or (0, nil) if b is too short.
+func get64l(b []byte) (uint64, []byte) {
+	if len(b) < 8 {
+		return 0, nil
+	}
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:]
+}
+
+// get64b decodes a big-endian 64-bit word from b and returns the
+// remaining bytes, or (0, nil) if b is too short.
+func get64b(b []byte) (uint64, []byte) {
+	if len(b) < 8 {
+		return 0, nil
+	}
+	return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:]
+}
+
+// parseCPU parses a profilez legacy profile and returns a newly
+// populated Profile.
+//
+// The general format for profilez samples is a sequence of words in
+// binary format. The first words are a header with the following data:
+//   1st word -- 0
+//   2nd word -- 3
+//   3rd word -- 0 if a c++ application, 1 if a java application.
+//   4th word -- Sampling period (in microseconds).
+//   5th word -- Padding.
+func parseCPU(b []byte) (*Profile, error) {
+	var parse func([]byte) (uint64, []byte)
+	var n1, n2, n3, n4, n5 uint64
+	// Try each word size/endianness until the 5-word header matches.
+	for _, parse = range cpuInts {
+		var tmp []byte
+		n1, tmp = parse(b)
+		n2, tmp = parse(tmp)
+		n3, tmp = parse(tmp)
+		n4, tmp = parse(tmp)
+		n5, tmp = parse(tmp)
+
+		// C++ profile header: 0, 3, 0, period, padding.
+		if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 {
+			b = tmp
+			return cpuProfile(b, int64(n4), parse)
+		}
+		// Java profile header: 0, 3, 1, period, padding.
+		if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 {
+			b = tmp
+			return javaCPUProfile(b, int64(n4), parse)
+		}
+	}
+	return nil, errUnrecognized
+}
+
+// cpuProfile returns a new Profile from C++ profilez data.
+// b is the profile bytes after the header, period is the profiling
+// period, and parse is a function to parse 8-byte chunks from the
+// profile in its native endianness.
+func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
+	p := &Profile{
+		// period is in microseconds; Profile.Period is in nanoseconds.
+		Period:     period * 1000,
+		PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
+		SampleType: []*ValueType{
+			{Type: "samples", Unit: "count"},
+			{Type: "cpu", Unit: "nanoseconds"},
+		},
+	}
+	var err error
+	if b, _, err = parseCPUSamples(b, parse, true, p); err != nil {
+		return nil, err
+	}
+
+	// If *most* samples have the same second-to-the-bottom frame, it
+	// strongly suggests that it is an uninteresting artifact of
+	// measurement -- a stack frame pushed by the signal handler. The
+	// bottom frame is always correct as it is picked up from the signal
+	// structure, not the stack. Check if this is the case and if so,
+	// remove.
+
+	// Remove up to two frames.
+	maxiter := 2
+	// Allow one different sample for this many samples with the same
+	// second-to-last frame.
+	similarSamples := 32
+	margin := len(p.Sample) / similarSamples
+
+	for iter := 0; iter < maxiter; iter++ {
+		// Count occurrences of each second-to-bottom frame address.
+		addr1 := make(map[uint64]int)
+		for _, s := range p.Sample {
+			if len(s.Location) > 1 {
+				a := s.Location[1].Address
+				addr1[a] = addr1[a] + 1
+			}
+		}
+
+		for id1, count := range addr1 {
+			if count >= len(p.Sample)-margin {
+				// Found uninteresting frame, strip it out from all samples
+				for _, s := range p.Sample {
+					if len(s.Location) > 1 && s.Location[1].Address == id1 {
+						s.Location = append(s.Location[:1], s.Location[2:]...)
+					}
+				}
+				break
+			}
+		}
+	}
+
+	// The remainder of the data is the process memory map.
+	if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil {
+		return nil, err
+	}
+
+	cleanupDuplicateLocations(p)
+	return p, nil
+}
+
+// cleanupDuplicateLocations removes a duplicated leaf frame (recorded at
+// leaf address + 1) that some profile handlers emit for each sample.
+func cleanupDuplicateLocations(p *Profile) {
+	// The profile handler may duplicate the leaf frame, because it gets
+	// its address both from stack unwinding and from the signal
+	// context. Detect this and delete the duplicate, which has been
+	// adjusted by -1. The leaf address should not be adjusted as it is
+	// not a call.
+	for _, s := range p.Sample {
+		if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 {
+			s.Location = append(s.Location[:1], s.Location[2:]...)
+		}
+	}
+}
+
+// parseCPUSamples parses a collection of profilez samples from a
+// profile.
+//
+// profilez samples are a repeated sequence of stack frames of the
+// form:
+//    1st word -- The number of times this stack was encountered.
+//    2nd word -- The size of the stack (StackSize).
+//    3rd word -- The first address on the stack.
+//    ...
+//    StackSize + 2 -- The last address on the stack
+// The last stack trace is of the form:
+//   1st word -- 0
+//   2nd word -- 1
+//   3rd word -- 0
+//
+// Addresses from stack traces may point to the next instruction after
+// each call. Optionally adjust by -1 to land somewhere on the actual
+// call (except for the leaf, which is not a call).
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) {
+	locs := make(map[uint64]*Location)
+	for len(b) > 0 {
+		var count, nstk uint64
+		count, b = parse(b)
+		nstk, b = parse(b)
+		// Sanity check: the stack cannot contain more words than remain
+		// in b (minimum word size is 4 bytes).
+		if b == nil || nstk > uint64(len(b)/4) {
+			return nil, nil, errUnrecognized
+		}
+		var sloc []*Location
+		addrs := make([]uint64, nstk)
+		for i := 0; i < int(nstk); i++ {
+			addrs[i], b = parse(b)
+		}
+
+		if count == 0 && nstk == 1 && addrs[0] == 0 {
+			// End of data marker
+			break
+		}
+		for i, addr := range addrs {
+			if adjust && i > 0 {
+				addr--
+			}
+			loc := locs[addr]
+			if loc == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				locs[addr] = loc
+				p.Location = append(p.Location, loc)
+			}
+			sloc = append(sloc, loc)
+		}
+		p.Sample = append(p.Sample,
+			&Sample{
+				Value:    []int64{int64(count), int64(count) * p.Period},
+				Location: sloc,
+			})
+	}
+	// Reached the end without finding the EOD marker.
+	return b, locs, nil
+}
+
+// parseHeap parses a heapz legacy or a growthz profile and
+// returns a newly populated Profile.
+func parseHeap(b []byte) (p *Profile, err error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	if !s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		return nil, errUnrecognized
+	}
+	p = &Profile{}
+
+	sampling := ""
+	hasAlloc := false
+
+	// The first line identifies the variant: heapz, growthz or fragmentationz.
+	line := s.Text()
+	p.PeriodType = &ValueType{Type: "space", Unit: "bytes"}
+	if header := heapHeaderRE.FindStringSubmatch(line); header != nil {
+		sampling, p.Period, hasAlloc, err = parseHeapHeader(line)
+		if err != nil {
+			return nil, err
+		}
+	} else if header = growthHeaderRE.FindStringSubmatch(line); header != nil {
+		p.Period = 1
+	} else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil {
+		p.Period = 1
+	} else {
+		return nil, errUnrecognized
+	}
+
+	if hasAlloc {
+		// Put alloc before inuse so that default pprof selection
+		// will prefer inuse_space.
+		p.SampleType = []*ValueType{
+			{Type: "alloc_objects", Unit: "count"},
+			{Type: "alloc_space", Unit: "bytes"},
+			{Type: "inuse_objects", Unit: "count"},
+			{Type: "inuse_space", Unit: "bytes"},
+		}
+	} else {
+		p.SampleType = []*ValueType{
+			{Type: "objects", Unit: "count"},
+			{Type: "space", Unit: "bytes"},
+		}
+	}
+
+	locs := make(map[uint64]*Location)
+	for s.Scan() {
+		line := strings.TrimSpace(s.Text())
+
+		if isSpaceOrComment(line) {
+			continue
+		}
+
+		// The memory map sentinel ends the sample section.
+		if isMemoryMapSentinel(line) {
+			break
+		}
+
+		value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc)
+		if err != nil {
+			return nil, err
+		}
+
+		var sloc []*Location
+		for _, addr := range addrs {
+			// Addresses from stack traces point to the next instruction after
+			// each call. Adjust by -1 to land somewhere on the actual call.
+			addr--
+			loc := locs[addr]
+			if locs[addr] == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				p.Location = append(p.Location, loc)
+				locs[addr] = loc
+			}
+			sloc = append(sloc, loc)
+		}
+
+		p.Sample = append(p.Sample, &Sample{
+			Value:    value,
+			Location: sloc,
+			NumLabel: map[string][]int64{"bytes": {blocksize}},
+		})
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// parseHeapHeader parses the header line of a heapz legacy profile,
+// returning the sampling scheme ("v2" or ""), the sampling period, and
+// whether the profile carries separate alloc and inuse counts.
+func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) {
+	header := heapHeaderRE.FindStringSubmatch(line)
+	if header == nil {
+		return "", 0, false, errUnrecognized
+	}
+
+	if len(header[6]) > 0 {
+		if period, err = strconv.ParseInt(header[6], 10, 64); err != nil {
+			return "", 0, false, errUnrecognized
+		}
+	}
+
+	// Groups 1,2 are the inuse counts and 3,4 the alloc counts; when they
+	// differ (and alloc is not zeroed out) the profile carries both.
+	if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") {
+		hasAlloc = true
+	}
+
+	switch header[5] {
+	case "heapz_v2", "heap_v2":
+		return "v2", period, hasAlloc, nil
+	case "heapprofile":
+		return "", 1, hasAlloc, nil
+	case "heap":
+		return "v2", period / 2, hasAlloc, nil
+	default:
+		return "", 0, false, errUnrecognized
+	}
+}
+
+// parseHeapSample parses a single row from a heap profile into a new Sample.
+func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) {
+	sampleData := heapSampleRE.FindStringSubmatch(line)
+	if len(sampleData) != 6 {
+		return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData))
+	}
+
+	// This is a local-scoped helper function to avoid needing to pass
+	// around rate, sampling and many return parameters.
+	addValues := func(countString, sizeString string, label string) error {
+		count, err := strconv.ParseInt(countString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		size, err := strconv.ParseInt(sizeString, 10, 64)
+		if err != nil {
+			return fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		if count == 0 && size != 0 {
+			return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size)
+		}
+		if count != 0 {
+			// blocksize is the average block size before unsampling.
+			blocksize = size / count
+			if sampling == "v2" {
+				count, size = scaleHeapSample(count, size, rate)
+			}
+		}
+		value = append(value, count, size)
+		return nil
+	}
+
+	// Values are appended in the same order as the profile's SampleType:
+	// alloc (if present) before inuse.
+	if includeAlloc {
+		if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil {
+			return nil, 0, nil, err
+		}
+	}
+
+	if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil {
+		return nil, 0, nil, err
+	}
+
+	addrs, err = parseHexAddresses(sampleData[5])
+	if err != nil {
+		return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	return value, blocksize, addrs, nil
+}
+
+// parseHexAddresses extracts hex numbers from a string, attempts to convert
+// each to an unsigned 64-bit number and returns the resulting numbers as a
+// slice, or an error if the string contains hex numbers which are too large to
+// handle (which means a malformed profile).
+func parseHexAddresses(s string) ([]uint64, error) {
+	hexStrings := hexNumberRE.FindAllString(s, -1)
+	var addrs []uint64
+	for _, s := range hexStrings {
+		// Base 0 honors the "0x" prefix matched by hexNumberRE.
+		if addr, err := strconv.ParseUint(s, 0, 64); err == nil {
+			addrs = append(addrs, addr)
+		} else {
+			return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s)
+		}
+	}
+	return addrs, nil
+}
+
+// scaleHeapSample adjusts the data from a heapz Sample to
+// account for its probability of appearing in the collected
+// data. heapz profiles are a sampling of the memory allocations
+// requests in a program. We estimate the unsampled value by dividing
+// each collected sample by its probability of appearing in the
+// profile. heapz v2 profiles rely on a poisson process to determine
+// which samples to collect, based on the desired average collection
+// rate R. The probability of a sample of size S to appear in that
+// profile is 1-exp(-S/R).
+func scaleHeapSample(count, size, rate int64) (int64, int64) {
+	if count == 0 || size == 0 {
+		return 0, 0
+	}
+
+	if rate <= 1 {
+		// if rate==1 all samples were collected so no adjustment is needed.
+		// if rate<1 treat as unknown and skip scaling.
+		return count, size
+	}
+
+	// Weight by the inverse of the probability 1-exp(-S/R) that a block
+	// of this average size S was sampled at rate R.
+	avgSize := float64(size) / float64(count)
+	scale := 1 / (1 - math.Exp(-avgSize/float64(rate)))
+
+	return int64(float64(count) * scale), int64(float64(size) * scale)
+}
+
+// parseContention parses a mutex or contention profile. There are 2 cases:
+// "--- contentionz " for legacy C++ profiles (and backwards compatibility)
+// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime.
+func parseContention(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	if !s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		return nil, errUnrecognized
+	}
+
+	// The first line must be one of the recognized section headers.
+	switch l := s.Text(); {
+	case strings.HasPrefix(l, "--- contentionz "):
+	case strings.HasPrefix(l, "--- mutex:"):
+	case strings.HasPrefix(l, "--- contention:"):
+	default:
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		PeriodType: &ValueType{Type: "contentions", Unit: "count"},
+		Period:     1,
+		SampleType: []*ValueType{
+			{Type: "contentions", Unit: "count"},
+			{Type: "delay", Unit: "nanoseconds"},
+		},
+	}
+
+	var cpuHz int64
+	// Parse text of the form "attribute = value" before the samples.
+	const delimiter = "="
+	for s.Scan() {
+		line := s.Text()
+		if line = strings.TrimSpace(line); isSpaceOrComment(line) {
+			continue
+		}
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		attr := strings.SplitN(line, delimiter, 2)
+		if len(attr) != 2 {
+			break
+		}
+		key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
+		var err error
+		switch key {
+		case "cycles/second":
+			if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
+				return nil, errUnrecognized
+			}
+		case "sampling period":
+			if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
+				return nil, errUnrecognized
+			}
+		case "ms since reset":
+			ms, err := strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return nil, errUnrecognized
+			}
+			p.DurationNanos = ms * 1000 * 1000
+		case "format":
+			// CPP contentionz profiles don't have format.
+			return nil, errUnrecognized
+		case "resolution":
+			// CPP contentionz profiles don't have resolution.
+			return nil, errUnrecognized
+		case "discarded samples":
+		default:
+			return nil, errUnrecognized
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// Parse the samples. Note that s.Text() still holds the line that
+	// terminated the attribute loop, so it is examined before scanning.
+	locs := make(map[uint64]*Location)
+	for {
+		line := strings.TrimSpace(s.Text())
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		if !isSpaceOrComment(line) {
+			value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
+			if err != nil {
+				return nil, err
+			}
+			var sloc []*Location
+			for _, addr := range addrs {
+				// Addresses from stack traces point to the next instruction after
+				// each call. Adjust by -1 to land somewhere on the actual call.
+				addr--
+				loc := locs[addr]
+				if locs[addr] == nil {
+					loc = &Location{
+						Address: addr,
+					}
+					p.Location = append(p.Location, loc)
+					locs[addr] = loc
+				}
+				sloc = append(sloc, loc)
+			}
+			p.Sample = append(p.Sample, &Sample{
+				Value:    value,
+				Location: sloc,
+			})
+		}
+		if !s.Scan() {
+			break
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+// parseContentionSample parses a single row from a contention profile
+// into a new Sample.
+func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
+	sampleData := contentionSampleRE.FindStringSubmatch(line)
+	if sampleData == nil {
+		return nil, nil, errUnrecognized
+	}
+
+	v1, err := strconv.ParseInt(sampleData[1], 10, 64)
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+	v2, err := strconv.ParseInt(sampleData[2], 10, 64)
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	// Unsample values if period and cpuHz are available.
+	// - Delays are scaled to cycles and then to nanoseconds.
+	// - Contentions are scaled to cycles.
+	if period > 0 {
+		if cpuHz > 0 {
+			cpuGHz := float64(cpuHz) / 1e9
+			v1 = int64(float64(v1) * float64(period) / cpuGHz)
+		}
+		v2 = v2 * period
+	}
+
+	// Ordered {contentions, delay} to match the profile's SampleType.
+	value = []int64{v2, v1}
+	addrs, err = parseHexAddresses(sampleData[3])
+	if err != nil {
+		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+	}
+
+	return value, addrs, nil
+}
+
+// parseThread parses a Threadz profile and returns a new Profile.
+func parseThread(b []byte) (*Profile, error) {
+	s := bufio.NewScanner(bytes.NewBuffer(b))
+	// Skip past comments and empty lines seeking a real header.
+	for s.Scan() && isSpaceOrComment(s.Text()) {
+	}
+
+	line := s.Text()
+	if m := threadzStartRE.FindStringSubmatch(line); m != nil {
+		// Advance over initial comments until first stack trace.
+		for s.Scan() {
+			if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
+				break
+			}
+		}
+	} else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+		return nil, errUnrecognized
+	}
+
+	p := &Profile{
+		SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
+		PeriodType: &ValueType{Type: "thread", Unit: "count"},
+		Period:     1,
+	}
+
+	locs := make(map[uint64]*Location)
+	// Recognize each thread and populate profile samples.
+	for !isMemoryMapSentinel(line) {
+		if strings.HasPrefix(line, "---- no stack trace for") {
+			// Trailing section without stacks; stop collecting samples.
+			line = ""
+			break
+		}
+		if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
+			return nil, errUnrecognized
+		}
+
+		var addrs []uint64
+		var err error
+		line, addrs, err = parseThreadSample(s)
+		if err != nil {
+			return nil, err
+		}
+		if len(addrs) == 0 {
+			// We got a --same as previous threads--. Bump counters.
+			if len(p.Sample) > 0 {
+				s := p.Sample[len(p.Sample)-1]
+				s.Value[0]++
+			}
+			continue
+		}
+
+		var sloc []*Location
+		for i, addr := range addrs {
+			// Addresses from stack traces point to the next instruction after
+			// each call. Adjust by -1 to land somewhere on the actual call
+			// (except for the leaf, which is not a call).
+			if i > 0 {
+				addr--
+			}
+			loc := locs[addr]
+			if locs[addr] == nil {
+				loc = &Location{
+					Address: addr,
+				}
+				p.Location = append(p.Location, loc)
+				locs[addr] = loc
+			}
+			sloc = append(sloc, loc)
+		}
+
+		p.Sample = append(p.Sample, &Sample{
+			Value:    []int64{1},
+			Location: sloc,
+		})
+	}
+
+	if err := parseAdditionalSections(s, p); err != nil {
+		return nil, err
+	}
+
+	cleanupDuplicateLocations(p)
+	return p, nil
+}
+
+// parseThreadSample parses a symbolized or unsymbolized stack trace.
+// Returns the first line after the traceback, the sample (or nil if
+// it hits a 'same-as-previous' marker) and an error.
+func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
+	var line string
+	sameAsPrevious := false
+	for s.Scan() {
+		line = strings.TrimSpace(s.Text())
+		if line == "" {
+			continue
+		}
+
+		// A "---" header line begins the next thread or section.
+		if strings.HasPrefix(line, "---") {
+			break
+		}
+		if strings.Contains(line, "same as previous thread") {
+			sameAsPrevious = true
+			continue
+		}
+
+		curAddrs, err := parseHexAddresses(line)
+		if err != nil {
+			return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+		}
+		addrs = append(addrs, curAddrs...)
+	}
+	if err := s.Err(); err != nil {
+		return "", nil, err
+	}
+	if sameAsPrevious {
+		return line, nil, nil
+	}
+	return line, addrs, nil
+}
+
+// parseAdditionalSections parses any additional sections in the
+// profile, ignoring any unrecognized sections.
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
+	// Skip forward to the memory-map sentinel, if present.
+	for !isMemoryMapSentinel(s.Text()) && s.Scan() {
+	}
+	if err := s.Err(); err != nil {
+		return err
+	}
+	return p.ParseMemoryMapFromScanner(s)
+}
+
+// ParseProcMaps parses a memory map in the format of /proc/self/maps.
+// To attach the parsed mappings to a profile (associating locations
+// with their mapping by address), use Profile.ParseMemoryMap instead.
+func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
+	s := bufio.NewScanner(rd)
+	return parseProcMapsFromScanner(s)
+}
+
+// parseProcMapsFromScanner reads mapping entries line by line, expanding
+// $attr references recorded by earlier "attr=value" lines and skipping
+// unrecognized and non-executable entries.
+func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
+	var mapping []*Mapping
+
+	var attrs []string
+	const delimiter = "="
+	r := strings.NewReplacer()
+	for s.Scan() {
+		line := r.Replace(removeLoggingInfo(s.Text()))
+		m, err := parseMappingEntry(line)
+		if err != nil {
+			if err == errUnrecognized {
+				// Recognize assignments of the form: attr=value, and replace
+				// $attr with value on subsequent mappings.
+				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
+					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
+					r = strings.NewReplacer(attrs...)
+				}
+				// Ignore any unrecognized entries
+				continue
+			}
+			return nil, err
+		}
+		// A nil mapping with no error marks a skipped (non-executable) entry.
+		if m == nil {
+			continue
+		}
+		mapping = append(mapping, m)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return mapping, nil
+}
+
+// removeLoggingInfo detects and removes log prefix entries generated
+// by the glog package. If no logging prefix is detected, the string
+// is returned unmodified.
+func removeLoggingInfo(line string) string {
+	if match := logInfoRE.FindStringIndex(line); match != nil {
+		// match[1] is the index just past the "file:line] " prefix.
+		return line[match[1]:]
+	}
+	return line
+}
+
+// ParseMemoryMap parses a memory map in the format of
+// /proc/self/maps, and overrides the mappings in the current profile.
+// It renumbers the samples and locations in the profile correspondingly.
+// It is a thin wrapper around ParseMemoryMapFromScanner.
+func (p *Profile) ParseMemoryMap(rd io.Reader) error {
+	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
+}
+
+// ParseMemoryMapFromScanner parses a memory map in the format of
+// /proc/self/maps or a variety of legacy format, and overrides the
+// mappings in the current profile.  It renumbers the samples and
+// locations in the profile correspondingly.
+func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
+	mapping, err := parseProcMapsFromScanner(s)
+	if err != nil {
+		return err
+	}
+	p.Mapping = append(p.Mapping, mapping...)
+	p.massageMappings()
+	// Renumber IDs so they stay dense and consistent after the mappings change.
+	p.remapLocationIDs()
+	p.remapFunctionIDs()
+	p.remapMappingIDs()
+	return nil
+}
+
+// parseMappingEntry parses a single mapping line in either the
+// /proc/self/maps format or the brief legacy format. It returns
+// (nil, nil) for non-executable mappings, which are skipped, and
+// errUnrecognized for lines matching neither format.
+func parseMappingEntry(l string) (*Mapping, error) {
+	var start, end, perm, file, offset, buildID string
+	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
+		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
+	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
+		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
+	} else {
+		return nil, errUnrecognized
+	}
+
+	var err error
+	mapping := &Mapping{
+		File:    file,
+		BuildID: buildID,
+	}
+	if perm != "" && !strings.Contains(perm, "x") {
+		// Skip non-executable entries.
+		return nil, nil
+	}
+	if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
+		return nil, errUnrecognized
+	}
+	if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
+		return nil, errUnrecognized
+	}
+	if offset != "" {
+		if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
+			return nil, errUnrecognized
+		}
+	}
+	return mapping, nil
+}
+
+// memoryMapSentinels are the header lines that mark the start of the
+// memory map section in legacy profiles.
+var memoryMapSentinels = []string{
+	"--- Memory map: ---",
+	"MAPPED_LIBRARIES:",
+}
+
+// isMemoryMapSentinel returns true if the string contains one of the
+// known sentinels for memory map information.
+func isMemoryMapSentinel(line string) bool {
+	for _, s := range memoryMapSentinels {
+		if strings.Contains(line, s) {
+			return true
+		}
+	}
+	return false
+}
+
+// addLegacyFrameInfo sets DropFrames/KeepFrames to the default
+// frame-pruning regexps appropriate for the profile's sample types
+// (heap, contention, or CPU).
+func (p *Profile) addLegacyFrameInfo() {
+	switch {
+	case isProfileType(p, heapzSampleTypes):
+		p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
+	case isProfileType(p, contentionzSampleTypes):
+		p.DropFrames, p.KeepFrames = lockRxStr, ""
+	default:
+		p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
+	}
+}
+
+// heapzSampleTypes lists the sample-type signatures that identify a
+// heap profile.
+var heapzSampleTypes = [][]string{
+	{"allocations", "size"}, // early Go pprof profiles
+	{"objects", "space"},
+	{"inuse_objects", "inuse_space"},
+	{"alloc_objects", "alloc_space"},
+	{"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
+}
+
+// contentionzSampleTypes lists the sample-type signatures that identify
+// a contention profile.
+var contentionzSampleTypes = [][]string{
+	{"contentions", "delay"},
+}
+
+// isProfileType reports whether p's sample types exactly match one of
+// the given type-name signatures (order-sensitive, same length).
+func isProfileType(p *Profile, types [][]string) bool {
+	st := p.SampleType
+nextType:
+	for _, t := range types {
+		if len(st) != len(t) {
+			continue
+		}
+
+		for i := range st {
+			if st[i].Type != t[i] {
+				continue nextType
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// allocRxStr matches allocator entry-point frames; it is used as the
+// default DropFrames for heap profiles.
+var allocRxStr = strings.Join([]string{
+	// POSIX entry points.
+	`calloc`,
+	`cfree`,
+	`malloc`,
+	`free`,
+	`memalign`,
+	`do_memalign`,
+	`(__)?posix_memalign`,
+	`pvalloc`,
+	`valloc`,
+	`realloc`,
+
+	// TC malloc.
+	`tcmalloc::.*`,
+	`tc_calloc`,
+	`tc_cfree`,
+	`tc_malloc`,
+	`tc_free`,
+	`tc_memalign`,
+	`tc_posix_memalign`,
+	`tc_pvalloc`,
+	`tc_valloc`,
+	`tc_realloc`,
+	`tc_new`,
+	`tc_delete`,
+	`tc_newarray`,
+	`tc_deletearray`,
+	`tc_new_nothrow`,
+	`tc_newarray_nothrow`,
+
+	// Memory-allocation routines on OS X.
+	`malloc_zone_malloc`,
+	`malloc_zone_calloc`,
+	`malloc_zone_valloc`,
+	`malloc_zone_realloc`,
+	`malloc_zone_memalign`,
+	`malloc_zone_free`,
+
+	// Go runtime
+	`runtime\..*`,
+
+	// Other misc. memory allocation routines
+	`BaseArena::.*`,
+	`(::)?do_malloc_no_errno`,
+	`(::)?do_malloc_pages`,
+	`(::)?do_malloc`,
+	`DoSampledAllocation`,
+	`MallocedMemBlock::MallocedMemBlock`,
+	`_M_allocate`,
+	`__builtin_(vec_)?delete`,
+	`__builtin_(vec_)?new`,
+	`__gnu_cxx::new_allocator::allocate`,
+	`__libc_malloc`,
+	`__malloc_alloc_template::allocate`,
+	`allocate`,
+	`cpp_alloc`,
+	`operator new(\[\])?`,
+	`simple_alloc::allocate`,
+}, `|`)
+
+// allocSkipRxStr lists runtime frames to keep even when allocRxStr would
+// drop them; it is used as the default KeepFrames for heap profiles.
+var allocSkipRxStr = strings.Join([]string{
+	// Preserve Go runtime frames that appear in the middle/bottom of
+	// the stack.
+	`runtime\.panic`,
+	`runtime\.reflectcall`,
+	`runtime\.call[0-9]*`,
+}, `|`)
+
+// cpuProfilerRxStr matches CPU-profiler signal-handler frames; it is
+// used as the default DropFrames for CPU profiles.
+var cpuProfilerRxStr = strings.Join([]string{
+	`ProfileData::Add`,
+	`ProfileData::prof_handler`,
+	`CpuProfiler::prof_handler`,
+	`__pthread_sighandler`,
+	`__restore`,
+}, `|`)
+
+// lockRxStr matches lock-profiling bookkeeping frames; it is used as the
+// default DropFrames for contention profiles.
+var lockRxStr = strings.Join([]string{
+	`RecordLockProfileData`,
+	`(base::)?RecordLockProfileData.*`,
+	`(base::)?SubmitMutexProfileData.*`,
+	`(base::)?SubmitSpinLockProfileData.*`,
+	`(base::Mutex::)?AwaitCommon.*`,
+	`(base::Mutex::)?Unlock.*`,
+	`(base::Mutex::)?UnlockSlow.*`,
+	`(base::Mutex::)?ReaderUnlock.*`,
+	`(base::MutexLock::)?~MutexLock.*`,
+	`(Mutex::)?AwaitCommon.*`,
+	`(Mutex::)?Unlock.*`,
+	`(Mutex::)?UnlockSlow.*`,
+	`(Mutex::)?ReaderUnlock.*`,
+	`(MutexLock::)?~MutexLock.*`,
+	`(SpinLock::)?Unlock.*`,
+	`(SpinLock::)?SlowUnlock.*`,
+	`(SpinLockHolder::)?~SpinLockHolder.*`,
+}, `|`)

+ 481 - 0
vendor/github.com/google/pprof/profile/merge.go

@@ -0,0 +1,481 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package profile
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Compact performs garbage collection on a profile to remove any
+// unreferenced fields. This is useful to reduce the size of a profile
+// after samples or locations have been removed.
+func (p *Profile) Compact() *Profile {
+	p, _ = Merge([]*Profile{p})
+	return p
+}
+
+// Merge merges all the profiles in profs into a single Profile.
+// Returns a new profile independent of the input profiles. The merged
+// profile is compacted to eliminate unused samples, locations,
+// functions and mappings. Profiles must have identical profile sample
+// and period types or the merge will fail. profile.Period of the
+// resulting profile will be the maximum of all profiles, and
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative with the caveat of the first profile having some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
+func Merge(srcs []*Profile) (*Profile, error) {
+	if len(srcs) == 0 {
+		return nil, fmt.Errorf("no profiles to merge")
+	}
+	p, err := combineHeaders(srcs)
+	if err != nil {
+		return nil, err
+	}
+
+	pm := &profileMerger{
+		p:         p,
+		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
+		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
+		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
+		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
+	}
+
+	for _, src := range srcs {
+		// Clear the profile-specific hash tables
+		pm.locationsByID = make(map[uint64]*Location, len(src.Location))
+		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
+		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
+
+		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
+			// The Mapping list has the property that the first mapping
+			// represents the main binary. Take the first Mapping we see,
+			// otherwise the operations below will add mappings in an
+			// arbitrary order.
+			pm.mapMapping(src.Mapping[0])
+		}
+
+		for _, s := range src.Sample {
+			if !isZeroSample(s) {
+				pm.mapSample(s)
+			}
+		}
+	}
+
+	for _, s := range p.Sample {
+		if isZeroSample(s) {
+			// If there are any zero samples, re-merge the profile to GC
+			// them.
+			return Merge([]*Profile{p})
+		}
+	}
+
+	return p, nil
+}
+
+// Normalize normalizes the source profile by multiplying each value in profile by the
+// ratio of the sum of the base profile's values of that sample type to the sum of the
+// source profile's value of that sample type.
+func (p *Profile) Normalize(pb *Profile) error {
+
+	if err := p.compatible(pb); err != nil {
+		return err
+	}
+
+	baseVals := make([]int64, len(p.SampleType))
+	for _, s := range pb.Sample {
+		for i, v := range s.Value {
+			baseVals[i] += v
+		}
+	}
+
+	srcVals := make([]int64, len(p.SampleType))
+	for _, s := range p.Sample {
+		for i, v := range s.Value {
+			srcVals[i] += v
+		}
+	}
+
+	normScale := make([]float64, len(baseVals))
+	for i := range baseVals {
+		if srcVals[i] == 0 {
+			normScale[i] = 0.0
+		} else {
+			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
+		}
+	}
+	p.ScaleN(normScale)
+	return nil
+}
+
+func isZeroSample(s *Sample) bool {
+	for _, v := range s.Value {
+		if v != 0 {
+			return false
+		}
+	}
+	return true
+}
+
+type profileMerger struct {
+	p *Profile
+
+	// Memoization tables within a profile.
+	locationsByID map[uint64]*Location
+	functionsByID map[uint64]*Function
+	mappingsByID  map[uint64]mapInfo
+
+	// Memoization tables for profile entities.
+	samples   map[sampleKey]*Sample
+	locations map[locationKey]*Location
+	functions map[functionKey]*Function
+	mappings  map[mappingKey]*Mapping
+}
+
+type mapInfo struct {
+	m      *Mapping
+	offset int64
+}
+
+func (pm *profileMerger) mapSample(src *Sample) *Sample {
+	s := &Sample{
+		Location: make([]*Location, len(src.Location)),
+		Value:    make([]int64, len(src.Value)),
+		Label:    make(map[string][]string, len(src.Label)),
+		NumLabel: make(map[string][]int64, len(src.NumLabel)),
+		NumUnit:  make(map[string][]string, len(src.NumLabel)),
+	}
+	for i, l := range src.Location {
+		s.Location[i] = pm.mapLocation(l)
+	}
+	for k, v := range src.Label {
+		vv := make([]string, len(v))
+		copy(vv, v)
+		s.Label[k] = vv
+	}
+	for k, v := range src.NumLabel {
+		u := src.NumUnit[k]
+		vv := make([]int64, len(v))
+		uu := make([]string, len(u))
+		copy(vv, v)
+		copy(uu, u)
+		s.NumLabel[k] = vv
+		s.NumUnit[k] = uu
+	}
+	// Check memoization table. Must be done on the remapped location to
+	// account for the remapped mapping. Add current values to the
+	// existing sample.
+	k := s.key()
+	if ss, ok := pm.samples[k]; ok {
+		for i, v := range src.Value {
+			ss.Value[i] += v
+		}
+		return ss
+	}
+	copy(s.Value, src.Value)
+	pm.samples[k] = s
+	pm.p.Sample = append(pm.p.Sample, s)
+	return s
+}
+
+// key generates sampleKey to be used as a key for maps.
+func (sample *Sample) key() sampleKey {
+	ids := make([]string, len(sample.Location))
+	for i, l := range sample.Location {
+		ids[i] = strconv.FormatUint(l.ID, 16)
+	}
+
+	labels := make([]string, 0, len(sample.Label))
+	for k, v := range sample.Label {
+		labels = append(labels, fmt.Sprintf("%q%q", k, v))
+	}
+	sort.Strings(labels)
+
+	numlabels := make([]string, 0, len(sample.NumLabel))
+	for k, v := range sample.NumLabel {
+		numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
+	}
+	sort.Strings(numlabels)
+
+	return sampleKey{
+		strings.Join(ids, "|"),
+		strings.Join(labels, ""),
+		strings.Join(numlabels, ""),
+	}
+}
+
+type sampleKey struct {
+	locations string
+	labels    string
+	numlabels string
+}
+
+func (pm *profileMerger) mapLocation(src *Location) *Location {
+	if src == nil {
+		return nil
+	}
+
+	if l, ok := pm.locationsByID[src.ID]; ok {
+		return l
+	}
+
+	mi := pm.mapMapping(src.Mapping)
+	l := &Location{
+		ID:       uint64(len(pm.p.Location) + 1),
+		Mapping:  mi.m,
+		Address:  uint64(int64(src.Address) + mi.offset),
+		Line:     make([]Line, len(src.Line)),
+		IsFolded: src.IsFolded,
+	}
+	for i, ln := range src.Line {
+		l.Line[i] = pm.mapLine(ln)
+	}
+	// Check memoization table. Must be done on the remapped location to
+	// account for the remapped mapping ID.
+	k := l.key()
+	if ll, ok := pm.locations[k]; ok {
+		pm.locationsByID[src.ID] = ll
+		return ll
+	}
+	pm.locationsByID[src.ID] = l
+	pm.locations[k] = l
+	pm.p.Location = append(pm.p.Location, l)
+	return l
+}
+
+// key generates locationKey to be used as a key for maps.
+func (l *Location) key() locationKey {
+	key := locationKey{
+		addr:     l.Address,
+		isFolded: l.IsFolded,
+	}
+	if l.Mapping != nil {
+		// Normalizes address to handle address space randomization.
+		key.addr -= l.Mapping.Start
+		key.mappingID = l.Mapping.ID
+	}
+	lines := make([]string, len(l.Line)*2)
+	for i, line := range l.Line {
+		if line.Function != nil {
+			lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
+		}
+		lines[i*2+1] = strconv.FormatInt(line.Line, 16)
+	}
+	key.lines = strings.Join(lines, "|")
+	return key
+}
+
+type locationKey struct {
+	addr, mappingID uint64
+	lines           string
+	isFolded        bool
+}
+
+func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
+	if src == nil {
+		return mapInfo{}
+	}
+
+	if mi, ok := pm.mappingsByID[src.ID]; ok {
+		return mi
+	}
+
+	// Check memoization tables.
+	mk := src.key()
+	if m, ok := pm.mappings[mk]; ok {
+		mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
+		pm.mappingsByID[src.ID] = mi
+		return mi
+	}
+	m := &Mapping{
+		ID:              uint64(len(pm.p.Mapping) + 1),
+		Start:           src.Start,
+		Limit:           src.Limit,
+		Offset:          src.Offset,
+		File:            src.File,
+		BuildID:         src.BuildID,
+		HasFunctions:    src.HasFunctions,
+		HasFilenames:    src.HasFilenames,
+		HasLineNumbers:  src.HasLineNumbers,
+		HasInlineFrames: src.HasInlineFrames,
+	}
+	pm.p.Mapping = append(pm.p.Mapping, m)
+
+	// Update memoization tables.
+	pm.mappings[mk] = m
+	mi := mapInfo{m, 0}
+	pm.mappingsByID[src.ID] = mi
+	return mi
+}
+
+// key generates encoded strings of Mapping to be used as a key for
+// maps.
+func (m *Mapping) key() mappingKey {
+	// Normalize addresses to handle address space randomization.
+	// Round up to next 4K boundary to avoid minor discrepancies.
+	const mapsizeRounding = 0x1000
+
+	size := m.Limit - m.Start
+	size = size + mapsizeRounding - 1
+	size = size - (size % mapsizeRounding)
+	key := mappingKey{
+		size:   size,
+		offset: m.Offset,
+	}
+
+	switch {
+	case m.BuildID != "":
+		key.buildIDOrFile = m.BuildID
+	case m.File != "":
+		key.buildIDOrFile = m.File
+	default:
+		// A mapping containing neither build ID nor file name is a fake mapping. A
+		// key with empty buildIDOrFile is used for fake mappings so that they are
+		// treated as the same mapping during merging.
+	}
+	return key
+}
+
+type mappingKey struct {
+	size, offset  uint64
+	buildIDOrFile string
+}
+
+func (pm *profileMerger) mapLine(src Line) Line {
+	ln := Line{
+		Function: pm.mapFunction(src.Function),
+		Line:     src.Line,
+	}
+	return ln
+}
+
+func (pm *profileMerger) mapFunction(src *Function) *Function {
+	if src == nil {
+		return nil
+	}
+	if f, ok := pm.functionsByID[src.ID]; ok {
+		return f
+	}
+	k := src.key()
+	if f, ok := pm.functions[k]; ok {
+		pm.functionsByID[src.ID] = f
+		return f
+	}
+	f := &Function{
+		ID:         uint64(len(pm.p.Function) + 1),
+		Name:       src.Name,
+		SystemName: src.SystemName,
+		Filename:   src.Filename,
+		StartLine:  src.StartLine,
+	}
+	pm.functions[k] = f
+	pm.functionsByID[src.ID] = f
+	pm.p.Function = append(pm.p.Function, f)
+	return f
+}
+
+// key generates a struct to be used as a key for maps.
+func (f *Function) key() functionKey {
+	return functionKey{
+		f.StartLine,
+		f.Name,
+		f.SystemName,
+		f.Filename,
+	}
+}
+
+type functionKey struct {
+	startLine                  int64
+	name, systemName, fileName string
+}
+
+// combineHeaders checks that all profiles can be merged and returns
+// their combined profile.
+func combineHeaders(srcs []*Profile) (*Profile, error) {
+	for _, s := range srcs[1:] {
+		if err := srcs[0].compatible(s); err != nil {
+			return nil, err
+		}
+	}
+
+	var timeNanos, durationNanos, period int64
+	var comments []string
+	seenComments := map[string]bool{}
+	var defaultSampleType string
+	for _, s := range srcs {
+		if timeNanos == 0 || s.TimeNanos < timeNanos {
+			timeNanos = s.TimeNanos
+		}
+		durationNanos += s.DurationNanos
+		if period == 0 || period < s.Period {
+			period = s.Period
+		}
+		for _, c := range s.Comments {
+			if seen := seenComments[c]; !seen {
+				comments = append(comments, c)
+				seenComments[c] = true
+			}
+		}
+		if defaultSampleType == "" {
+			defaultSampleType = s.DefaultSampleType
+		}
+	}
+
+	p := &Profile{
+		SampleType: make([]*ValueType, len(srcs[0].SampleType)),
+
+		DropFrames: srcs[0].DropFrames,
+		KeepFrames: srcs[0].KeepFrames,
+
+		TimeNanos:     timeNanos,
+		DurationNanos: durationNanos,
+		PeriodType:    srcs[0].PeriodType,
+		Period:        period,
+
+		Comments:          comments,
+		DefaultSampleType: defaultSampleType,
+	}
+	copy(p.SampleType, srcs[0].SampleType)
+	return p, nil
+}
+
+// compatible determines if two profiles can be compared/merged.
+// returns nil if the profiles are compatible; otherwise an error with
+// details on the incompatibility.
+func (p *Profile) compatible(pb *Profile) error {
+	if !equalValueType(p.PeriodType, pb.PeriodType) {
+		return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
+	}
+
+	if len(p.SampleType) != len(pb.SampleType) {
+		return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+	}
+
+	for i := range p.SampleType {
+		if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
+			return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
+		}
+	}
+	return nil
+}
+
+// equalValueType returns true if the two value types are semantically
+// equal. It ignores the internal fields used during encode/decode.
+func equalValueType(st1, st2 *ValueType) bool {
+	return st1.Type == st2.Type && st1.Unit == st2.Unit
+}

+ 805 - 0
vendor/github.com/google/pprof/profile/profile.go

@@ -0,0 +1,805 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package profile provides a representation of profile.proto and
+// methods to encode/decode profiles in this format.
+package profile
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Profile is an in-memory representation of profile.proto.
+type Profile struct {
+	SampleType        []*ValueType
+	DefaultSampleType string
+	Sample            []*Sample
+	Mapping           []*Mapping
+	Location          []*Location
+	Function          []*Function
+	Comments          []string
+
+	DropFrames string
+	KeepFrames string
+
+	TimeNanos     int64
+	DurationNanos int64
+	PeriodType    *ValueType
+	Period        int64
+
+	// The following fields are modified during encoding and copying,
+	// so are protected by a Mutex.
+	encodeMu sync.Mutex
+
+	commentX           []int64
+	dropFramesX        int64
+	keepFramesX        int64
+	stringTable        []string
+	defaultSampleTypeX int64
+}
+
+// ValueType corresponds to Profile.ValueType
+type ValueType struct {
+	Type string // cpu, wall, inuse_space, etc
+	Unit string // seconds, nanoseconds, bytes, etc
+
+	typeX int64
+	unitX int64
+}
+
+// Sample corresponds to Profile.Sample
+type Sample struct {
+	Location []*Location
+	Value    []int64
+	Label    map[string][]string
+	NumLabel map[string][]int64
+	NumUnit  map[string][]string
+
+	locationIDX []uint64
+	labelX      []label
+}
+
+// label corresponds to Profile.Label
+type label struct {
+	keyX int64
+	// Exactly one of the two following values must be set
+	strX int64
+	numX int64 // Integer value for this label
+	// can be set if numX has value
+	unitX int64
+}
+
+// Mapping corresponds to Profile.Mapping
+type Mapping struct {
+	ID              uint64
+	Start           uint64
+	Limit           uint64
+	Offset          uint64
+	File            string
+	BuildID         string
+	HasFunctions    bool
+	HasFilenames    bool
+	HasLineNumbers  bool
+	HasInlineFrames bool
+
+	fileX    int64
+	buildIDX int64
+}
+
+// Location corresponds to Profile.Location
+type Location struct {
+	ID       uint64
+	Mapping  *Mapping
+	Address  uint64
+	Line     []Line
+	IsFolded bool
+
+	mappingIDX uint64
+}
+
+// Line corresponds to Profile.Line
+type Line struct {
+	Function *Function
+	Line     int64
+
+	functionIDX uint64
+}
+
+// Function corresponds to Profile.Function
+type Function struct {
+	ID         uint64
+	Name       string
+	SystemName string
+	Filename   string
+	StartLine  int64
+
+	nameX       int64
+	systemNameX int64
+	filenameX   int64
+}
+
+// Parse parses a profile and checks for its validity. The input
+// may be a gzip-compressed encoded protobuf or one of many legacy
+// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) {
+	data, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	return ParseData(data)
+}
+
+// ParseData parses a profile from a buffer and checks for its
+// validity.
+func ParseData(data []byte) (*Profile, error) {
+	var p *Profile
+	var err error
+	if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
+		gz, err := gzip.NewReader(bytes.NewBuffer(data))
+		if err == nil {
+			data, err = ioutil.ReadAll(gz)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("decompressing profile: %v", err)
+		}
+	}
+	if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
+		p, err = parseLegacy(data)
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("parsing profile: %v", err)
+	}
+
+	if err := p.CheckValid(); err != nil {
+		return nil, fmt.Errorf("malformed profile: %v", err)
+	}
+	return p, nil
+}
+
+var errUnrecognized = fmt.Errorf("unrecognized profile format")
+var errMalformed = fmt.Errorf("malformed profile format")
+var errNoData = fmt.Errorf("empty input file")
+var errConcatProfile = fmt.Errorf("concatenated profiles detected")
+
+func parseLegacy(data []byte) (*Profile, error) {
+	parsers := []func([]byte) (*Profile, error){
+		parseCPU,
+		parseHeap,
+		parseGoCount, // goroutine, threadcreate
+		parseThread,
+		parseContention,
+		parseJavaProfile,
+	}
+
+	for _, parser := range parsers {
+		p, err := parser(data)
+		if err == nil {
+			p.addLegacyFrameInfo()
+			return p, nil
+		}
+		if err != errUnrecognized {
+			return nil, err
+		}
+	}
+	return nil, errUnrecognized
+}
+
+// ParseUncompressed parses an uncompressed protobuf into a profile.
+func ParseUncompressed(data []byte) (*Profile, error) {
+	if len(data) == 0 {
+		return nil, errNoData
+	}
+	p := &Profile{}
+	if err := unmarshal(data, p); err != nil {
+		return nil, err
+	}
+
+	if err := p.postDecode(); err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// massageMappings applies heuristic-based changes to the profile
+// mappings to account for quirks of some environments.
+func (p *Profile) massageMappings() {
+	// Merge adjacent regions with matching names, checking that the offsets match
+	if len(p.Mapping) > 1 {
+		mappings := []*Mapping{p.Mapping[0]}
+		for _, m := range p.Mapping[1:] {
+			lm := mappings[len(mappings)-1]
+			if adjacent(lm, m) {
+				lm.Limit = m.Limit
+				if m.File != "" {
+					lm.File = m.File
+				}
+				if m.BuildID != "" {
+					lm.BuildID = m.BuildID
+				}
+				p.updateLocationMapping(m, lm)
+				continue
+			}
+			mappings = append(mappings, m)
+		}
+		p.Mapping = mappings
+	}
+
+	// Use heuristics to identify main binary and move it to the top of the list of mappings
+	for i, m := range p.Mapping {
+		file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
+		if len(file) == 0 {
+			continue
+		}
+		if len(libRx.FindStringSubmatch(file)) > 0 {
+			continue
+		}
+		if file[0] == '[' {
+			continue
+		}
+		// Swap what we guess is main to position 0.
+		p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
+		break
+	}
+
+	// Keep the mapping IDs neatly sorted
+	for i, m := range p.Mapping {
+		m.ID = uint64(i + 1)
+	}
+}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. Check that their addresses are adjacent,
+// and if the offsets match, if they are available.
+func adjacent(m1, m2 *Mapping) bool {
+	if m1.File != "" && m2.File != "" {
+		if m1.File != m2.File {
+			return false
+		}
+	}
+	if m1.BuildID != "" && m2.BuildID != "" {
+		if m1.BuildID != m2.BuildID {
+			return false
+		}
+	}
+	if m1.Limit != m2.Start {
+		return false
+	}
+	if m1.Offset != 0 && m2.Offset != 0 {
+		offset := m1.Offset + (m1.Limit - m1.Start)
+		if offset != m2.Offset {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+	for _, l := range p.Location {
+		if l.Mapping == from {
+			l.Mapping = to
+		}
+	}
+}
+
+func serialize(p *Profile) []byte {
+	p.encodeMu.Lock()
+	p.preEncode()
+	b := marshal(p)
+	p.encodeMu.Unlock()
+	return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+	zw := gzip.NewWriter(w)
+	defer zw.Close()
+	_, err := zw.Write(serialize(p))
+	return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+	_, err := w.Write(serialize(p))
+	return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
+//   - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+	// Check that sample values are consistent
+	sampleLen := len(p.SampleType)
+	if sampleLen == 0 && len(p.Sample) != 0 {
+		return fmt.Errorf("missing sample type information")
+	}
+	for _, s := range p.Sample {
+		if s == nil {
+			return fmt.Errorf("profile has nil sample")
+		}
+		if len(s.Value) != sampleLen {
+			return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+		}
+		for _, l := range s.Location {
+			if l == nil {
+				return fmt.Errorf("sample has nil location")
+			}
+		}
+	}
+
+	// Check that all mappings/locations/functions are in the tables
+	// Check that there are no duplicate ids
+	mappings := make(map[uint64]*Mapping, len(p.Mapping))
+	for _, m := range p.Mapping {
+		if m == nil {
+			return fmt.Errorf("profile has nil mapping")
+		}
+		if m.ID == 0 {
+			return fmt.Errorf("found mapping with reserved ID=0")
+		}
+		if mappings[m.ID] != nil {
+			return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+		}
+		mappings[m.ID] = m
+	}
+	functions := make(map[uint64]*Function, len(p.Function))
+	for _, f := range p.Function {
+		if f == nil {
+			return fmt.Errorf("profile has nil function")
+		}
+		if f.ID == 0 {
+			return fmt.Errorf("found function with reserved ID=0")
+		}
+		if functions[f.ID] != nil {
+			return fmt.Errorf("multiple functions with same id: %d", f.ID)
+		}
+		functions[f.ID] = f
+	}
+	locations := make(map[uint64]*Location, len(p.Location))
+	for _, l := range p.Location {
+		if l == nil {
+			return fmt.Errorf("profile has nil location")
+		}
+		if l.ID == 0 {
+			return fmt.Errorf("found location with reserved id=0")
+		}
+		if locations[l.ID] != nil {
+			return fmt.Errorf("multiple locations with same id: %d", l.ID)
+		}
+		locations[l.ID] = l
+		if m := l.Mapping; m != nil {
+			if m.ID == 0 || mappings[m.ID] != m {
+				return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+			}
+		}
+		for _, ln := range l.Line {
+			f := ln.Function
+			if f == nil {
+				return fmt.Errorf("location id: %d has a line with nil function", l.ID)
+			}
+			if f.ID == 0 || functions[f.ID] != f {
+				return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+			}
+		}
+	}
+	return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+	for _, m := range p.Mapping {
+		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+		m.HasFunctions = m.HasFunctions && function
+		m.HasFilenames = m.HasFilenames && filename
+		m.HasLineNumbers = m.HasLineNumbers && linenumber
+	}
+
+	// Aggregate functions
+	if !function || !filename {
+		for _, f := range p.Function {
+			if !function {
+				f.Name = ""
+				f.SystemName = ""
+			}
+			if !filename {
+				f.Filename = ""
+			}
+		}
+	}
+
+	// Aggregate locations
+	if !inlineFrame || !address || !linenumber {
+		for _, l := range p.Location {
+			if !inlineFrame && len(l.Line) > 1 {
+				l.Line = l.Line[len(l.Line)-1:]
+			}
+			if !linenumber {
+				for i := range l.Line {
+					l.Line[i].Line = 0
+				}
+			}
+			if !address {
+				l.Address = 0
+			}
+		}
+	}
+
+	return p.CheckValid()
+}
+
+// NumLabelUnits returns a map of numeric label keys to the units
+// associated with those keys and a map of those keys to any units
+// that were encountered but not used.
+// Unit for a given key is the first encountered unit for that key. If multiple
+// units are encountered for values paired with a particular key, then the first
+// unit encountered is used and all other units are returned in sorted order
+// in map of ignored units.
+// If no units are encountered for a particular key, the unit is then inferred
+// based on the key.
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
+	numLabelUnits := map[string]string{}
+	ignoredUnits := map[string]map[string]bool{}
+	encounteredKeys := map[string]bool{}
+
+	// Determine units based on numeric tags for each sample.
+	for _, s := range p.Sample {
+		for k := range s.NumLabel {
+			encounteredKeys[k] = true
+			for _, unit := range s.NumUnit[k] {
+				if unit == "" {
+					continue
+				}
+				if wantUnit, ok := numLabelUnits[k]; !ok {
+					numLabelUnits[k] = unit
+				} else if wantUnit != unit {
+					if v, ok := ignoredUnits[k]; ok {
+						v[unit] = true
+					} else {
+						ignoredUnits[k] = map[string]bool{unit: true}
+					}
+				}
+			}
+		}
+	}
+	// Infer units for keys without any units associated with
+	// numeric tag values.
+	for key := range encounteredKeys {
+		unit := numLabelUnits[key]
+		if unit == "" {
+			switch key {
+			case "alignment", "request":
+				numLabelUnits[key] = "bytes"
+			default:
+				numLabelUnits[key] = key
+			}
+		}
+	}
+
+	// Copy ignored units into more readable format
+	unitsIgnored := make(map[string][]string, len(ignoredUnits))
+	for key, values := range ignoredUnits {
+		units := make([]string, len(values))
+		i := 0
+		for unit := range values {
+			units[i] = unit
+			i++
+		}
+		sort.Strings(units)
+		unitsIgnored[key] = units
+	}
+
+	return numLabelUnits, unitsIgnored
+}
+
+// String dumps a text representation of a profile. Intended mainly
+// for debugging purposes.
+func (p *Profile) String() string {
+	ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
+	for _, c := range p.Comments {
+		ss = append(ss, "Comment: "+c)
+	}
+	if pt := p.PeriodType; pt != nil {
+		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
+	}
+	ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
+	if p.TimeNanos != 0 {
+		ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
+	}
+	if p.DurationNanos != 0 {
+		ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
+	}
+
+	ss = append(ss, "Samples:")
+	var sh1 string
+	for _, s := range p.SampleType {
+		dflt := ""
+		if s.Type == p.DefaultSampleType {
+			dflt = "[dflt]"
+		}
+		sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
+	}
+	ss = append(ss, strings.TrimSpace(sh1))
+	for _, s := range p.Sample {
+		ss = append(ss, s.string())
+	}
+
+	ss = append(ss, "Locations")
+	for _, l := range p.Location {
+		ss = append(ss, l.string())
+	}
+
+	ss = append(ss, "Mappings")
+	for _, m := range p.Mapping {
+		ss = append(ss, m.string())
+	}
+
+	return strings.Join(ss, "\n") + "\n"
+}
+
+// string dumps a text representation of a mapping. Intended mainly
+// for debugging purposes.
+func (m *Mapping) string() string {
+	bits := ""
+	if m.HasFunctions {
+		bits = bits + "[FN]"
+	}
+	if m.HasFilenames {
+		bits = bits + "[FL]"
+	}
+	if m.HasLineNumbers {
+		bits = bits + "[LN]"
+	}
+	if m.HasInlineFrames {
+		bits = bits + "[IN]"
+	}
+	return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
+		m.ID,
+		m.Start, m.Limit, m.Offset,
+		m.File,
+		m.BuildID,
+		bits)
+}
+
+// string dumps a text representation of a location. Intended mainly
+// for debugging purposes.
+func (l *Location) string() string {
+	ss := []string{}
+	locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
+	if m := l.Mapping; m != nil {
+		locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
+	}
+	if l.IsFolded {
+		locStr = locStr + "[F] "
+	}
+	if len(l.Line) == 0 {
+		ss = append(ss, locStr)
+	}
+	for li := range l.Line {
+		lnStr := "??"
+		if fn := l.Line[li].Function; fn != nil {
+			lnStr = fmt.Sprintf("%s %s:%d s=%d",
+				fn.Name,
+				fn.Filename,
+				l.Line[li].Line,
+				fn.StartLine)
+			if fn.Name != fn.SystemName {
+				lnStr = lnStr + "(" + fn.SystemName + ")"
+			}
+		}
+		ss = append(ss, locStr+lnStr)
+		// Do not print location details past the first line
+		locStr = "             "
+	}
+	return strings.Join(ss, "\n")
+}
+
+// string dumps a text representation of a sample. Intended mainly
+// for debugging purposes.
+func (s *Sample) string() string {
+	ss := []string{}
+	var sv string
+	for _, v := range s.Value {
+		sv = fmt.Sprintf("%s %10d", sv, v)
+	}
+	sv = sv + ": "
+	for _, l := range s.Location {
+		sv = sv + fmt.Sprintf("%d ", l.ID)
+	}
+	ss = append(ss, sv)
+	const labelHeader = "                "
+	if len(s.Label) > 0 {
+		ss = append(ss, labelHeader+labelsToString(s.Label))
+	}
+	if len(s.NumLabel) > 0 {
+		ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
+	}
+	return strings.Join(ss, "\n")
+}
+
+// labelsToString returns a string representation of a
+// map representing labels.
+func labelsToString(labels map[string][]string) string {
+	ls := []string{}
+	for k, v := range labels {
+		ls = append(ls, fmt.Sprintf("%s:%v", k, v))
+	}
+	sort.Strings(ls)
+	return strings.Join(ls, " ")
+}
+
+// numLabelsToString returns a string representation of a map
+// representing numeric labels.
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
+	ls := []string{}
+	for k, v := range numLabels {
+		units := numUnits[k]
+		var labelString string
+		if len(units) == len(v) {
+			values := make([]string, len(v))
+			for i, vv := range v {
+				values[i] = fmt.Sprintf("%d %s", vv, units[i])
+			}
+			labelString = fmt.Sprintf("%s:%v", k, values)
+		} else {
+			labelString = fmt.Sprintf("%s:%v", k, v)
+		}
+		ls = append(ls, labelString)
+	}
+	sort.Strings(ls)
+	return strings.Join(ls, " ")
+}
+
+// SetLabel sets the specified key to the specified value for all samples in the
+// profile.
+func (p *Profile) SetLabel(key string, value []string) {
+	for _, sample := range p.Sample {
+		if sample.Label == nil {
+			sample.Label = map[string][]string{key: value}
+		} else {
+			sample.Label[key] = value
+		}
+	}
+}
+
+// RemoveLabel removes all labels associated with the specified key for all
+// samples in the profile.
+func (p *Profile) RemoveLabel(key string) {
+	for _, sample := range p.Sample {
+		delete(sample.Label, key)
+	}
+}
+
+// HasLabel returns true if a sample has a label with indicated key and value.
+func (s *Sample) HasLabel(key, value string) bool {
+	for _, v := range s.Label[key] {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// DiffBaseSample returns true if a sample belongs to the diff base and false
+// otherwise.
+func (s *Sample) DiffBaseSample() bool {
+	return s.HasLabel("pprof::base", "true")
+}
+
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
+func (p *Profile) Scale(ratio float64) {
+	if ratio == 1 {
+		return
+	}
+	ratios := make([]float64, len(p.SampleType))
+	for i := range p.SampleType {
+		ratios[i] = ratio
+	}
+	p.ScaleN(ratios)
+}
+
+// ScaleN multiplies each sample values in a sample by a different amount
+// and keeps only samples that have at least one non-zero value.
+func (p *Profile) ScaleN(ratios []float64) error {
+	if len(p.SampleType) != len(ratios) {
+		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
+	}
+	allOnes := true
+	for _, r := range ratios {
+		if r != 1 {
+			allOnes = false
+			break
+		}
+	}
+	if allOnes {
+		return nil
+	}
+	fillIdx := 0
+	for _, s := range p.Sample {
+		keepSample := false
+		for i, v := range s.Value {
+			if ratios[i] != 1 {
+				val := int64(math.Round(float64(v) * ratios[i]))
+				s.Value[i] = val
+				keepSample = keepSample || val != 0
+			}
+		}
+		if keepSample {
+			p.Sample[fillIdx] = s
+			fillIdx++
+		}
+	}
+	p.Sample = p.Sample[:fillIdx]
+	return nil
+}
+
+// HasFunctions determines if all locations in this profile have
+// symbolized function information.
+func (p *Profile) HasFunctions() bool {
+	for _, l := range p.Location {
+		if l.Mapping != nil && !l.Mapping.HasFunctions {
+			return false
+		}
+	}
+	return true
+}
+
+// HasFileLines determines if all locations in this profile have
+// symbolized file and line number information.
+func (p *Profile) HasFileLines() bool {
+	for _, l := range p.Location {
+		if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
+			return false
+		}
+	}
+	return true
+}
+
+// Unsymbolizable returns true if a mapping points to a binary for which
+// locations can't be symbolized in principle, at least now. Examples are
+// "[vdso]", [vsyscall]" and some others, see the code.
+func (m *Mapping) Unsymbolizable() bool {
+	name := filepath.Base(m.File)
+	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
+}
+
+// Copy makes a fully independent copy of a profile.
+func (p *Profile) Copy() *Profile {
+	pp := &Profile{}
+	if err := unmarshal(serialize(p), pp); err != nil {
+		panic(err)
+	}
+	if err := pp.postDecode(); err != nil {
+		panic(err)
+	}
+
+	return pp
+}

+ 370 - 0
vendor/github.com/google/pprof/profile/proto.go

@@ -0,0 +1,370 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file is a simple protocol buffer encoder and decoder.
+// The format is described at
+// https://developers.google.com/protocol-buffers/docs/encoding
+//
+// A protocol message must implement the message interface:
+//   decoder() []decoder
+//   encode(*buffer)
+//
+// The decode method returns a slice indexed by field number that gives the
+// function to decode that field.
+// The encode method encodes its receiver into the given buffer.
+//
+// The two methods are simple enough to be implemented by hand rather than
+// by using a protocol compiler.
+//
+// See profile.go for examples of messages implementing this interface.
+//
+// There is no support for groups, message sets, or "has" bits.
+
+package profile
+
+import (
+	"errors"
+	"fmt"
+)
+
+// buffer holds the state of one encode or decode step: the field tag
+// and wire type most recently decoded, the scalar payload (u64), the
+// length-delimited payload (data), and a small scratch area (tmp) used
+// when rotating length prefixes into place during encoding.
+type buffer struct {
+	field int // field tag
+	typ   int // proto wire type code for field
+	u64   uint64
+	data  []byte
+	tmp   [16]byte
+}
+
+// decoder decodes the field currently held in the buffer into a message.
+type decoder func(*buffer, message) error
+
+// message is implemented by each hand-written proto message; see the
+// file comment above for the decoder()/encode() contract.
+type message interface {
+	decoder() []decoder
+	encode(*buffer)
+}
+
+// marshal encodes m into its protobuf wire format and returns the bytes.
+func marshal(m message) []byte {
+	var b buffer
+	m.encode(&b)
+	return b.data
+}
+
+// encodeVarint appends x to b.data in base-128 varint encoding:
+// least-significant 7-bit group first, high bit set on every byte
+// except the last.
+func encodeVarint(b *buffer, x uint64) {
+	for x >= 128 {
+		b.data = append(b.data, byte(x)|0x80)
+		x >>= 7
+	}
+	b.data = append(b.data, byte(x))
+}
+
+// encodeLength appends the header of a length-delimited (wire type 2)
+// field: the key (tag<<3 | 2) followed by the payload length.
+func encodeLength(b *buffer, tag int, len int) {
+	encodeVarint(b, uint64(tag)<<3|2)
+	encodeVarint(b, uint64(len))
+}
+
+// encodeUint64 appends a varint (wire type 0) field with the given tag.
+func encodeUint64(b *buffer, tag int, x uint64) {
+	// Key is tag<<3 with wire type 0, followed by the varint value.
+	encodeVarint(b, uint64(tag)<<3)
+	encodeVarint(b, x)
+}
+
+// encodeUint64s appends a repeated uint64 field. Three or more values
+// use packed encoding; one or two values are emitted as individual
+// tagged fields.
+func encodeUint64s(b *buffer, tag int, x []uint64) {
+	if len(x) > 2 {
+		// Use packed encoding
+		n1 := len(b.data)
+		for _, u := range x {
+			encodeVarint(b, u)
+		}
+		n2 := len(b.data)
+		encodeLength(b, tag, n2-n1)
+		n3 := len(b.data)
+		// Rotate the just-written header (b.data[n2:n3]) in front of the
+		// packed values (b.data[n1:n2]) using the tmp scratch buffer.
+		copy(b.tmp[:], b.data[n2:n3])
+		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+		copy(b.data[n1:], b.tmp[:n3-n2])
+		return
+	}
+	for _, u := range x {
+		encodeUint64(b, tag, u)
+	}
+}
+
+// encodeUint64Opt appends the field only when x is nonzero (proto
+// default values are omitted on the wire).
+func encodeUint64Opt(b *buffer, tag int, x uint64) {
+	if x == 0 {
+		return
+	}
+	encodeUint64(b, tag, x)
+}
+
+// encodeInt64 appends a varint field, reinterpreting x as uint64.
+func encodeInt64(b *buffer, tag int, x int64) {
+	u := uint64(x)
+	encodeUint64(b, tag, u)
+}
+
+// encodeInt64s appends a repeated int64 field; like encodeUint64s,
+// three or more values use packed encoding with the header rotated in
+// front of the values via the tmp scratch buffer.
+func encodeInt64s(b *buffer, tag int, x []int64) {
+	if len(x) > 2 {
+		// Use packed encoding
+		n1 := len(b.data)
+		for _, u := range x {
+			encodeVarint(b, uint64(u))
+		}
+		n2 := len(b.data)
+		encodeLength(b, tag, n2-n1)
+		n3 := len(b.data)
+		copy(b.tmp[:], b.data[n2:n3])
+		copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+		copy(b.data[n1:], b.tmp[:n3-n2])
+		return
+	}
+	for _, u := range x {
+		encodeInt64(b, tag, u)
+	}
+}
+
+// encodeInt64Opt appends the field only when x is nonzero.
+func encodeInt64Opt(b *buffer, tag int, x int64) {
+	if x == 0 {
+		return
+	}
+	encodeInt64(b, tag, x)
+}
+
+// encodeString appends a length-delimited field holding x's bytes.
+func encodeString(b *buffer, tag int, x string) {
+	encodeLength(b, tag, len(x))
+	b.data = append(b.data, x...)
+}
+
+// encodeStrings appends one tagged field per string (repeated string
+// fields are never packed).
+func encodeStrings(b *buffer, tag int, x []string) {
+	for _, s := range x {
+		encodeString(b, tag, s)
+	}
+}
+
+// encodeBool appends a varint field of 1 for true and 0 for false.
+func encodeBool(b *buffer, tag int, x bool) {
+	if x {
+		encodeUint64(b, tag, 1)
+	} else {
+		encodeUint64(b, tag, 0)
+	}
+}
+
+// encodeBoolOpt appends the field only when x is true.
+func encodeBoolOpt(b *buffer, tag int, x bool) {
+	if x {
+		encodeBool(b, tag, x)
+	}
+}
+
+// encodeMessage appends m as an embedded (length-delimited) field. The
+// message body is encoded first, then the header is appended and
+// rotated in front of the body via the tmp scratch buffer.
+func encodeMessage(b *buffer, tag int, m message) {
+	n1 := len(b.data)
+	m.encode(b)
+	n2 := len(b.data)
+	encodeLength(b, tag, n2-n1)
+	n3 := len(b.data)
+	copy(b.tmp[:], b.data[n2:n3])
+	copy(b.data[n1+(n3-n2):], b.data[n1:n2])
+	copy(b.data[n1:], b.tmp[:n3-n2])
+}
+
+// unmarshal decodes data into m. The buffer is seeded with wire type 2
+// so that decodeMessage's checkType accepts the top-level message.
+func unmarshal(data []byte, m message) (err error) {
+	b := buffer{data: data, typ: 2}
+	return decodeMessage(&b, m)
+}
+
+// le64 assembles a little-endian uint64 from the first 8 bytes of p.
+func le64(p []byte) uint64 {
+	return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
+}
+
+// le32 assembles a little-endian uint32 from the first 4 bytes of p.
+func le32(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+// decodeVarint reads one base-128 varint from data and returns the
+// value and the remaining bytes. Varints longer than 10 bytes (the
+// maximum for 64 bits) or truncated by the end of data are rejected.
+func decodeVarint(data []byte) (uint64, []byte, error) {
+	var u uint64
+	for i := 0; ; i++ {
+		if i >= 10 || i >= len(data) {
+			return 0, nil, errors.New("bad varint")
+		}
+		u |= uint64(data[i]&0x7F) << uint(7*i)
+		if data[i]&0x80 == 0 {
+			return u, data[i+1:], nil
+		}
+	}
+}
+
+// decodeField reads one complete field (key plus payload) from data
+// into b and returns the remaining bytes. b.field and b.typ receive the
+// tag and wire type; the payload lands in b.u64 (scalar types) or
+// b.data (length-delimited).
+func decodeField(b *buffer, data []byte) ([]byte, error) {
+	x, data, err := decodeVarint(data)
+	if err != nil {
+		return nil, err
+	}
+	// Field key: tag in the upper bits, wire type in the low 3 bits.
+	b.field = int(x >> 3)
+	b.typ = int(x & 7)
+	b.data = nil
+	b.u64 = 0
+	switch b.typ {
+	case 0:
+		// Varint.
+		b.u64, data, err = decodeVarint(data)
+		if err != nil {
+			return nil, err
+		}
+	case 1:
+		// 64-bit fixed.
+		if len(data) < 8 {
+			return nil, errors.New("not enough data")
+		}
+		b.u64 = le64(data[:8])
+		data = data[8:]
+	case 2:
+		// Length-delimited; b.data aliases the input, no copy is made.
+		var n uint64
+		n, data, err = decodeVarint(data)
+		if err != nil {
+			return nil, err
+		}
+		if n > uint64(len(data)) {
+			return nil, errors.New("too much data")
+		}
+		b.data = data[:n]
+		data = data[n:]
+	case 5:
+		// 32-bit fixed.
+		if len(data) < 4 {
+			return nil, errors.New("not enough data")
+		}
+		b.u64 = uint64(le32(data[:4]))
+		data = data[4:]
+	default:
+		// Includes the deprecated group wire types 3 and 4.
+		return nil, fmt.Errorf("unknown wire type: %d", b.typ)
+	}
+
+	return data, nil
+}
+
+// checkType returns an error unless the buffer's current wire type is typ.
+func checkType(b *buffer, typ int) error {
+	if b.typ != typ {
+		return errors.New("type mismatch")
+	}
+	return nil
+}
+
+// decodeMessage decodes the length-delimited payload currently in b
+// into m, dispatching each field to m's decoder table. Fields whose tag
+// is beyond the table or has a nil entry are skipped (unknown fields
+// are ignored, as proto requires).
+func decodeMessage(b *buffer, m message) error {
+	if err := checkType(b, 2); err != nil {
+		return err
+	}
+	dec := m.decoder()
+	// Save the payload before decodeField starts overwriting b.data.
+	data := b.data
+	for len(data) > 0 {
+		// pull varint field# + type
+		var err error
+		data, err = decodeField(b, data)
+		if err != nil {
+			return err
+		}
+		if b.field >= len(dec) || dec[b.field] == nil {
+			continue
+		}
+		if err := dec[b.field](b, m); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// decodeInt64 stores the buffer's varint payload into *x.
+func decodeInt64(b *buffer, x *int64) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	*x = int64(b.u64)
+	return nil
+}
+
+// decodeInt64s appends the buffer's payload to *x, accepting both
+// packed (wire type 2) and individually tagged repeated fields.
+func decodeInt64s(b *buffer, x *[]int64) error {
+	if b.typ == 2 {
+		// Packed encoding
+		data := b.data
+		tmp := make([]int64, 0, len(data)) // Maximally sized
+		for len(data) > 0 {
+			var u uint64
+			var err error
+
+			if u, data, err = decodeVarint(data); err != nil {
+				return err
+			}
+			tmp = append(tmp, int64(u))
+		}
+		*x = append(*x, tmp...)
+		return nil
+	}
+	var i int64
+	if err := decodeInt64(b, &i); err != nil {
+		return err
+	}
+	*x = append(*x, i)
+	return nil
+}
+
+// decodeUint64 stores the buffer's varint payload into *x.
+func decodeUint64(b *buffer, x *uint64) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	*x = b.u64
+	return nil
+}
+
+// decodeUint64s appends the buffer's payload to *x, accepting both
+// packed (wire type 2) and individually tagged repeated fields.
+func decodeUint64s(b *buffer, x *[]uint64) error {
+	if b.typ == 2 {
+		data := b.data
+		// Packed encoding
+		tmp := make([]uint64, 0, len(data)) // Maximally sized
+		for len(data) > 0 {
+			var u uint64
+			var err error
+
+			if u, data, err = decodeVarint(data); err != nil {
+				return err
+			}
+			tmp = append(tmp, u)
+		}
+		*x = append(*x, tmp...)
+		return nil
+	}
+	var u uint64
+	if err := decodeUint64(b, &u); err != nil {
+		return err
+	}
+	*x = append(*x, u)
+	return nil
+}
+
+// decodeString stores the buffer's length-delimited payload into *x,
+// copying it out of the input buffer.
+func decodeString(b *buffer, x *string) error {
+	if err := checkType(b, 2); err != nil {
+		return err
+	}
+	*x = string(b.data)
+	return nil
+}
+
+// decodeStrings appends the buffer's string payload to *x.
+func decodeStrings(b *buffer, x *[]string) error {
+	var s string
+	if err := decodeString(b, &s); err != nil {
+		return err
+	}
+	*x = append(*x, s)
+	return nil
+}
+
+// decodeBool stores the buffer's varint payload into *x; any nonzero
+// value is true.
+func decodeBool(b *buffer, x *bool) error {
+	if err := checkType(b, 0); err != nil {
+		return err
+	}
+	if int64(b.u64) == 0 {
+		*x = false
+	} else {
+		*x = true
+	}
+	return nil
+}

+ 178 - 0
vendor/github.com/google/pprof/profile/prune.go

@@ -0,0 +1,178 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Implements methods to remove frames from profiles.
+
+package profile
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+var (
+	// reservedNames are function-name substrings that contain '(' but
+	// must not be treated as the start of an argument list.
+	reservedNames = []string{"(anonymous namespace)", "operator()"}
+	// bracketRx matches any reserved name or a bare '('.
+	bracketRx     = func() *regexp.Regexp {
+		var quotedNames []string
+		for _, name := range append(reservedNames, "(") {
+			quotedNames = append(quotedNames, regexp.QuoteMeta(name))
+		}
+		return regexp.MustCompile(strings.Join(quotedNames, "|"))
+	}()
+)
+
+// simplifyFunc does some primitive simplification of function names.
+func simplifyFunc(f string) string {
+	// Account for leading '.' on the PPC ELF v1 ABI.
+	funcName := strings.TrimPrefix(f, ".")
+	// Account for unsimplified names -- try to remove the argument list by
+	// trimming starting from the first '(', but skipping reserved names
+	// that have '('.
+	for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
+		foundReserved := false
+		for _, res := range reservedNames {
+			if funcName[ind[0]:ind[1]] == res {
+				foundReserved = true
+				break
+			}
+		}
+		if !foundReserved {
+			// First '(' that is not part of a reserved name: cut here.
+			funcName = funcName[:ind[0]]
+			break
+		}
+	}
+	return funcName
+}
+
+// Prune removes all nodes beneath a node matching dropRx, and not
+// matching keepRx. If the root node of a Sample matches, the sample
+// will have an empty stack.
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
+	// prune: the whole location is removed; pruneBeneath: everything
+	// below the location in a sample's stack is removed.
+	prune := make(map[uint64]bool)
+	pruneBeneath := make(map[uint64]bool)
+
+	for _, loc := range p.Location {
+		var i int
+		// Scan inlined frames from the outermost entry (last) inward,
+		// stopping at the first frame that matches dropRx but not keepRx.
+		for i = len(loc.Line) - 1; i >= 0; i-- {
+			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+				funcName := simplifyFunc(fn.Name)
+				if dropRx.MatchString(funcName) {
+					if keepRx == nil || !keepRx.MatchString(funcName) {
+						break
+					}
+				}
+			}
+		}
+
+		if i >= 0 {
+			// Found matching entry to prune.
+			pruneBeneath[loc.ID] = true
+
+			// Remove the matching location.
+			if i == len(loc.Line)-1 {
+				// Matched the top entry: prune the whole location.
+				prune[loc.ID] = true
+			} else {
+				loc.Line = loc.Line[i+1:]
+			}
+		}
+	}
+
+	// Prune locs from each Sample
+	for _, sample := range p.Sample {
+		// Scan from the root to the leaves to find the prune location.
+		// Do not prune frames before the first user frame, to avoid
+		// pruning everything.
+		foundUser := false
+		for i := len(sample.Location) - 1; i >= 0; i-- {
+			id := sample.Location[i].ID
+			if !prune[id] && !pruneBeneath[id] {
+				foundUser = true
+				continue
+			}
+			if !foundUser {
+				continue
+			}
+			if prune[id] {
+				// Drop this frame and everything leaf-ward of it.
+				sample.Location = sample.Location[i+1:]
+				break
+			}
+			if pruneBeneath[id] {
+				// Keep this frame, drop everything leaf-ward of it.
+				sample.Location = sample.Location[i:]
+				break
+			}
+		}
+	}
+}
+
+// RemoveUninteresting prunes and elides profiles using built-in
+// tables of uninteresting function names.
+//
+// DropFrames and KeepFrames are anchored (^...$) before compiling;
+// KeepFrames is only consulted when DropFrames is nonempty.
+func (p *Profile) RemoveUninteresting() error {
+	var keep, drop *regexp.Regexp
+	var err error
+
+	if p.DropFrames != "" {
+		if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
+			return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
+		}
+		if p.KeepFrames != "" {
+			if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
+				return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
+			}
+		}
+		p.Prune(drop, keep)
+	}
+	return nil
+}
+
+// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
+//
+// Please see the example below to understand this method as well as
+// the difference from Prune method.
+//
+// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline.
+//
+// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
+// Prune(A, nil) returns [B,C,B,D] by removing A itself.
+//
+// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
+// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
+func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
+	pruneBeneath := make(map[uint64]bool)
+
+	for _, loc := range p.Location {
+		// Scan inlined frames from the first entry; on a match, keep the
+		// matching frame and everything after it.
+		for i := 0; i < len(loc.Line); i++ {
+			if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
+				funcName := simplifyFunc(fn.Name)
+				if dropRx.MatchString(funcName) {
+					// Found matching entry to prune.
+					pruneBeneath[loc.ID] = true
+					loc.Line = loc.Line[i:]
+					break
+				}
+			}
+		}
+	}
+
+	// Prune locs from each Sample
+	for _, sample := range p.Sample {
+		// Scan from the bottom leaf to the root to find the prune location.
+		for i, loc := range sample.Location {
+			if pruneBeneath[loc.ID] {
+				sample.Location = sample.Location[i:]
+				break
+			}
+		}
+	}
+}

+ 20 - 0
vendor/github.com/onsi/ginkgo/v2/LICENSE

@@ -0,0 +1,20 @@
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 69 - 0
vendor/github.com/onsi/ginkgo/v2/config/deprecated.go

@@ -0,0 +1,69 @@
+package config
+
+// GinkgoConfigType has been deprecated and its equivalent now lives in
+// the types package.  You can no longer access Ginkgo configuration from the config
+// package.  Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// GinkgoConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type GinkgoConfigType = DeprecatedGinkgoConfigType
+type DeprecatedGinkgoConfigType struct {
+	RandomSeed         int64
+	RandomizeAllSpecs  bool
+	RegexScansFilePath bool
+	FocusStrings       []string
+	SkipStrings        []string
+	SkipMeasurements   bool
+	FailOnPending      bool
+	FailFast           bool
+	FlakeAttempts      int
+	EmitSpecProgress   bool
+	DryRun             bool
+	DebugParallel      bool
+
+	ParallelNode  int
+	ParallelTotal int
+	SyncHost      string
+	StreamHost    string
+}
+
+// DefaultReporterConfigType has been deprecated and its equivalent now lives in
+// the types package.  You can no longer access Ginkgo configuration from the config
+// package.  Instead use the DSL's GinkgoConfiguration() function to get copies of the
+// current configuration
+//
+// DefaultReporterConfigType is still here so custom V1 reporters do not result in a compilation error
+// It will be removed in a future minor release of Ginkgo
+type DefaultReporterConfigType = DeprecatedDefaultReporterConfigType
+type DeprecatedDefaultReporterConfigType struct {
+	NoColor           bool
+	SlowSpecThreshold float64
+	NoisyPendings     bool
+	NoisySkippings    bool
+	Succinct          bool
+	Verbose           bool
+	FullTrace         bool
+	ReportPassed      bool
+	ReportFile        string
+}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{}
+
+// Sadly there is no way to gracefully deprecate access to these global config variables.
+// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method
+// These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails
+var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{}

+ 41 - 0
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_others.go

@@ -0,0 +1,41 @@
+// +build !windows
+
+/*
+These packages are used for colorize on Windows and contributed by [email protected]
+
+  * go-colorable: <https://github.com/mattn/go-colorable>
+  * go-isatty: <https://github.com/mattn/go-isatty>
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+	"io"
+	"os"
+)
+
+// newColorable returns the file unchanged: on non-Windows platforms
+// the terminal interprets ANSI escape sequences natively, so no
+// translating wrapper is needed.
+func newColorable(file *os.File) io.Writer {
+	return file
+}

+ 809 - 0
vendor/github.com/onsi/ginkgo/v2/formatter/colorable_windows.go

@@ -0,0 +1,809 @@
+/*
+These packages are used for colorize on Windows and contributed by [email protected]
+
+  * go-colorable: <https://github.com/mattn/go-colorable>
+  * go-isatty: <https://github.com/mattn/go-isatty>
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/
+
+package formatter
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+// Lazily loaded kernel32.dll procedures used to query and manipulate
+// the Windows console when translating ANSI escape sequences.
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
+	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+	procGetConsoleMode             = kernel32.NewProc("GetConsoleMode")
+)
+
+// isTerminal reports whether fd refers to a Windows console, determined
+// by whether GetConsoleMode succeeds on the handle.
+func isTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
+
+// Windows console character-attribute bits (see the Win32 console API's
+// FOREGROUND_*/BACKGROUND_* constants).
+const (
+	foregroundBlue      = 0x1
+	foregroundGreen     = 0x2
+	foregroundRed       = 0x4
+	foregroundIntensity = 0x8
+	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+	backgroundBlue      = 0x10
+	backgroundGreen     = 0x20
+	backgroundRed       = 0x40
+	backgroundIntensity = 0x80
+	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+// Win32 base types used by the console structs below.
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+// coord mirrors the Win32 COORD struct.
+type coord struct {
+	x short
+	y short
+}
+
+// smallRect mirrors the Win32 SMALL_RECT struct.
+type smallRect struct {
+	left   short
+	top    short
+	right  short
+	bottom short
+}
+
+// consoleScreenBufferInfo mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO
+// struct returned by GetConsoleScreenBufferInfo.
+type consoleScreenBufferInfo struct {
+	size              coord
+	cursorPosition    coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize coord
+}
+
+// writer translates ANSI escape sequences written to it into console
+// API calls on handle, forwarding plain text to out. oldattr holds the
+// attributes captured at construction so a reset can restore them.
+type writer struct {
+	out     io.Writer
+	handle  syscall.Handle
+	lastbuf bytes.Buffer
+	oldattr word
+}
+
+// newColorable wraps file in a writer that translates ANSI escape
+// sequences into Windows console API calls when file is attached to a
+// console; otherwise (e.g. a pipe or redirected file) the file is
+// returned unchanged.
+func newColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	if isTerminal(file.Fd()) {
+		// Capture the current attributes so a reset (ESC[0m) can restore them.
+		var csbi consoleScreenBufferInfo
+		handle := syscall.Handle(file.Fd())
+		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+		return &writer{out: file, handle: handle, oldattr: csbi.attributes}
+	} else {
+		return file
+	}
+}
+
+var color256 = map[int]int{
+	0:   0x000000,
+	1:   0x800000,
+	2:   0x008000,
+	3:   0x808000,
+	4:   0x000080,
+	5:   0x800080,
+	6:   0x008080,
+	7:   0xc0c0c0,
+	8:   0x808080,
+	9:   0xff0000,
+	10:  0x00ff00,
+	11:  0xffff00,
+	12:  0x0000ff,
+	13:  0xff00ff,
+	14:  0x00ffff,
+	15:  0xffffff,
+	16:  0x000000,
+	17:  0x00005f,
+	18:  0x000087,
+	19:  0x0000af,
+	20:  0x0000d7,
+	21:  0x0000ff,
+	22:  0x005f00,
+	23:  0x005f5f,
+	24:  0x005f87,
+	25:  0x005faf,
+	26:  0x005fd7,
+	27:  0x005fff,
+	28:  0x008700,
+	29:  0x00875f,
+	30:  0x008787,
+	31:  0x0087af,
+	32:  0x0087d7,
+	33:  0x0087ff,
+	34:  0x00af00,
+	35:  0x00af5f,
+	36:  0x00af87,
+	37:  0x00afaf,
+	38:  0x00afd7,
+	39:  0x00afff,
+	40:  0x00d700,
+	41:  0x00d75f,
+	42:  0x00d787,
+	43:  0x00d7af,
+	44:  0x00d7d7,
+	45:  0x00d7ff,
+	46:  0x00ff00,
+	47:  0x00ff5f,
+	48:  0x00ff87,
+	49:  0x00ffaf,
+	50:  0x00ffd7,
+	51:  0x00ffff,
+	52:  0x5f0000,
+	53:  0x5f005f,
+	54:  0x5f0087,
+	55:  0x5f00af,
+	56:  0x5f00d7,
+	57:  0x5f00ff,
+	58:  0x5f5f00,
+	59:  0x5f5f5f,
+	60:  0x5f5f87,
+	61:  0x5f5faf,
+	62:  0x5f5fd7,
+	63:  0x5f5fff,
+	64:  0x5f8700,
+	65:  0x5f875f,
+	66:  0x5f8787,
+	67:  0x5f87af,
+	68:  0x5f87d7,
+	69:  0x5f87ff,
+	70:  0x5faf00,
+	71:  0x5faf5f,
+	72:  0x5faf87,
+	73:  0x5fafaf,
+	74:  0x5fafd7,
+	75:  0x5fafff,
+	76:  0x5fd700,
+	77:  0x5fd75f,
+	78:  0x5fd787,
+	79:  0x5fd7af,
+	80:  0x5fd7d7,
+	81:  0x5fd7ff,
+	82:  0x5fff00,
+	83:  0x5fff5f,
+	84:  0x5fff87,
+	85:  0x5fffaf,
+	86:  0x5fffd7,
+	87:  0x5fffff,
+	88:  0x870000,
+	89:  0x87005f,
+	90:  0x870087,
+	91:  0x8700af,
+	92:  0x8700d7,
+	93:  0x8700ff,
+	94:  0x875f00,
+	95:  0x875f5f,
+	96:  0x875f87,
+	97:  0x875faf,
+	98:  0x875fd7,
+	99:  0x875fff,
+	100: 0x878700,
+	101: 0x87875f,
+	102: 0x878787,
+	103: 0x8787af,
+	104: 0x8787d7,
+	105: 0x8787ff,
+	106: 0x87af00,
+	107: 0x87af5f,
+	108: 0x87af87,
+	109: 0x87afaf,
+	110: 0x87afd7,
+	111: 0x87afff,
+	112: 0x87d700,
+	113: 0x87d75f,
+	114: 0x87d787,
+	115: 0x87d7af,
+	116: 0x87d7d7,
+	117: 0x87d7ff,
+	118: 0x87ff00,
+	119: 0x87ff5f,
+	120: 0x87ff87,
+	121: 0x87ffaf,
+	122: 0x87ffd7,
+	123: 0x87ffff,
+	124: 0xaf0000,
+	125: 0xaf005f,
+	126: 0xaf0087,
+	127: 0xaf00af,
+	128: 0xaf00d7,
+	129: 0xaf00ff,
+	130: 0xaf5f00,
+	131: 0xaf5f5f,
+	132: 0xaf5f87,
+	133: 0xaf5faf,
+	134: 0xaf5fd7,
+	135: 0xaf5fff,
+	136: 0xaf8700,
+	137: 0xaf875f,
+	138: 0xaf8787,
+	139: 0xaf87af,
+	140: 0xaf87d7,
+	141: 0xaf87ff,
+	142: 0xafaf00,
+	143: 0xafaf5f,
+	144: 0xafaf87,
+	145: 0xafafaf,
+	146: 0xafafd7,
+	147: 0xafafff,
+	148: 0xafd700,
+	149: 0xafd75f,
+	150: 0xafd787,
+	151: 0xafd7af,
+	152: 0xafd7d7,
+	153: 0xafd7ff,
+	154: 0xafff00,
+	155: 0xafff5f,
+	156: 0xafff87,
+	157: 0xafffaf,
+	158: 0xafffd7,
+	159: 0xafffff,
+	160: 0xd70000,
+	161: 0xd7005f,
+	162: 0xd70087,
+	163: 0xd700af,
+	164: 0xd700d7,
+	165: 0xd700ff,
+	166: 0xd75f00,
+	167: 0xd75f5f,
+	168: 0xd75f87,
+	169: 0xd75faf,
+	170: 0xd75fd7,
+	171: 0xd75fff,
+	172: 0xd78700,
+	173: 0xd7875f,
+	174: 0xd78787,
+	175: 0xd787af,
+	176: 0xd787d7,
+	177: 0xd787ff,
+	178: 0xd7af00,
+	179: 0xd7af5f,
+	180: 0xd7af87,
+	181: 0xd7afaf,
+	182: 0xd7afd7,
+	183: 0xd7afff,
+	184: 0xd7d700,
+	185: 0xd7d75f,
+	186: 0xd7d787,
+	187: 0xd7d7af,
+	188: 0xd7d7d7,
+	189: 0xd7d7ff,
+	190: 0xd7ff00,
+	191: 0xd7ff5f,
+	192: 0xd7ff87,
+	193: 0xd7ffaf,
+	194: 0xd7ffd7,
+	195: 0xd7ffff,
+	196: 0xff0000,
+	197: 0xff005f,
+	198: 0xff0087,
+	199: 0xff00af,
+	200: 0xff00d7,
+	201: 0xff00ff,
+	202: 0xff5f00,
+	203: 0xff5f5f,
+	204: 0xff5f87,
+	205: 0xff5faf,
+	206: 0xff5fd7,
+	207: 0xff5fff,
+	208: 0xff8700,
+	209: 0xff875f,
+	210: 0xff8787,
+	211: 0xff87af,
+	212: 0xff87d7,
+	213: 0xff87ff,
+	214: 0xffaf00,
+	215: 0xffaf5f,
+	216: 0xffaf87,
+	217: 0xffafaf,
+	218: 0xffafd7,
+	219: 0xffafff,
+	220: 0xffd700,
+	221: 0xffd75f,
+	222: 0xffd787,
+	223: 0xffd7af,
+	224: 0xffd7d7,
+	225: 0xffd7ff,
+	226: 0xffff00,
+	227: 0xffff5f,
+	228: 0xffff87,
+	229: 0xffffaf,
+	230: 0xffffd7,
+	231: 0xffffff,
+	232: 0x080808,
+	233: 0x121212,
+	234: 0x1c1c1c,
+	235: 0x262626,
+	236: 0x303030,
+	237: 0x3a3a3a,
+	238: 0x444444,
+	239: 0x4e4e4e,
+	240: 0x585858,
+	241: 0x626262,
+	242: 0x6c6c6c,
+	243: 0x767676,
+	244: 0x808080,
+	245: 0x8a8a8a,
+	246: 0x949494,
+	247: 0x9e9e9e,
+	248: 0xa8a8a8,
+	249: 0xb2b2b2,
+	250: 0xbcbcbc,
+	251: 0xc6c6c6,
+	252: 0xd0d0d0,
+	253: 0xdadada,
+	254: 0xe4e4e4,
+	255: 0xeeeeee,
+}
+
+func (w *writer) Write(data []byte) (n int, err error) {
+	var csbi consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+	er := bytes.NewBuffer(data)
+loop:
+	for {
+		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		if r1 == 0 {
+			break loop
+		}
+
+		c1, _, err := er.ReadRune()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			fmt.Fprint(w.out, string(c1))
+			continue
+		}
+		c2, _, err := er.ReadRune()
+		if err != nil {
+			w.lastbuf.WriteRune(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteRune(c1)
+			w.lastbuf.WriteRune(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		var m rune
+		for {
+			c, _, err := er.ReadRune()
+			if err != nil {
+				w.lastbuf.WriteRune(c1)
+				w.lastbuf.WriteRune(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				m = c
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+
+		var csbi consoleScreenBufferInfo
+		switch m {
+		case 'A':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'B':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'C':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'D':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			if n, err = strconv.Atoi(buf.String()); err == nil {
+				var csbi consoleScreenBufferInfo
+				procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+				csbi.cursorPosition.x += short(n)
+				procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+			}
+		case 'E':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'F':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'G':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H':
+			token := strings.Split(buf.String(), ";")
+			if len(token) != 2 {
+				continue
+			}
+			n1, err := strconv.Atoi(token[0])
+			if err != nil {
+				continue
+			}
+			n2, err := strconv.Atoi(token[1])
+			if err != nil {
+				continue
+			}
+			csbi.cursorPosition.x = short(n2)
+			csbi.cursorPosition.x = short(n1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'J':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'K':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'm':
+			attr := csbi.attributes
+			cs := buf.String()
+			if cs == "" {
+				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				continue
+			}
+			token := strings.Split(cs, ";")
+			for i := 0; i < len(token); i += 1 {
+				ns := token[i]
+				if n, err = strconv.Atoi(ns); err == nil {
+					switch {
+					case n == 0 || n == 100:
+						attr = w.oldattr
+					case 1 <= n && n <= 5:
+						attr |= foregroundIntensity
+					case n == 7:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 22 == n || n == 25 || n == 25:
+						attr |= foregroundIntensity
+					case n == 27:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 30 <= n && n <= 37:
+						attr = (attr & backgroundMask)
+						if (n-30)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-30)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-30)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case n == 38: // set foreground color.
+						if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256foreAttr == nil {
+									n256setup()
+								}
+								attr &= backgroundMask
+								attr |= n256foreAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & backgroundMask)
+						}
+					case n == 39: // reset foreground color.
+						attr &= backgroundMask
+						attr |= w.oldattr & foregroundMask
+					case 40 <= n && n <= 47:
+						attr = (attr & foregroundMask)
+						if (n-40)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-40)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-40)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					case n == 48: // set background color.
+						if i < len(token)-2 && token[i+1] == "5" {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256backAttr == nil {
+									n256setup()
+								}
+								attr &= foregroundMask
+								attr |= n256backAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & foregroundMask)
+						}
+					case n == 49: // reset foreground color.
+						attr &= foregroundMask
+						attr |= w.oldattr & backgroundMask
+					case 90 <= n && n <= 97:
+						attr = (attr & backgroundMask)
+						attr |= foregroundIntensity
+						if (n-90)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-90)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-90)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case 100 <= n && n <= 107:
+						attr = (attr & foregroundMask)
+						attr |= backgroundIntensity
+						if (n-100)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-100)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-100)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					}
+					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+				}
+			}
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+	rgb       int
+	red       bool
+	green     bool
+	blue      bool
+	intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+	if c.red {
+		attr |= foregroundRed
+	}
+	if c.green {
+		attr |= foregroundGreen
+	}
+	if c.blue {
+		attr |= foregroundBlue
+	}
+	if c.intensity {
+		attr |= foregroundIntensity
+	}
+	return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+	if c.red {
+		attr |= backgroundRed
+	}
+	if c.green {
+		attr |= backgroundGreen
+	}
+	if c.blue {
+		attr |= backgroundBlue
+	}
+	if c.intensity {
+		attr |= backgroundIntensity
+	}
+	return
+}
+
+var color16 = []consoleColor{
+	consoleColor{0x000000, false, false, false, false},
+	consoleColor{0x000080, false, false, true, false},
+	consoleColor{0x008000, false, true, false, false},
+	consoleColor{0x008080, false, true, true, false},
+	consoleColor{0x800000, true, false, false, false},
+	consoleColor{0x800080, true, false, true, false},
+	consoleColor{0x808000, true, true, false, false},
+	consoleColor{0xc0c0c0, true, true, true, false},
+	consoleColor{0x808080, false, false, false, true},
+	consoleColor{0x0000ff, false, false, true, true},
+	consoleColor{0x00ff00, false, true, false, true},
+	consoleColor{0x00ffff, false, true, true, true},
+	consoleColor{0xff0000, true, false, false, true},
+	consoleColor{0xff00ff, true, false, true, true},
+	consoleColor{0xffff00, true, true, false, true},
+	consoleColor{0xffffff, true, true, true, true},
+}
+
// hsv is a color in hue/saturation/value space; each component is
// scaled into [0, 1].
type hsv struct {
	h, s, v float32
}

// dist returns the Euclidean distance between two HSV colors, treating
// hue as circular: a raw hue difference beyond 0.5 wraps around, so a
// difference of 0.9 counts as 0.1.
func (a hsv) dist(b hsv) float32 {
	dh := a.h - b.h
	if dh > 0.5 {
		dh = 1 - dh
	} else if dh < -0.5 {
		dh = -1 - dh
	}
	ds := a.s - b.s
	dv := a.v - b.v
	sum := float64(dh*dh + ds*ds + dv*dv)
	return float32(math.Sqrt(sum))
}
+
// toHSV converts a 24-bit 0xRRGGBB value into HSV space with all three
// components scaled into [0, 1): hue derived from the dominant channel,
// saturation as (max-min)/max, value as max.
func toHSV(rgb int) hsv {
	r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
		float32((rgb&0x00FF00)>>8)/256.0,
		float32(rgb&0x0000FF)/256.0
	min, max := minmax3f(r, g, b)
	h := max - min
	if h > 0 {
		// Pick the hue sector (0-6) based on which channel dominates.
		if max == r {
			h = (g - b) / h
			if h < 0 {
				h += 6
			}
		} else if max == g {
			h = 2 + (b-r)/h
		} else {
			h = 4 + (r-g)/h
		}
	}
	h /= 6.0 // normalize sector index into [0, 1)
	s := max - min
	if max != 0 {
		s /= max
	}
	v := max
	return hsv{h: h, s: s, v: v}
}
+
// hsvTable is a console palette converted to HSV, used for
// nearest-color matching.
type hsvTable []hsv

// toHSVTable converts an RGB console palette into its HSV equivalent,
// preserving index order.
func toHSVTable(rgbTable []consoleColor) hsvTable {
	t := make(hsvTable, len(rgbTable))
	for i, c := range rgbTable {
		t[i] = toHSV(c.rgb)
	}
	return t
}
+
// find returns the entry of the 16-color palette whose HSV distance to
// the given 24-bit RGB value is smallest.
func (t hsvTable) find(rgb int) consoleColor {
	hsv := toHSV(rgb)
	n := 7            // fallback index (light gray) if nothing beats the threshold
	l := float32(5.0) // larger than any possible dist(), so the first entry always wins
	for i, p := range t {
		d := hsv.dist(p)
		if d < l {
			l, n = d, i
		}
	}
	return color16[n]
}
+
// minmax3f returns the smallest and largest of three float32 values.
func minmax3f(a, b, c float32) (min, max float32) {
	min, max = a, a
	if b < min {
		min = b
	}
	if b > max {
		max = b
	}
	if c < min {
		min = c
	}
	if c > max {
		max = c
	}
	return
}
+
// n256foreAttr and n256backAttr map each xterm 256-color index to the
// nearest 16-color console attribute; both are built lazily by n256setup.
var n256foreAttr []word
var n256backAttr []word

// n256setup populates the 256-color lookup tables by matching every
// entry of color256 against the HSV-converted 16-color palette.
func n256setup() {
	n256foreAttr = make([]word, 256)
	n256backAttr = make([]word, 256)
	t := toHSVTable(color16)
	for i, rgb := range color256 {
		c := t.find(rgb)
		n256foreAttr[i] = c.foregroundAttr()
		n256backAttr[i] = c.backgroundAttr()
	}
}

+ 195 - 0
vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go

@@ -0,0 +1,195 @@
+package formatter
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+)
+
// ColorableStdOut and ColorableStdErr enable color output support on Windows
var ColorableStdOut = newColorable(os.Stdout)
var ColorableStdErr = newColorable(os.Stderr)

// COLS is the column width used when word-wrapping help and report text.
const COLS = 80

// ColorMode selects how {{style}} tags in format strings are handled.
type ColorMode uint8

const (
	ColorModeNone        ColorMode = iota // strip style tags entirely
	ColorModeTerminal                     // replace style tags with ANSI escape codes
	ColorModePassthrough                  // leave style tags untouched in the output
)

// SingletonFormatter backs the package-level F/Fi/Fiw convenience functions.
var SingletonFormatter = New(ColorModeTerminal)
+
// F formats using the singleton formatter (see Formatter.F).
func F(format string, args ...interface{}) string {
	return SingletonFormatter.F(format, args...)
}

// Fi formats with the given indentation level (see Formatter.Fi).
func Fi(indentation uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fi(indentation, format, args...)
}

// Fiw formats with indentation and wraps lines at maxWidth (see Formatter.Fiw).
func Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	return SingletonFormatter.Fiw(indentation, maxWidth, format, args...)
}
+
// Formatter renders format strings containing {{style}} tags according
// to its ColorMode.
type Formatter struct {
	ColorMode                ColorMode
	colors                   map[string]string // style tag -> ANSI escape sequence
	styleRe                  *regexp.Regexp    // matches any known {{tag}}
	preserveColorStylingTags bool
}

// NewWithNoColorBool returns a tag-stripping formatter when noColor is
// true, and a terminal (ANSI) formatter otherwise.
func NewWithNoColorBool(noColor bool) Formatter {
	if noColor {
		return New(ColorModeNone)
	}
	return New(ColorModeTerminal)
}
+
// New builds a Formatter for the given color mode, wiring up the table
// of supported style tags and a regexp that matches any of them.
func New(colorMode ColorMode) Formatter {
	f := Formatter{
		ColorMode: colorMode,
		colors: map[string]string{
			"/":         "\x1b[0m",
			"bold":      "\x1b[1m",
			"underline": "\x1b[4m",

			"red":          "\x1b[38;5;9m",
			"orange":       "\x1b[38;5;214m",
			"coral":        "\x1b[38;5;204m",
			"magenta":      "\x1b[38;5;13m",
			"green":        "\x1b[38;5;10m",
			"dark-green":   "\x1b[38;5;28m",
			"yellow":       "\x1b[38;5;11m",
			"light-yellow": "\x1b[38;5;228m",
			"cyan":         "\x1b[38;5;14m",
			"gray":         "\x1b[38;5;243m",
			"light-gray":   "\x1b[38;5;246m",
			"blue":         "\x1b[38;5;12m",
		},
	}
	// Build one alternation regexp over every known tag name, so style()
	// can replace them all in a single pass; map iteration order does
	// not matter for an alternation.
	colors := []string{}
	for color := range f.colors {
		colors = append(colors, color)
	}
	f.styleRe = regexp.MustCompile("{{(" + strings.Join(colors, "|") + ")}}")
	return f
}
+
// F formats and styles a string with no indentation and no wrapping.
func (f Formatter) F(format string, args ...interface{}) string {
	return f.Fi(0, format, args...)
}

// Fi formats, styles, and indents a string with no wrapping.
func (f Formatter) Fi(indentation uint, format string, args ...interface{}) string {
	return f.Fiw(indentation, 0, format, args...)
}
+
// Fiw formats and styles the string, greedily word-wraps each line at
// maxWidth visible characters (0 disables wrapping), then prefixes
// every non-empty line with two spaces per indentation level.
func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string {
	out := fmt.Sprintf(f.style(format), args...)

	if indentation == 0 && maxWidth == 0 {
		return out
	}

	lines := strings.Split(out, "\n")

	if maxWidth != 0 {
		outLines := []string{}

		// Budget shrinks by the indentation padding added below.
		// NOTE(review): this uint subtraction underflows when
		// maxWidth < indentation*2 — presumably callers always pass a
		// sufficiently large maxWidth (e.g. COLS); confirm.
		maxWidth = maxWidth - indentation*2
		for _, line := range lines {
			if f.length(line) <= maxWidth {
				outLines = append(outLines, line)
				continue
			}
			// Greedy wrap: accumulate words until the next one would
			// overflow, then flush and start a new line.
			words := strings.Split(line, " ")
			outWords := []string{words[0]}
			length := uint(f.length(words[0]))
			for _, word := range words[1:] {
				wordLength := f.length(word)
				if length+wordLength+1 <= maxWidth {
					length += wordLength + 1
					outWords = append(outWords, word)
					continue
				}
				outLines = append(outLines, strings.Join(outWords, " "))
				outWords = []string{word}
				length = wordLength
			}
			if len(outWords) > 0 {
				outLines = append(outLines, strings.Join(outWords, " "))
			}
		}

		lines = outLines
	}

	if indentation == 0 {
		return strings.Join(lines, "\n")
	}

	padding := strings.Repeat("  ", int(indentation))
	for i := range lines {
		if lines[i] != "" {
			lines[i] = padding + lines[i]
		}
	}

	return strings.Join(lines, "\n")
}
+
+func (f Formatter) length(styled string) uint {
+	n := uint(0)
+	inStyle := false
+	for _, b := range styled {
+		if inStyle {
+			if b == 'm' {
+				inStyle = false
+			}
+			continue
+		}
+		if b == '\x1b' {
+			inStyle = true
+			continue
+		}
+		n += 1
+	}
+	return n
+}
+
// CycleJoin joins elements with joiner, prefixing each element with the
// next entry of cycle (wrapping around), appends a closing {{/}} tag,
// and renders the result through the formatter's style handling.
func (f Formatter) CycleJoin(elements []string, joiner string, cycle []string) string {
	if len(elements) == 0 {
		return ""
	}
	n := len(cycle)
	out := ""
	for i, text := range elements {
		out += cycle[i%n] + text
		if i < len(elements)-1 {
			out += joiner
		}
	}
	out += "{{/}}"
	return f.style(out)
}
+
// style expands or strips the {{tag}} markers in s according to the
// formatter's ColorMode: removed for ColorModeNone, kept verbatim for
// ColorModePassthrough, replaced by ANSI sequences for ColorModeTerminal.
func (f Formatter) style(s string) string {
	switch f.ColorMode {
	case ColorModeNone:
		return f.styleRe.ReplaceAllString(s, "")
	case ColorModePassthrough:
		return s
	case ColorModeTerminal:
		return f.styleRe.ReplaceAllStringFunc(s, func(match string) string {
			if out, ok := f.colors[strings.Trim(match, "{}")]; ok {
				return out
			}
			return match
		})
	}

	// Unreachable for the defined ColorMode values.
	return ""
}

+ 61 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go

@@ -0,0 +1,61 @@
+package command
+
+import "fmt"
+
// AbortDetails carries the information needed to terminate the CLI:
// the process exit code, an optional error to report, and whether the
// command's usage text should also be printed.
type AbortDetails struct {
	ExitCode  int
	Error     error
	EmitUsage bool
}

// Abort unwinds the program by panicking with details; the recover
// handler installed by Program.RunAndExit turns this into a clean exit.
func Abort(details AbortDetails) {
	panic(details)
}

// AbortGracefullyWith reports a formatted message and exits with code 0.
func AbortGracefullyWith(format string, args ...interface{}) {
	Abort(AbortDetails{Error: fmt.Errorf(format, args...)})
}

// AbortWith reports a formatted error and exits with code 1.
func AbortWith(format string, args ...interface{}) {
	Abort(AbortDetails{ExitCode: 1, Error: fmt.Errorf(format, args...)})
}

// AbortWithUsage reports a formatted error, requests usage output, and
// exits with code 1.
func AbortWithUsage(format string, args ...interface{}) {
	Abort(AbortDetails{ExitCode: 1, Error: fmt.Errorf(format, args...), EmitUsage: true})
}

// AbortIfError does nothing for a nil err; otherwise it aborts with
// code 1, placing preamble and the error message on separate lines.
func AbortIfError(preamble string, err error) {
	if err == nil {
		return
	}
	Abort(AbortDetails{ExitCode: 1, Error: fmt.Errorf("%s\n%s", preamble, err.Error())})
}

// AbortIfErrors does nothing for an empty slice; otherwise it aborts
// with code 1, concatenating every error message after preamble.
func AbortIfErrors(preamble string, errors []error) {
	if len(errors) == 0 {
		return
	}
	out := ""
	for _, err := range errors {
		out += err.Error()
	}
	Abort(AbortDetails{ExitCode: 1, Error: fmt.Errorf("%s\n%s", preamble, out)})
}

+ 50 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go

@@ -0,0 +1,50 @@
+package command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/onsi/ginkgo/v2/formatter"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
// Command describes a single ginkgo CLI subcommand: its flag set,
// usage/help text, a documentation link anchor, and the function that
// implements it.
type Command struct {
	Name          string
	Flags         types.GinkgoFlagSet
	Usage         string
	ShortDoc      string
	Documentation string
	DocLink       string
	Command       func(args []string, additionalArgs []string)
}

// Run parses args against the command's flag set and invokes the
// command body with the remaining arguments. A flag-parse failure
// aborts with usage output (AbortWithUsage panics, so it never returns).
func (c Command) Run(args []string, additionalArgs []string) {
	args, err := c.Flags.Parse(args)
	if err != nil {
		AbortWithUsage(err.Error())
	}

	c.Command(args, additionalArgs)
}
+
+func (c Command) EmitUsage(writer io.Writer) {
+	fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}"))
+	fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage))))
+	if c.ShortDoc != "" {
+		fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc))
+		fmt.Fprintln(writer, "")
+	}
+	if c.Documentation != "" {
+		fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation))
+		fmt.Fprintln(writer, "")
+	}
+	if c.DocLink != "" {
+		fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink))
+		fmt.Fprintln(writer, "")
+	}
+	flagUsage := c.Flags.Usage()
+	if flagUsage != "" {
+		fmt.Fprintf(writer, formatter.F(flagUsage))
+	}
+}

+ 182 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go

@@ -0,0 +1,182 @@
+package command
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/onsi/ginkgo/v2/formatter"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
// Program is the top-level ginkgo CLI: a heading, a set of named
// subcommands, a default command used when no subcommand is named, and
// a list of retired commands that emit deprecation notices.
type Program struct {
	Name               string
	Heading            string
	Commands           []Command
	DefaultCommand     Command
	DeprecatedCommands []DeprecatedCommand

	//For testing - leave as nil in production
	OutWriter io.Writer
	ErrWriter io.Writer
	Exiter    func(code int)
}

// DeprecatedCommand names a retired subcommand and the deprecation
// notice to emit when it is invoked.
type DeprecatedCommand struct {
	Name        string
	Deprecation types.Deprecation
}
+
// RunAndExit is the CLI entry point: it splits osArgs into command
// arguments and pass-through arguments (separated by "--"), selects the
// matching subcommand (falling back to DefaultCommand), runs it, and
// exits the process. Aborts raised via Abort anywhere below are
// recovered here and converted into diagnostics plus an exit code.
func (p Program) RunAndExit(osArgs []string) {
	var command Command
	deprecationTracker := types.NewDeprecationTracker()
	// Default the test seams to their production values.
	if p.Exiter == nil {
		p.Exiter = os.Exit
	}
	if p.OutWriter == nil {
		p.OutWriter = formatter.ColorableStdOut
	}
	if p.ErrWriter == nil {
		p.ErrWriter = formatter.ColorableStdErr
	}

	// This deferred handler is the only exit path: it turns an Abort
	// panic into error/usage output and an exit code, and re-panics on
	// anything that is not an AbortDetails.
	defer func() {
		exitCode := 0

		if r := recover(); r != nil {
			details, ok := r.(AbortDetails)
			if !ok {
				panic(r)
			}

			if details.Error != nil {
				fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name))
				fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error()))
			}
			if details.EmitUsage {
				if details.Error != nil {
					fmt.Fprintln(p.ErrWriter, "")
				}
				command.EmitUsage(p.ErrWriter)
			}
			exitCode = details.ExitCode
		}

		// Report any deprecated flags or commands that were exercised.
		command.Flags.ValidateDeprecations(deprecationTracker)
		if deprecationTracker.DidTrackDeprecations() {
			fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport())
		}
		p.Exiter(exitCode)
		return
	}()

	// Everything after a literal "--" is passed through untouched to
	// the command as additionalArgs.
	args, additionalArgs := []string{}, []string{}

	foundDelimiter := false
	for _, arg := range osArgs[1:] {
		if !foundDelimiter {
			if arg == "--" {
				foundDelimiter = true
				continue
			}
		}

		if foundDelimiter {
			additionalArgs = append(additionalArgs, arg)
		} else {
			args = append(args, arg)
		}
	}

	command = p.DefaultCommand
	if len(args) > 0 {
		// Help requests print usage and abort; otherwise resolve the
		// subcommand named by the first argument.
		p.handleHelpRequestsAndExit(p.OutWriter, args)
		if command.Name == args[0] {
			args = args[1:]
		} else {
			// A deprecated command only emits its notice (via the
			// deferred handler) and runs nothing.
			for _, deprecatedCommand := range p.DeprecatedCommands {
				if deprecatedCommand.Name == args[0] {
					deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation)
					return
				}
			}
			for _, tryCommand := range p.Commands {
				if tryCommand.Name == args[0] {
					command, args = tryCommand, args[1:]
					break
				}
			}
		}
	}

	command.Run(args, additionalArgs)
}
+
// handleHelpRequestsAndExit prints usage and aborts when args amounts
// to a help request ("help", or -h/--h/-help/--help) — either for the
// program as a whole or for a named subcommand. Otherwise it returns
// and lets normal command dispatch proceed.
func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) {
	if len(args) == 0 {
		return
	}

	matchesHelpFlag := func(args ...string) bool {
		for _, arg := range args {
			if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" {
				return true
			}
		}
		return false
	}
	if len(args) == 1 {
		if args[0] == "help" || matchesHelpFlag(args[0]) {
			p.EmitUsage(writer)
			Abort(AbortDetails{})
		}
	} else {
		// Determine which command help is requested for: either
		// "help COMMAND" or "COMMAND --help".
		var name string
		if args[0] == "help" || matchesHelpFlag(args[0]) {
			name = args[1]
		} else if matchesHelpFlag(args[1:]...) {
			name = args[0]
		} else {
			return
		}

		if p.DefaultCommand.Name == name || p.Name == name {
			p.DefaultCommand.EmitUsage(writer)
			Abort(AbortDetails{})
		}
		for _, command := range p.Commands {
			if command.Name == name {
				command.EmitUsage(writer)
				Abort(AbortDetails{})
			}
		}

		// Help was requested for an unrecognized command.
		fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name))
		fmt.Fprintln(writer, "")
		p.EmitUsage(writer)
		Abort(AbortDetails{ExitCode: 1})
	}
	return
}
+
// EmitUsage prints program-level help: the heading, instructions for
// per-command help, then a one-line summary of the default command and
// every registered subcommand.
func (p Program) EmitUsage(writer io.Writer) {
	fmt.Fprintln(writer, formatter.F(p.Heading))
	fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading))))
	fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name))
	fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name))
	fmt.Fprintln(writer, "")
	fmt.Fprintln(writer, formatter.F("The following commands are available:"))

	// The default command is listed first, reachable both by the bare
	// program name and by its own name.
	fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage))
	if p.DefaultCommand.ShortDoc != "" {
		fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc))
	}

	for _, command := range p.Commands {
		fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage))
		if command.ShortDoc != "" {
			fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc))
		}
	}
}

+ 48 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go

@@ -0,0 +1,48 @@
+package generators
+
// bootstrapText is the text/template for a standard suite bootstrap
// file; template fields are supplied by bootstrapData.
var bootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
)

func Test{{.FormattedName}}(t *testing.T) {
	{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
	{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
}
`

// agoutiBootstrapText is the bootstrap template used with --agouti; it
// additionally starts and stops an agouti WebDriver around the suite.
var agoutiBootstrapText = `package {{.Package}}

import (
	"testing"

	{{.GinkgoImport}}
	{{.GomegaImport}}
	"github.com/sclevine/agouti"
)

func Test{{.FormattedName}}(t *testing.T) {
	{{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail)
	{{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite")
}

var agoutiDriver *agouti.WebDriver

var _ = {{.GinkgoPackage}}BeforeSuite(func() {
	// Choose a WebDriver:

	agoutiDriver = agouti.PhantomJS()
	// agoutiDriver = agouti.Selenium()
	// agoutiDriver = agouti.ChromeDriver()

	{{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed())
})

var _ = {{.GinkgoPackage}}AfterSuite(func() {
	{{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed())
})
`

+ 113 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go

@@ -0,0 +1,113 @@
+package generators
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"text/template"
+
+	sprig "github.com/go-task/slim-sprig"
+	"github.com/onsi/ginkgo/v2/ginkgo/command"
+	"github.com/onsi/ginkgo/v2/ginkgo/internal"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
// BuildBootstrapCommand constructs the "ginkgo bootstrap" subcommand,
// wiring its flags (--agouti, --nodot, --internal, --template) into a
// shared GeneratorsConfig that the command body reads.
func BuildBootstrapCommand() command.Command {
	conf := GeneratorsConfig{}
	flags, err := types.NewGinkgoFlagSet(
		types.GinkgoFlags{
			{Name: "agouti", KeyPath: "Agouti",
				Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"},
			{Name: "nodot", KeyPath: "NoDot",
				Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"},
			{Name: "internal", KeyPath: "Internal",
				Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
			{Name: "template", KeyPath: "CustomTemplate",
				UsageArgument: "template-file",
				Usage:         "If specified, generate will use the contents of the file passed as the bootstrap template"},
		},
		&conf,
		types.GinkgoFlagSections{},
	)

	// The flag definitions above are static; a construction error here
	// is a programmer bug, not a user error.
	if err != nil {
		panic(err)
	}

	return command.Command{
		Name:     "bootstrap",
		Usage:    "ginkgo bootstrap",
		ShortDoc: "Bootstrap a test suite for the current package",
		Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure.

{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`,
		DocLink: "generators",
		Flags:   flags,
		Command: func(_ []string, _ []string) {
			generateBootstrap(conf)
		},
	}
}
+
// bootstrapData is the template payload for bootstrapText and
// agoutiBootstrapText: package naming plus the ginkgo/gomega import
// forms (dot-import vs. qualified, controlled by --nodot).
type bootstrapData struct {
	Package       string
	FormattedName string

	GinkgoImport  string
	GomegaImport  string
	GinkgoPackage string
	GomegaPackage string
}
+
+func generateBootstrap(conf GeneratorsConfig) {
+	packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName()
+
+	data := bootstrapData{
+		Package:       determinePackageName(packageName, conf.Internal),
+		FormattedName: formattedName,
+
+		GinkgoImport:  `. "github.com/onsi/ginkgo/v2"`,
+		GomegaImport:  `. "github.com/onsi/gomega"`,
+		GinkgoPackage: "",
+		GomegaPackage: "",
+	}
+
+	if conf.NoDot {
+		data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+		data.GomegaImport = `"github.com/onsi/gomega"`
+		data.GinkgoPackage = `ginkgo.`
+		data.GomegaPackage = `gomega.`
+	}
+
+	targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix)
+	if internal.FileExists(targetFile) {
+		command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+	} else {
+		fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile)
+	}
+
+	f, err := os.Create(targetFile)
+	command.AbortIfError("Failed to create file:", err)
+	defer f.Close()
+
+	var templateText string
+	if conf.CustomTemplate != "" {
+		tpl, err := os.ReadFile(conf.CustomTemplate)
+		command.AbortIfError("Failed to read custom bootstrap file:", err)
+		templateText = string(tpl)
+	} else if conf.Agouti {
+		templateText = agoutiBootstrapText
+	} else {
+		templateText = bootstrapText
+	}
+
+	bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Parse(templateText)
+	command.AbortIfError("Failed to parse bootstrap template:", err)
+
+	buf := &bytes.Buffer{}
+	bootstrapTemplate.Execute(buf, data)
+
+	buf.WriteTo(f)
+
+	internal.GoFmt(targetFile)
+}

+ 239 - 0
vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go

@@ -0,0 +1,239 @@
+package generators
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"text/template"
+
+	sprig "github.com/go-task/slim-sprig"
+	"github.com/onsi/ginkgo/v2/ginkgo/command"
+	"github.com/onsi/ginkgo/v2/ginkgo/internal"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
// BuildGenerateCommand constructs the "ginkgo generate" subcommand,
// wiring its flags (--agouti, --nodot, --internal, --template) into a
// shared GeneratorsConfig that the command body reads.
func BuildGenerateCommand() command.Command {
	conf := GeneratorsConfig{}
	flags, err := types.NewGinkgoFlagSet(
		types.GinkgoFlags{
			{Name: "agouti", KeyPath: "Agouti",
				Usage: "If set, generate will create a test file for writing Agouti tests"},
			{Name: "nodot", KeyPath: "NoDot",
				Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"},
			{Name: "internal", KeyPath: "Internal",
				Usage: "If set, generate will create a test file that uses the regular package name (i.e. `package X`, not `package X_test`)"},
			{Name: "template", KeyPath: "CustomTemplate",
				UsageArgument: "template-file",
				Usage:         "If specified, generate will use the contents of the file passed as the test file template"},
		},
		&conf,
		types.GinkgoFlagSections{},
	)

	// The flag definitions above are static; a construction error here
	// is a programmer bug, not a user error.
	if err != nil {
		panic(err)
	}

	return command.Command{
		Name:     "generate",
		Usage:    "ginkgo generate <filename(s)>",
		ShortDoc: "Generate a test file named <filename>_test.go",
		Documentation: `If the optional <filename> argument is omitted, a file named after the package in the current directory will be created.

You can pass multiple <filename(s)> to generate multiple files simultaneously.  The resulting files are named <filename>_test.go.

You can also pass a <filename> of the form "file.go" and generate will emit "file_test.go".`,
		DocLink: "generators",
		Flags:   flags,
		Command: func(args []string, _ []string) {
			generateTestFiles(conf, args)
		},
	}
}

// specData is the template payload for the spec-file templates: package
// and subject naming plus the ginkgo/gomega import forms (dot-import
// vs. qualified, controlled by --nodot).
type specData struct {
	Package           string
	Subject           string
	PackageImportPath string
	ImportPackage     bool

	GinkgoImport  string
	GomegaImport  string
	GinkgoPackage string
	GomegaPackage string
}
+
+func generateTestFiles(conf GeneratorsConfig, args []string) {
+	subjects := args
+	if len(subjects) == 0 {
+		subjects = []string{""}
+	}
+	for _, subject := range subjects {
+		generateTestFileForSubject(subject, conf)
+	}
+}
+
+func generateTestFileForSubject(subject string, conf GeneratorsConfig) {
+	packageName, specFilePrefix, formattedName := getPackageAndFormattedName()
+	if subject != "" {
+		specFilePrefix = formatSubject(subject)
+		formattedName = prettifyName(specFilePrefix)
+	}
+
+	if conf.Internal {
+		specFilePrefix = specFilePrefix + "_internal"
+	}
+
+	data := specData{
+		Package:           determinePackageName(packageName, conf.Internal),
+		Subject:           formattedName,
+		PackageImportPath: getPackageImportPath(),
+		ImportPackage:     !conf.Internal,
+
+		GinkgoImport:  `. "github.com/onsi/ginkgo/v2"`,
+		GomegaImport:  `. "github.com/onsi/gomega"`,
+		GinkgoPackage: "",
+		GomegaPackage: "",
+	}
+
+	if conf.NoDot {
+		data.GinkgoImport = `"github.com/onsi/ginkgo/v2"`
+		data.GomegaImport = `"github.com/onsi/gomega"`
+		data.GinkgoPackage = `ginkgo.`
+		data.GomegaPackage = `gomega.`
+	}
+
+	targetFile := fmt.Sprintf("%s_test.go", specFilePrefix)
+	if internal.FileExists(targetFile) {
+		command.AbortWith("{{bold}}%s{{/}} already exists", targetFile)
+	} else {
+		fmt.Printf("Generating ginkgo test for %s in:\n  %s\n", data.Subject, targetFile)
+	}
+
+	f, err := os.Create(targetFile)
+	command.AbortIfError("Failed to create test file:", err)
+	defer f.Close()
+
+	var templateText string
+	if conf.CustomTemplate != "" {
+		tpl, err := os.ReadFile(conf.CustomTemplate)
+		command.AbortIfError("Failed to read custom template file:", err)
+		templateText = string(tpl)
+	} else if conf.Agouti {
+		templateText = agoutiSpecText
+	} else {
+		templateText = specText
+	}
+
+	specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Parse(templateText)
+	command.AbortIfError("Failed to read parse test template:", err)
+
+	specTemplate.Execute(f, data)
+	internal.GoFmt(targetFile)
+}
+
// formatSubject normalizes a user-supplied subject into a file-name
// prefix: dashes and spaces become underscores, and any ".go" or
// "_test" suffix (and everything after it) is dropped.
func formatSubject(name string) string {
	name = strings.NewReplacer("-", "_", " ", "_").Replace(name)
	if i := strings.Index(name, ".go"); i >= 0 {
		name = name[:i]
	}
	if i := strings.Index(name, "_test"); i >= 0 {
		name = name[:i]
	}
	return name
}
+
// moduleName returns the module path declared by the go.mod file in the
// given module root directory, or "" if the file is missing or the
// module directive is absent or malformed.
func moduleName(modRoot string) string {
	// Read the whole file: the previous implementation read only the
	// first 128 bytes (missing any module directive past that point)
	// and never closed the file handle.
	mod, err := os.ReadFile(filepath.Join(modRoot, "go.mod"))
	if err != nil {
		return ""
	}

	slashSlash := []byte("//")
	moduleStr := []byte("module")

	for len(mod) > 0 {
		line := mod
		mod = nil
		if i := bytes.IndexByte(line, '\n'); i >= 0 {
			line, mod = line[:i], line[i+1:]
		}
		// Strip trailing comments and surrounding whitespace.
		if i := bytes.Index(line, slashSlash); i >= 0 {
			line = line[:i]
		}
		line = bytes.TrimSpace(line)
		if !bytes.HasPrefix(line, moduleStr) {
			continue
		}
		line = line[len(moduleStr):]
		n := len(line)
		line = bytes.TrimSpace(line)
		// Require whitespace after the keyword ("modulefoo" is not a
		// module directive) and a non-empty path.
		if len(line) == n || len(line) == 0 {
			continue
		}

		if line[0] == '"' || line[0] == '`' {
			p, err := strconv.Unquote(string(line))
			if err != nil {
				return "" // malformed quoted string or multiline module path
			}
			return p
		}

		return string(line)
	}

	return "" // missing module path
}
+
// findModuleRoot walks upward from dir looking for a directory that
// contains a go.mod file, returning that directory or "" if none exists.
func findModuleRoot(dir string) (root string) {
	for dir = filepath.Clean(dir); ; {
		info, err := os.Stat(filepath.Join(dir, "go.mod"))
		if err == nil && !info.IsDir() {
			return dir
		}
		parent := filepath.Dir(dir)
		if parent == dir {
			// Reached the filesystem root without finding go.mod.
			return ""
		}
		dir = parent
	}
}
+
+func getPackageImportPath() string {
+	workingDir, err := os.Getwd()
+	if err != nil {
+		panic(err.Error())
+	}
+
+	sep := string(filepath.Separator)
+
+	// Try go.mod file first
+	modRoot := findModuleRoot(workingDir)
+	if modRoot != "" {
+		modName := moduleName(modRoot)
+		if modName != "" {
+			cd := strings.ReplaceAll(workingDir, modRoot, "")
+			cd = strings.ReplaceAll(cd, sep, "/")
+			return modName + cd
+		}
+	}
+
+	// Fallback to GOPATH structure
+	paths := strings.Split(workingDir, sep+"src"+sep)
+	if len(paths) == 1 {
+		fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n")
+		return "UNKNOWN_PACKAGE_PATH"
+	}
+	return filepath.ToSlash(paths[len(paths)-1])
+}

Some files were not shown because too many files changed in this diff