reassembly_queue.go 8.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366
  1. // SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
  2. // SPDX-License-Identifier: MIT
  3. package sctp
  4. import (
  5. "errors"
  6. "io"
  7. "sort"
  8. "sync/atomic"
  9. )
  10. func sortChunksByTSN(a []*chunkPayloadData) {
  11. sort.Slice(a, func(i, j int) bool {
  12. return sna32LT(a[i].tsn, a[j].tsn)
  13. })
  14. }
  15. func sortChunksBySSN(a []*chunkSet) {
  16. sort.Slice(a, func(i, j int) bool {
  17. return sna16LT(a[i].ssn, a[j].ssn)
  18. })
  19. }
// chunkSet is a set of chunks that share the same SSN
type chunkSet struct {
	ssn    uint16                    // used only with the ordered chunks
	ppi    PayloadProtocolIdentifier // payload protocol identifier shared by all fragments in the set
	chunks []*chunkPayloadData       // fragments of the message, kept sorted by TSN
}
  26. func newChunkSet(ssn uint16, ppi PayloadProtocolIdentifier) *chunkSet {
  27. return &chunkSet{
  28. ssn: ssn,
  29. ppi: ppi,
  30. chunks: []*chunkPayloadData{},
  31. }
  32. }
  33. func (set *chunkSet) push(chunk *chunkPayloadData) bool {
  34. // check if dup
  35. for _, c := range set.chunks {
  36. if c.tsn == chunk.tsn {
  37. return false
  38. }
  39. }
  40. // append and sort
  41. set.chunks = append(set.chunks, chunk)
  42. sortChunksByTSN(set.chunks)
  43. // Check if we now have a complete set
  44. complete := set.isComplete()
  45. return complete
  46. }
  47. func (set *chunkSet) isComplete() bool {
  48. // Condition for complete set
  49. // 0. Has at least one chunk.
  50. // 1. Begins with beginningFragment set to true
  51. // 2. Ends with endingFragment set to true
  52. // 3. TSN monotinically increase by 1 from beginning to end
  53. // 0.
  54. nChunks := len(set.chunks)
  55. if nChunks == 0 {
  56. return false
  57. }
  58. // 1.
  59. if !set.chunks[0].beginningFragment {
  60. return false
  61. }
  62. // 2.
  63. if !set.chunks[nChunks-1].endingFragment {
  64. return false
  65. }
  66. // 3.
  67. var lastTSN uint32
  68. for i, c := range set.chunks {
  69. if i > 0 {
  70. // Fragments must have contiguous TSN
  71. // From RFC 4960 Section 3.3.1:
  72. // When a user message is fragmented into multiple chunks, the TSNs are
  73. // used by the receiver to reassemble the message. This means that the
  74. // TSNs for each fragment of a fragmented user message MUST be strictly
  75. // sequential.
  76. if c.tsn != lastTSN+1 {
  77. // mid or end fragment is missing
  78. return false
  79. }
  80. }
  81. lastTSN = c.tsn
  82. }
  83. return true
  84. }
// reassemblyQueue buffers payload chunks for a single stream until
// complete messages can be read out.
type reassemblyQueue struct {
	si              uint16              // stream identifier this queue serves
	nextSSN         uint16              // expected SSN for next ordered chunk
	ordered         []*chunkSet         // ordered chunk sets, kept sorted by SSN
	unordered       []*chunkSet         // complete unordered chunk sets, in completion order
	unorderedChunks []*chunkPayloadData // pending unordered fragments, kept sorted by TSN
	nBytes          uint64              // total buffered user-data bytes; accessed atomically
}
  93. var errTryAgain = errors.New("try again")
  94. func newReassemblyQueue(si uint16) *reassemblyQueue {
  95. // From RFC 4960 Sec 6.5:
  96. // The Stream Sequence Number in all the streams MUST start from 0 when
  97. // the association is established. Also, when the Stream Sequence
  98. // Number reaches the value 65535 the next Stream Sequence Number MUST
  99. // be set to 0.
  100. return &reassemblyQueue{
  101. si: si,
  102. nextSSN: 0, // From RFC 4960 Sec 6.5:
  103. ordered: make([]*chunkSet, 0),
  104. unordered: make([]*chunkSet, 0),
  105. }
  106. }
  107. func (r *reassemblyQueue) push(chunk *chunkPayloadData) bool {
  108. var cset *chunkSet
  109. if chunk.streamIdentifier != r.si {
  110. return false
  111. }
  112. if chunk.unordered {
  113. // First, insert into unorderedChunks array
  114. r.unorderedChunks = append(r.unorderedChunks, chunk)
  115. atomic.AddUint64(&r.nBytes, uint64(len(chunk.userData)))
  116. sortChunksByTSN(r.unorderedChunks)
  117. // Scan unorderedChunks that are contiguous (in TSN)
  118. cset = r.findCompleteUnorderedChunkSet()
  119. // If found, append the complete set to the unordered array
  120. if cset != nil {
  121. r.unordered = append(r.unordered, cset)
  122. return true
  123. }
  124. return false
  125. }
  126. // This is an ordered chunk
  127. if sna16LT(chunk.streamSequenceNumber, r.nextSSN) {
  128. return false
  129. }
  130. // Check if a fragmented chunkSet with the fragmented SSN already exists
  131. if chunk.isFragmented() {
  132. for _, set := range r.ordered {
  133. // nolint:godox
  134. // TODO: add caution around SSN wrapping here... this helps only a little bit
  135. // by ensuring we don't add to an unfragmented cset (1 chunk). There's
  136. // a case where if the SSN does wrap around, we may see the same SSN
  137. // for a different chunk.
  138. // nolint:godox
  139. // TODO: this slice can get pretty big; it may be worth maintaining a map
  140. // for O(1) lookups at the cost of 2x memory.
  141. if set.ssn == chunk.streamSequenceNumber && set.chunks[0].isFragmented() {
  142. cset = set
  143. break
  144. }
  145. }
  146. }
  147. // If not found, create a new chunkSet
  148. if cset == nil {
  149. cset = newChunkSet(chunk.streamSequenceNumber, chunk.payloadType)
  150. r.ordered = append(r.ordered, cset)
  151. if !chunk.unordered {
  152. sortChunksBySSN(r.ordered)
  153. }
  154. }
  155. atomic.AddUint64(&r.nBytes, uint64(len(chunk.userData)))
  156. return cset.push(chunk)
  157. }
// findCompleteUnorderedChunkSet scans unorderedChunks (kept sorted by
// TSN) for a run that forms a complete message: a beginning fragment
// followed by TSN-contiguous fragments up to an ending fragment. If one
// is found, the run is removed from unorderedChunks and returned as a
// new chunkSet; otherwise nil is returned.
func (r *reassemblyQueue) findCompleteUnorderedChunkSet() *chunkSet {
	startIdx := -1 // index of the current candidate beginning fragment, -1 if none
	nChunks := 0   // length of the current contiguous run
	var lastTSN uint32
	var found bool
	for i, c := range r.unorderedChunks {
		// seek beginning
		if c.beginningFragment {
			startIdx = i
			nChunks = 1
			lastTSN = c.tsn
			if c.endingFragment {
				// single-chunk (unfragmented) message
				found = true
				break
			}
			continue
		}
		if startIdx < 0 {
			// not inside a candidate run yet
			continue
		}
		// Check if contiguous in TSN
		if c.tsn != lastTSN+1 {
			// gap found: abandon this run and keep seeking a new beginning
			startIdx = -1
			continue
		}
		lastTSN = c.tsn
		nChunks++
		if c.endingFragment {
			found = true
			break
		}
	}
	if !found {
		return nil
	}
	// Extract the range of chunks
	var chunks []*chunkPayloadData
	chunks = append(chunks, r.unorderedChunks[startIdx:startIdx+nChunks]...)
	r.unorderedChunks = append(
		r.unorderedChunks[:startIdx],
		r.unorderedChunks[startIdx+nChunks:]...)
	// SSN is unused for unordered messages, so 0 is passed here.
	chunkSet := newChunkSet(0, chunks[0].payloadType)
	chunkSet.chunks = chunks
	return chunkSet
}
  203. func (r *reassemblyQueue) isReadable() bool {
  204. // Check unordered first
  205. if len(r.unordered) > 0 {
  206. // The chunk sets in r.unordered should all be complete.
  207. return true
  208. }
  209. // Check ordered sets
  210. if len(r.ordered) > 0 {
  211. cset := r.ordered[0]
  212. if cset.isComplete() {
  213. if sna16LTE(cset.ssn, r.nextSSN) {
  214. return true
  215. }
  216. }
  217. }
  218. return false
  219. }
  220. func (r *reassemblyQueue) read(buf []byte) (int, PayloadProtocolIdentifier, error) {
  221. var cset *chunkSet
  222. // Check unordered first
  223. switch {
  224. case len(r.unordered) > 0:
  225. cset = r.unordered[0]
  226. r.unordered = r.unordered[1:]
  227. case len(r.ordered) > 0:
  228. // Now, check ordered
  229. cset = r.ordered[0]
  230. if !cset.isComplete() {
  231. return 0, 0, errTryAgain
  232. }
  233. if sna16GT(cset.ssn, r.nextSSN) {
  234. return 0, 0, errTryAgain
  235. }
  236. r.ordered = r.ordered[1:]
  237. if cset.ssn == r.nextSSN {
  238. r.nextSSN++
  239. }
  240. default:
  241. return 0, 0, errTryAgain
  242. }
  243. // Concat all fragments into the buffer
  244. nWritten := 0
  245. ppi := cset.ppi
  246. var err error
  247. for _, c := range cset.chunks {
  248. toCopy := len(c.userData)
  249. r.subtractNumBytes(toCopy)
  250. if err == nil {
  251. n := copy(buf[nWritten:], c.userData)
  252. nWritten += n
  253. if n < toCopy {
  254. err = io.ErrShortBuffer
  255. }
  256. }
  257. }
  258. return nWritten, ppi, err
  259. }
  260. func (r *reassemblyQueue) forwardTSNForOrdered(lastSSN uint16) {
  261. // Use lastSSN to locate a chunkSet then remove it if the set has
  262. // not been complete
  263. keep := []*chunkSet{}
  264. for _, set := range r.ordered {
  265. if sna16LTE(set.ssn, lastSSN) {
  266. if !set.isComplete() {
  267. // drop the set
  268. for _, c := range set.chunks {
  269. r.subtractNumBytes(len(c.userData))
  270. }
  271. continue
  272. }
  273. }
  274. keep = append(keep, set)
  275. }
  276. r.ordered = keep
  277. // Finally, forward nextSSN
  278. if sna16LTE(r.nextSSN, lastSSN) {
  279. r.nextSSN = lastSSN + 1
  280. }
  281. }
  282. func (r *reassemblyQueue) forwardTSNForUnordered(newCumulativeTSN uint32) {
  283. // Remove all fragments in the unordered sets that contains chunks
  284. // equal to or older than `newCumulativeTSN`.
  285. // We know all sets in the r.unordered are complete ones.
  286. // Just remove chunks that are equal to or older than newCumulativeTSN
  287. // from the unorderedChunks
  288. lastIdx := -1
  289. for i, c := range r.unorderedChunks {
  290. if sna32GT(c.tsn, newCumulativeTSN) {
  291. break
  292. }
  293. lastIdx = i
  294. }
  295. if lastIdx >= 0 {
  296. for _, c := range r.unorderedChunks[0 : lastIdx+1] {
  297. r.subtractNumBytes(len(c.userData))
  298. }
  299. r.unorderedChunks = r.unorderedChunks[lastIdx+1:]
  300. }
  301. }
  302. func (r *reassemblyQueue) subtractNumBytes(nBytes int) {
  303. cur := atomic.LoadUint64(&r.nBytes)
  304. if int(cur) >= nBytes {
  305. atomic.AddUint64(&r.nBytes, -uint64(nBytes))
  306. } else {
  307. atomic.StoreUint64(&r.nBytes, 0)
  308. }
  309. }
  310. func (r *reassemblyQueue) getNumBytes() int {
  311. return int(atomic.LoadUint64(&r.nBytes))
  312. }