reassembly_queue.go 7.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355
  1. // SPDX-FileCopyrightText: 2023 The Pion community <https://pion.ly>
  2. // SPDX-License-Identifier: MIT
  3. package sctp
  4. import (
  5. "errors"
  6. "io"
  7. "sort"
  8. "sync/atomic"
  9. )
  10. func sortChunksByTSN(a []*chunkPayloadData) {
  11. sort.Slice(a, func(i, j int) bool {
  12. return sna32LT(a[i].tsn, a[j].tsn)
  13. })
  14. }
  15. func sortChunksBySSN(a []*chunkSet) {
  16. sort.Slice(a, func(i, j int) bool {
  17. return sna16LT(a[i].ssn, a[j].ssn)
  18. })
  19. }
// chunkSet is a set of chunks that share the same SSN and together form
// (possibly a fragment at a time) one user message.
type chunkSet struct {
	ssn uint16 // used only with the ordered chunks
	ppi PayloadProtocolIdentifier // payload protocol identifier shared by all fragments
	chunks []*chunkPayloadData // fragments, kept sorted by TSN (see push)
}
  26. func newChunkSet(ssn uint16, ppi PayloadProtocolIdentifier) *chunkSet {
  27. return &chunkSet{
  28. ssn: ssn,
  29. ppi: ppi,
  30. chunks: []*chunkPayloadData{},
  31. }
  32. }
  33. func (set *chunkSet) push(chunk *chunkPayloadData) bool {
  34. // check if dup
  35. for _, c := range set.chunks {
  36. if c.tsn == chunk.tsn {
  37. return false
  38. }
  39. }
  40. // append and sort
  41. set.chunks = append(set.chunks, chunk)
  42. sortChunksByTSN(set.chunks)
  43. // Check if we now have a complete set
  44. complete := set.isComplete()
  45. return complete
  46. }
  47. func (set *chunkSet) isComplete() bool {
  48. // Condition for complete set
  49. // 0. Has at least one chunk.
  50. // 1. Begins with beginningFragment set to true
  51. // 2. Ends with endingFragment set to true
  52. // 3. TSN monotinically increase by 1 from beginning to end
  53. // 0.
  54. nChunks := len(set.chunks)
  55. if nChunks == 0 {
  56. return false
  57. }
  58. // 1.
  59. if !set.chunks[0].beginningFragment {
  60. return false
  61. }
  62. // 2.
  63. if !set.chunks[nChunks-1].endingFragment {
  64. return false
  65. }
  66. // 3.
  67. var lastTSN uint32
  68. for i, c := range set.chunks {
  69. if i > 0 {
  70. // Fragments must have contiguous TSN
  71. // From RFC 4960 Section 3.3.1:
  72. // When a user message is fragmented into multiple chunks, the TSNs are
  73. // used by the receiver to reassemble the message. This means that the
  74. // TSNs for each fragment of a fragmented user message MUST be strictly
  75. // sequential.
  76. if c.tsn != lastTSN+1 {
  77. // mid or end fragment is missing
  78. return false
  79. }
  80. }
  81. lastTSN = c.tsn
  82. }
  83. return true
  84. }
// reassemblyQueue buffers payload chunks for a single stream (si) until
// they can be reassembled into complete user messages and read.
type reassemblyQueue struct {
	si uint16 // stream identifier this queue serves
	nextSSN uint16 // expected SSN for next ordered chunk
	ordered []*chunkSet // ordered sets (possibly incomplete), sorted by SSN
	unordered []*chunkSet // complete unordered sets, ready to be read
	unorderedChunks []*chunkPayloadData // unordered fragments sorted by TSN, not yet assembled
	nBytes uint64 // total buffered payload bytes; accessed atomically
}
// errTryAgain signals that no complete message is currently readable;
// the caller should retry after more chunks arrive.
var errTryAgain = errors.New("try again")
  94. func newReassemblyQueue(si uint16) *reassemblyQueue {
  95. // From RFC 4960 Sec 6.5:
  96. // The Stream Sequence Number in all the streams MUST start from 0 when
  97. // the association is established. Also, when the Stream Sequence
  98. // Number reaches the value 65535 the next Stream Sequence Number MUST
  99. // be set to 0.
  100. return &reassemblyQueue{
  101. si: si,
  102. nextSSN: 0, // From RFC 4960 Sec 6.5:
  103. ordered: make([]*chunkSet, 0),
  104. unordered: make([]*chunkSet, 0),
  105. }
  106. }
  107. func (r *reassemblyQueue) push(chunk *chunkPayloadData) bool {
  108. var cset *chunkSet
  109. if chunk.streamIdentifier != r.si {
  110. return false
  111. }
  112. if chunk.unordered {
  113. // First, insert into unorderedChunks array
  114. r.unorderedChunks = append(r.unorderedChunks, chunk)
  115. atomic.AddUint64(&r.nBytes, uint64(len(chunk.userData)))
  116. sortChunksByTSN(r.unorderedChunks)
  117. // Scan unorderedChunks that are contiguous (in TSN)
  118. cset = r.findCompleteUnorderedChunkSet()
  119. // If found, append the complete set to the unordered array
  120. if cset != nil {
  121. r.unordered = append(r.unordered, cset)
  122. return true
  123. }
  124. return false
  125. }
  126. // This is an ordered chunk
  127. if sna16LT(chunk.streamSequenceNumber, r.nextSSN) {
  128. return false
  129. }
  130. // Check if a chunkSet with the SSN already exists
  131. for _, set := range r.ordered {
  132. if set.ssn == chunk.streamSequenceNumber {
  133. cset = set
  134. break
  135. }
  136. }
  137. // If not found, create a new chunkSet
  138. if cset == nil {
  139. cset = newChunkSet(chunk.streamSequenceNumber, chunk.payloadType)
  140. r.ordered = append(r.ordered, cset)
  141. if !chunk.unordered {
  142. sortChunksBySSN(r.ordered)
  143. }
  144. }
  145. atomic.AddUint64(&r.nBytes, uint64(len(chunk.userData)))
  146. return cset.push(chunk)
  147. }
// findCompleteUnorderedChunkSet scans r.unorderedChunks (kept sorted by
// TSN by push) for a contiguous run that starts with a beginning-fragment
// and ends with an ending-fragment. On success the run is spliced out of
// r.unorderedChunks and returned as a new chunkSet; otherwise nil.
func (r *reassemblyQueue) findCompleteUnorderedChunkSet() *chunkSet {
	startIdx := -1 // index of the current candidate's beginning fragment; -1 = no candidate
	nChunks := 0 // length of the current candidate run
	var lastTSN uint32 // TSN of the last chunk accepted into the candidate run
	var found bool
	for i, c := range r.unorderedChunks {
		// seek beginning
		if c.beginningFragment {
			startIdx = i
			nChunks = 1
			lastTSN = c.tsn
			if c.endingFragment {
				// single-chunk (unfragmented) message
				found = true
				break
			}
			continue
		}
		// not inside a candidate run yet
		if startIdx < 0 {
			continue
		}
		// Check if contiguous in TSN; a gap abandons the candidate run
		if c.tsn != lastTSN+1 {
			startIdx = -1
			continue
		}
		lastTSN = c.tsn
		nChunks++
		if c.endingFragment {
			found = true
			break
		}
	}
	if !found {
		return nil
	}
	// Extract the range of chunks into a fresh slice (a copy, so the
	// in-place splice below cannot clobber it)
	var chunks []*chunkPayloadData
	chunks = append(chunks, r.unorderedChunks[startIdx:startIdx+nChunks]...)
	// Splice the extracted run out of unorderedChunks
	r.unorderedChunks = append(
		r.unorderedChunks[:startIdx],
		r.unorderedChunks[startIdx+nChunks:]...)
	// SSN 0 is a placeholder: unordered sets are never matched by SSN
	chunkSet := newChunkSet(0, chunks[0].payloadType)
	chunkSet.chunks = chunks
	return chunkSet
}
  193. func (r *reassemblyQueue) isReadable() bool {
  194. // Check unordered first
  195. if len(r.unordered) > 0 {
  196. // The chunk sets in r.unordered should all be complete.
  197. return true
  198. }
  199. // Check ordered sets
  200. if len(r.ordered) > 0 {
  201. cset := r.ordered[0]
  202. if cset.isComplete() {
  203. if sna16LTE(cset.ssn, r.nextSSN) {
  204. return true
  205. }
  206. }
  207. }
  208. return false
  209. }
  210. func (r *reassemblyQueue) read(buf []byte) (int, PayloadProtocolIdentifier, error) {
  211. var cset *chunkSet
  212. // Check unordered first
  213. switch {
  214. case len(r.unordered) > 0:
  215. cset = r.unordered[0]
  216. r.unordered = r.unordered[1:]
  217. case len(r.ordered) > 0:
  218. // Now, check ordered
  219. cset = r.ordered[0]
  220. if !cset.isComplete() {
  221. return 0, 0, errTryAgain
  222. }
  223. if sna16GT(cset.ssn, r.nextSSN) {
  224. return 0, 0, errTryAgain
  225. }
  226. r.ordered = r.ordered[1:]
  227. if cset.ssn == r.nextSSN {
  228. r.nextSSN++
  229. }
  230. default:
  231. return 0, 0, errTryAgain
  232. }
  233. // Concat all fragments into the buffer
  234. nWritten := 0
  235. ppi := cset.ppi
  236. var err error
  237. for _, c := range cset.chunks {
  238. toCopy := len(c.userData)
  239. r.subtractNumBytes(toCopy)
  240. if err == nil {
  241. n := copy(buf[nWritten:], c.userData)
  242. nWritten += n
  243. if n < toCopy {
  244. err = io.ErrShortBuffer
  245. }
  246. }
  247. }
  248. return nWritten, ppi, err
  249. }
  250. func (r *reassemblyQueue) forwardTSNForOrdered(lastSSN uint16) {
  251. // Use lastSSN to locate a chunkSet then remove it if the set has
  252. // not been complete
  253. keep := []*chunkSet{}
  254. for _, set := range r.ordered {
  255. if sna16LTE(set.ssn, lastSSN) {
  256. if !set.isComplete() {
  257. // drop the set
  258. for _, c := range set.chunks {
  259. r.subtractNumBytes(len(c.userData))
  260. }
  261. continue
  262. }
  263. }
  264. keep = append(keep, set)
  265. }
  266. r.ordered = keep
  267. // Finally, forward nextSSN
  268. if sna16LTE(r.nextSSN, lastSSN) {
  269. r.nextSSN = lastSSN + 1
  270. }
  271. }
  272. func (r *reassemblyQueue) forwardTSNForUnordered(newCumulativeTSN uint32) {
  273. // Remove all fragments in the unordered sets that contains chunks
  274. // equal to or older than `newCumulativeTSN`.
  275. // We know all sets in the r.unordered are complete ones.
  276. // Just remove chunks that are equal to or older than newCumulativeTSN
  277. // from the unorderedChunks
  278. lastIdx := -1
  279. for i, c := range r.unorderedChunks {
  280. if sna32GT(c.tsn, newCumulativeTSN) {
  281. break
  282. }
  283. lastIdx = i
  284. }
  285. if lastIdx >= 0 {
  286. for _, c := range r.unorderedChunks[0 : lastIdx+1] {
  287. r.subtractNumBytes(len(c.userData))
  288. }
  289. r.unorderedChunks = r.unorderedChunks[lastIdx+1:]
  290. }
  291. }
  292. func (r *reassemblyQueue) subtractNumBytes(nBytes int) {
  293. cur := atomic.LoadUint64(&r.nBytes)
  294. if int(cur) >= nBytes {
  295. atomic.AddUint64(&r.nBytes, -uint64(nBytes))
  296. } else {
  297. atomic.StoreUint64(&r.nBytes, 0)
  298. }
  299. }
  300. func (r *reassemblyQueue) getNumBytes() int {
  301. return int(atomic.LoadUint64(&r.nBytes))
  302. }