scan.go 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986
  1. package dns
  2. import (
  3. "io"
  4. "log"
  5. "os"
  6. "strconv"
  7. "strings"
  8. )
  9. type debugging bool
  10. const debug debugging = false
  11. func (d debugging) Printf(format string, args ...interface{}) {
  12. if d {
  13. log.Printf(format, args...)
  14. }
  15. }
const maxTok = 2048 // Largest token we can return.
const maxUint16 = 1<<16 - 1 // Largest value that fits in a uint16 (65535).
// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
// * Suppress sequences of spaces;
// * Make each RR fit on one line (zNewline is sent as the last token);
// * Handle comments: ;
// * Handle braces - anywhere.
const (
	// Zonefile token types, stored in lex.value.
	zEOF = iota // zero value; end of input
	zString
	zBlank
	zQuote
	zNewline
	zRrtpe
	zOwner
	zClass
	zDirOrigin   // $ORIGIN
	zDirTtl      // $TTL
	zDirInclude  // $INCLUDE
	zDirGenerate // $GENERATE

	// Privatekey file
	zValue
	zKey

	// Parser states for the grammar in parseZone.
	zExpectOwnerDir      // Ownername
	zExpectOwnerBl       // Whitespace after the ownername
	zExpectAny           // Expect rrtype, ttl or class
	zExpectAnyNoClass    // Expect rrtype or ttl
	zExpectAnyNoClassBl  // The whitespace after zExpectAnyNoClass
	zExpectAnyNoTtl      // Expect rrtype or class
	zExpectAnyNoTtlBl    // Whitespace after zExpectAnyNoTtl
	zExpectRrtype        // Expect rrtype
	zExpectRrtypeBl      // Whitespace BEFORE rrtype
	zExpectRdata         // The first element of the rdata
	zExpectDirTtlBl      // Space after directive $TTL
	zExpectDirTtl        // Directive $TTL
	zExpectDirOriginBl   // Space after directive $ORIGIN
	zExpectDirOrigin     // Directive $ORIGIN
	zExpectDirIncludeBl  // Space after directive $INCLUDE
	zExpectDirInclude    // Directive $INCLUDE
	zExpectDirGenerate   // Directive $GENERATE
	zExpectDirGenerateBl // Space after directive $GENERATE
)
  60. // ParseError is a parsing error. It contains the parse error and the location in the io.Reader
  61. // where the error occurred.
  62. type ParseError struct {
  63. file string
  64. err string
  65. lex lex
  66. }
  67. func (e *ParseError) Error() (s string) {
  68. if e.file != "" {
  69. s = e.file + ": "
  70. }
  71. s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
  72. strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
  73. return
  74. }
  75. type lex struct {
  76. token string // text of the token
  77. tokenUpper string // uppercase text of the token
  78. length int // length of the token
  79. err bool // when true, token text has lexer error
  80. value uint8 // value: zString, _BLANK, etc.
  81. line int // line in the file
  82. column int // column in the file
  83. torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
  84. comment string // any comment text seen
  85. }
// Token holds the tokens that are returned when a zone file is parsed.
type Token struct {
	// The scanned resource record; only valid when Error is nil.
	RR
	// When an error occurred, this has the error specifics.
	Error *ParseError
	// A potential comment positioned after the RR and on the same line.
	Comment string
}
  95. // NewRR reads the RR contained in the string s. Only the first RR is
  96. // returned. If s contains no RR, return nil with no error. The class
  97. // defaults to IN and TTL defaults to 3600. The full zone file syntax
  98. // like $TTL, $ORIGIN, etc. is supported. All fields of the returned
  99. // RR are set, except RR.Header().Rdlength which is set to 0.
  100. func NewRR(s string) (RR, error) {
  101. if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
  102. return ReadRR(strings.NewReader(s+"\n"), "")
  103. }
  104. return ReadRR(strings.NewReader(s), "")
  105. }
  106. // ReadRR reads the RR contained in q.
  107. // See NewRR for more documentation.
  108. func ReadRR(q io.Reader, filename string) (RR, error) {
  109. r := <-parseZoneHelper(q, ".", filename, 1)
  110. if r == nil {
  111. return nil, nil
  112. }
  113. if r.Error != nil {
  114. return nil, r.Error
  115. }
  116. return r.RR, nil
  117. }
  118. // ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the
  119. // returned channel, which consist out the parsed RR, a potential comment or an error.
  120. // If there is an error the RR is nil. The string file is only used
  121. // in error reporting. The string origin is used as the initial origin, as
  122. // if the file would start with: $ORIGIN origin .
  123. // The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
  124. // The channel t is closed by ParseZone when the end of r is reached.
  125. //
  126. // Basic usage pattern when reading from a string (z) containing the
  127. // zone data:
  128. //
  129. // for x := range dns.ParseZone(strings.NewReader(z), "", "") {
  130. // if x.Error != nil {
  131. // // log.Println(x.Error)
  132. // } else {
  133. // // Do something with x.RR
  134. // }
  135. // }
  136. //
  137. // Comments specified after an RR (and on the same line!) are returned too:
  138. //
  139. // foo. IN A 10.0.0.1 ; this is a comment
  140. //
  141. // The text "; this is comment" is returned in Token.Comment. Comments inside the
  142. // RR are discarded. Comments on a line by themselves are discarded too.
  143. func ParseZone(r io.Reader, origin, file string) chan *Token {
  144. return parseZoneHelper(r, origin, file, 10000)
  145. }
  146. func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
  147. t := make(chan *Token, chansize)
  148. go parseZone(r, origin, file, t, 0)
  149. return t
  150. }
  151. func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
  152. defer func() {
  153. if include == 0 {
  154. close(t)
  155. }
  156. }()
  157. s := scanInit(r)
  158. c := make(chan lex)
  159. // Start the lexer
  160. go zlexer(s, c)
  161. // 6 possible beginnings of a line, _ is a space
  162. // 0. zRRTYPE -> all omitted until the rrtype
  163. // 1. zOwner _ zRrtype -> class/ttl omitted
  164. // 2. zOwner _ zString _ zRrtype -> class omitted
  165. // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
  166. // 4. zOwner _ zClass _ zRrtype -> ttl omitted
  167. // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
  168. // After detecting these, we know the zRrtype so we can jump to functions
  169. // handling the rdata for each of these types.
  170. if origin == "" {
  171. origin = "."
  172. }
  173. origin = Fqdn(origin)
  174. if _, ok := IsDomainName(origin); !ok {
  175. t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
  176. return
  177. }
  178. st := zExpectOwnerDir // initial state
  179. var h RR_Header
  180. var defttl uint32 = defaultTtl
  181. var prevName string
  182. for l := range c {
  183. // Lexer spotted an error already
  184. if l.err == true {
  185. t <- &Token{Error: &ParseError{f, l.token, l}}
  186. return
  187. }
  188. switch st {
  189. case zExpectOwnerDir:
  190. // We can also expect a directive, like $TTL or $ORIGIN
  191. h.Ttl = defttl
  192. h.Class = ClassINET
  193. switch l.value {
  194. case zNewline:
  195. st = zExpectOwnerDir
  196. case zOwner:
  197. h.Name = l.token
  198. if l.token[0] == '@' {
  199. h.Name = origin
  200. prevName = h.Name
  201. st = zExpectOwnerBl
  202. break
  203. }
  204. if h.Name[l.length-1] != '.' {
  205. h.Name = appendOrigin(h.Name, origin)
  206. }
  207. _, ok := IsDomainName(l.token)
  208. if !ok {
  209. t <- &Token{Error: &ParseError{f, "bad owner name", l}}
  210. return
  211. }
  212. prevName = h.Name
  213. st = zExpectOwnerBl
  214. case zDirTtl:
  215. st = zExpectDirTtlBl
  216. case zDirOrigin:
  217. st = zExpectDirOriginBl
  218. case zDirInclude:
  219. st = zExpectDirIncludeBl
  220. case zDirGenerate:
  221. st = zExpectDirGenerateBl
  222. case zRrtpe:
  223. h.Name = prevName
  224. h.Rrtype = l.torc
  225. st = zExpectRdata
  226. case zClass:
  227. h.Name = prevName
  228. h.Class = l.torc
  229. st = zExpectAnyNoClassBl
  230. case zBlank:
  231. // Discard, can happen when there is nothing on the
  232. // line except the RR type
  233. case zString:
  234. ttl, ok := stringToTtl(l.token)
  235. if !ok {
  236. t <- &Token{Error: &ParseError{f, "not a TTL", l}}
  237. return
  238. }
  239. h.Ttl = ttl
  240. // Don't about the defttl, we should take the $TTL value
  241. // defttl = ttl
  242. st = zExpectAnyNoTtlBl
  243. default:
  244. t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
  245. return
  246. }
  247. case zExpectDirIncludeBl:
  248. if l.value != zBlank {
  249. t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}}
  250. return
  251. }
  252. st = zExpectDirInclude
  253. case zExpectDirInclude:
  254. if l.value != zString {
  255. t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}}
  256. return
  257. }
  258. neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
  259. switch l := <-c; l.value {
  260. case zBlank:
  261. l := <-c
  262. if l.value == zString {
  263. if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
  264. t <- &Token{Error: &ParseError{f, "bad origin name", l}}
  265. return
  266. }
  267. // a new origin is specified.
  268. if l.token[l.length-1] != '.' {
  269. if origin != "." { // Prevent .. endings
  270. neworigin = l.token + "." + origin
  271. } else {
  272. neworigin = l.token + origin
  273. }
  274. } else {
  275. neworigin = l.token
  276. }
  277. }
  278. case zNewline, zEOF:
  279. // Ok
  280. default:
  281. t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}}
  282. return
  283. }
  284. // Start with the new file
  285. r1, e1 := os.Open(l.token)
  286. if e1 != nil {
  287. t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
  288. return
  289. }
  290. if include+1 > 7 {
  291. t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
  292. return
  293. }
  294. parseZone(r1, l.token, neworigin, t, include+1)
  295. st = zExpectOwnerDir
  296. case zExpectDirTtlBl:
  297. if l.value != zBlank {
  298. t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
  299. return
  300. }
  301. st = zExpectDirTtl
  302. case zExpectDirTtl:
  303. if l.value != zString {
  304. t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
  305. return
  306. }
  307. if e, _ := slurpRemainder(c, f); e != nil {
  308. t <- &Token{Error: e}
  309. return
  310. }
  311. ttl, ok := stringToTtl(l.token)
  312. if !ok {
  313. t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
  314. return
  315. }
  316. defttl = ttl
  317. st = zExpectOwnerDir
  318. case zExpectDirOriginBl:
  319. if l.value != zBlank {
  320. t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}}
  321. return
  322. }
  323. st = zExpectDirOrigin
  324. case zExpectDirOrigin:
  325. if l.value != zString {
  326. t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}}
  327. return
  328. }
  329. if e, _ := slurpRemainder(c, f); e != nil {
  330. t <- &Token{Error: e}
  331. }
  332. if _, ok := IsDomainName(l.token); !ok {
  333. t <- &Token{Error: &ParseError{f, "bad origin name", l}}
  334. return
  335. }
  336. if l.token[l.length-1] != '.' {
  337. if origin != "." { // Prevent .. endings
  338. origin = l.token + "." + origin
  339. } else {
  340. origin = l.token + origin
  341. }
  342. } else {
  343. origin = l.token
  344. }
  345. st = zExpectOwnerDir
  346. case zExpectDirGenerateBl:
  347. if l.value != zBlank {
  348. t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}}
  349. return
  350. }
  351. st = zExpectDirGenerate
  352. case zExpectDirGenerate:
  353. if l.value != zString {
  354. t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}}
  355. return
  356. }
  357. if errMsg := generate(l, c, t, origin); errMsg != "" {
  358. t <- &Token{Error: &ParseError{f, errMsg, l}}
  359. return
  360. }
  361. st = zExpectOwnerDir
  362. case zExpectOwnerBl:
  363. if l.value != zBlank {
  364. t <- &Token{Error: &ParseError{f, "no blank after owner", l}}
  365. return
  366. }
  367. st = zExpectAny
  368. case zExpectAny:
  369. switch l.value {
  370. case zRrtpe:
  371. h.Rrtype = l.torc
  372. st = zExpectRdata
  373. case zClass:
  374. h.Class = l.torc
  375. st = zExpectAnyNoClassBl
  376. case zString:
  377. ttl, ok := stringToTtl(l.token)
  378. if !ok {
  379. t <- &Token{Error: &ParseError{f, "not a TTL", l}}
  380. return
  381. }
  382. h.Ttl = ttl
  383. // defttl = ttl // don't set the defttl here
  384. st = zExpectAnyNoTtlBl
  385. default:
  386. t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
  387. return
  388. }
  389. case zExpectAnyNoClassBl:
  390. if l.value != zBlank {
  391. t <- &Token{Error: &ParseError{f, "no blank before class", l}}
  392. return
  393. }
  394. st = zExpectAnyNoClass
  395. case zExpectAnyNoTtlBl:
  396. if l.value != zBlank {
  397. t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
  398. return
  399. }
  400. st = zExpectAnyNoTtl
  401. case zExpectAnyNoTtl:
  402. switch l.value {
  403. case zClass:
  404. h.Class = l.torc
  405. st = zExpectRrtypeBl
  406. case zRrtpe:
  407. h.Rrtype = l.torc
  408. st = zExpectRdata
  409. default:
  410. t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}}
  411. return
  412. }
  413. case zExpectAnyNoClass:
  414. switch l.value {
  415. case zString:
  416. ttl, ok := stringToTtl(l.token)
  417. if !ok {
  418. t <- &Token{Error: &ParseError{f, "not a TTL", l}}
  419. return
  420. }
  421. h.Ttl = ttl
  422. // defttl = ttl // don't set the def ttl anymore
  423. st = zExpectRrtypeBl
  424. case zRrtpe:
  425. h.Rrtype = l.torc
  426. st = zExpectRdata
  427. default:
  428. t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}}
  429. return
  430. }
  431. case zExpectRrtypeBl:
  432. if l.value != zBlank {
  433. t <- &Token{Error: &ParseError{f, "no blank before RR type", l}}
  434. return
  435. }
  436. st = zExpectRrtype
  437. case zExpectRrtype:
  438. if l.value != zRrtpe {
  439. t <- &Token{Error: &ParseError{f, "unknown RR type", l}}
  440. return
  441. }
  442. h.Rrtype = l.torc
  443. st = zExpectRdata
  444. case zExpectRdata:
  445. r, e, c1 := setRR(h, c, origin, f)
  446. if e != nil {
  447. // If e.lex is nil than we have encounter a unknown RR type
  448. // in that case we substitute our current lex token
  449. if e.lex.token == "" && e.lex.value == 0 {
  450. e.lex = l // Uh, dirty
  451. }
  452. t <- &Token{Error: e}
  453. return
  454. }
  455. t <- &Token{RR: r, Comment: c1}
  456. st = zExpectOwnerDir
  457. }
  458. }
  459. // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this
  460. // is not an error, because an empty zone file is still a zone file.
  461. }
  462. // zlexer scans the sourcefile and returns tokens on the channel c.
  463. func zlexer(s *scan, c chan lex) {
  464. var l lex
  465. str := make([]byte, maxTok) // Should be enough for any token
  466. stri := 0 // Offset in str (0 means empty)
  467. com := make([]byte, maxTok) // Hold comment text
  468. comi := 0
  469. quote := false
  470. escape := false
  471. space := false
  472. commt := false
  473. rrtype := false
  474. owner := true
  475. brace := 0
  476. x, err := s.tokenText()
  477. defer close(c)
  478. for err == nil {
  479. l.column = s.position.Column
  480. l.line = s.position.Line
  481. if stri >= maxTok {
  482. l.token = "token length insufficient for parsing"
  483. l.err = true
  484. debug.Printf("[%+v]", l.token)
  485. c <- l
  486. return
  487. }
  488. if comi >= maxTok {
  489. l.token = "comment length insufficient for parsing"
  490. l.err = true
  491. debug.Printf("[%+v]", l.token)
  492. c <- l
  493. return
  494. }
  495. switch x {
  496. case ' ', '\t':
  497. if escape {
  498. escape = false
  499. str[stri] = x
  500. stri++
  501. break
  502. }
  503. if quote {
  504. // Inside quotes this is legal
  505. str[stri] = x
  506. stri++
  507. break
  508. }
  509. if commt {
  510. com[comi] = x
  511. comi++
  512. break
  513. }
  514. if stri == 0 {
  515. // Space directly in the beginning, handled in the grammar
  516. } else if owner {
  517. // If we have a string and its the first, make it an owner
  518. l.value = zOwner
  519. l.token = string(str[:stri])
  520. l.tokenUpper = strings.ToUpper(l.token)
  521. l.length = stri
  522. // escape $... start with a \ not a $, so this will work
  523. switch l.tokenUpper {
  524. case "$TTL":
  525. l.value = zDirTtl
  526. case "$ORIGIN":
  527. l.value = zDirOrigin
  528. case "$INCLUDE":
  529. l.value = zDirInclude
  530. case "$GENERATE":
  531. l.value = zDirGenerate
  532. }
  533. debug.Printf("[7 %+v]", l.token)
  534. c <- l
  535. } else {
  536. l.value = zString
  537. l.token = string(str[:stri])
  538. l.tokenUpper = strings.ToUpper(l.token)
  539. l.length = stri
  540. if !rrtype {
  541. if t, ok := StringToType[l.tokenUpper]; ok {
  542. l.value = zRrtpe
  543. l.torc = t
  544. rrtype = true
  545. } else {
  546. if strings.HasPrefix(l.tokenUpper, "TYPE") {
  547. t, ok := typeToInt(l.token)
  548. if !ok {
  549. l.token = "unknown RR type"
  550. l.err = true
  551. c <- l
  552. return
  553. }
  554. l.value = zRrtpe
  555. l.torc = t
  556. }
  557. }
  558. if t, ok := StringToClass[l.tokenUpper]; ok {
  559. l.value = zClass
  560. l.torc = t
  561. } else {
  562. if strings.HasPrefix(l.tokenUpper, "CLASS") {
  563. t, ok := classToInt(l.token)
  564. if !ok {
  565. l.token = "unknown class"
  566. l.err = true
  567. c <- l
  568. return
  569. }
  570. l.value = zClass
  571. l.torc = t
  572. }
  573. }
  574. }
  575. debug.Printf("[6 %+v]", l.token)
  576. c <- l
  577. }
  578. stri = 0
  579. // I reverse space stuff here
  580. if !space && !commt {
  581. l.value = zBlank
  582. l.token = " "
  583. l.length = 1
  584. debug.Printf("[5 %+v]", l.token)
  585. c <- l
  586. }
  587. owner = false
  588. space = true
  589. case ';':
  590. if escape {
  591. escape = false
  592. str[stri] = x
  593. stri++
  594. break
  595. }
  596. if quote {
  597. // Inside quotes this is legal
  598. str[stri] = x
  599. stri++
  600. break
  601. }
  602. if stri > 0 {
  603. l.value = zString
  604. l.token = string(str[:stri])
  605. l.tokenUpper = strings.ToUpper(l.token)
  606. l.length = stri
  607. debug.Printf("[4 %+v]", l.token)
  608. c <- l
  609. stri = 0
  610. }
  611. commt = true
  612. com[comi] = ';'
  613. comi++
  614. case '\r':
  615. escape = false
  616. if quote {
  617. str[stri] = x
  618. stri++
  619. break
  620. }
  621. // discard if outside of quotes
  622. case '\n':
  623. escape = false
  624. // Escaped newline
  625. if quote {
  626. str[stri] = x
  627. stri++
  628. break
  629. }
  630. // inside quotes this is legal
  631. if commt {
  632. // Reset a comment
  633. commt = false
  634. rrtype = false
  635. stri = 0
  636. // If not in a brace this ends the comment AND the RR
  637. if brace == 0 {
  638. owner = true
  639. owner = true
  640. l.value = zNewline
  641. l.token = "\n"
  642. l.tokenUpper = l.token
  643. l.length = 1
  644. l.comment = string(com[:comi])
  645. debug.Printf("[3 %+v %+v]", l.token, l.comment)
  646. c <- l
  647. l.comment = ""
  648. comi = 0
  649. break
  650. }
  651. com[comi] = ' ' // convert newline to space
  652. comi++
  653. break
  654. }
  655. if brace == 0 {
  656. // If there is previous text, we should output it here
  657. if stri != 0 {
  658. l.value = zString
  659. l.token = string(str[:stri])
  660. l.tokenUpper = strings.ToUpper(l.token)
  661. l.length = stri
  662. if !rrtype {
  663. if t, ok := StringToType[l.tokenUpper]; ok {
  664. l.value = zRrtpe
  665. l.torc = t
  666. rrtype = true
  667. }
  668. }
  669. debug.Printf("[2 %+v]", l.token)
  670. c <- l
  671. }
  672. l.value = zNewline
  673. l.token = "\n"
  674. l.tokenUpper = l.token
  675. l.length = 1
  676. debug.Printf("[1 %+v]", l.token)
  677. c <- l
  678. stri = 0
  679. commt = false
  680. rrtype = false
  681. owner = true
  682. comi = 0
  683. }
  684. case '\\':
  685. // comments do not get escaped chars, everything is copied
  686. if commt {
  687. com[comi] = x
  688. comi++
  689. break
  690. }
  691. // something already escaped must be in string
  692. if escape {
  693. str[stri] = x
  694. stri++
  695. escape = false
  696. break
  697. }
  698. // something escaped outside of string gets added to string
  699. str[stri] = x
  700. stri++
  701. escape = true
  702. case '"':
  703. if commt {
  704. com[comi] = x
  705. comi++
  706. break
  707. }
  708. if escape {
  709. str[stri] = x
  710. stri++
  711. escape = false
  712. break
  713. }
  714. space = false
  715. // send previous gathered text and the quote
  716. if stri != 0 {
  717. l.value = zString
  718. l.token = string(str[:stri])
  719. l.tokenUpper = strings.ToUpper(l.token)
  720. l.length = stri
  721. debug.Printf("[%+v]", l.token)
  722. c <- l
  723. stri = 0
  724. }
  725. // send quote itself as separate token
  726. l.value = zQuote
  727. l.token = "\""
  728. l.tokenUpper = l.token
  729. l.length = 1
  730. c <- l
  731. quote = !quote
  732. case '(', ')':
  733. if commt {
  734. com[comi] = x
  735. comi++
  736. break
  737. }
  738. if escape {
  739. str[stri] = x
  740. stri++
  741. escape = false
  742. break
  743. }
  744. if quote {
  745. str[stri] = x
  746. stri++
  747. break
  748. }
  749. switch x {
  750. case ')':
  751. brace--
  752. if brace < 0 {
  753. l.token = "extra closing brace"
  754. l.tokenUpper = l.token
  755. l.err = true
  756. debug.Printf("[%+v]", l.token)
  757. c <- l
  758. return
  759. }
  760. case '(':
  761. brace++
  762. }
  763. default:
  764. escape = false
  765. if commt {
  766. com[comi] = x
  767. comi++
  768. break
  769. }
  770. str[stri] = x
  771. stri++
  772. space = false
  773. }
  774. x, err = s.tokenText()
  775. }
  776. if stri > 0 {
  777. // Send remainder
  778. l.token = string(str[:stri])
  779. l.tokenUpper = strings.ToUpper(l.token)
  780. l.length = stri
  781. l.value = zString
  782. debug.Printf("[%+v]", l.token)
  783. c <- l
  784. }
  785. if brace != 0 {
  786. l.token = "unbalanced brace"
  787. l.tokenUpper = l.token
  788. l.err = true
  789. c <- l
  790. }
  791. }
  792. // Extract the class number from CLASSxx
  793. func classToInt(token string) (uint16, bool) {
  794. offset := 5
  795. if len(token) < offset+1 {
  796. return 0, false
  797. }
  798. class, err := strconv.ParseUint(token[offset:], 10, 16)
  799. if err != nil {
  800. return 0, false
  801. }
  802. return uint16(class), true
  803. }
  804. // Extract the rr number from TYPExxx
  805. func typeToInt(token string) (uint16, bool) {
  806. offset := 4
  807. if len(token) < offset+1 {
  808. return 0, false
  809. }
  810. typ, err := strconv.ParseUint(token[offset:], 10, 16)
  811. if err != nil {
  812. return 0, false
  813. }
  814. return uint16(typ), true
  815. }
  816. // Parse things like 2w, 2m, etc, Return the time in seconds.
  817. func stringToTtl(token string) (uint32, bool) {
  818. s := uint32(0)
  819. i := uint32(0)
  820. for _, c := range token {
  821. switch c {
  822. case 's', 'S':
  823. s += i
  824. i = 0
  825. case 'm', 'M':
  826. s += i * 60
  827. i = 0
  828. case 'h', 'H':
  829. s += i * 60 * 60
  830. i = 0
  831. case 'd', 'D':
  832. s += i * 60 * 60 * 24
  833. i = 0
  834. case 'w', 'W':
  835. s += i * 60 * 60 * 24 * 7
  836. i = 0
  837. case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
  838. i *= 10
  839. i += uint32(c) - '0'
  840. default:
  841. return 0, false
  842. }
  843. }
  844. return s + i, true
  845. }
  846. // Parse LOC records' <digits>[.<digits>][mM] into a
  847. // mantissa exponent format. Token should contain the entire
  848. // string (i.e. no spaces allowed)
  849. func stringToCm(token string) (e, m uint8, ok bool) {
  850. if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
  851. token = token[0 : len(token)-1]
  852. }
  853. s := strings.SplitN(token, ".", 2)
  854. var meters, cmeters, val int
  855. var err error
  856. switch len(s) {
  857. case 2:
  858. if cmeters, err = strconv.Atoi(s[1]); err != nil {
  859. return
  860. }
  861. fallthrough
  862. case 1:
  863. if meters, err = strconv.Atoi(s[0]); err != nil {
  864. return
  865. }
  866. case 0:
  867. // huh?
  868. return 0, 0, false
  869. }
  870. ok = true
  871. if meters > 0 {
  872. e = 2
  873. val = meters
  874. } else {
  875. e = 0
  876. val = cmeters
  877. }
  878. for val > 10 {
  879. e++
  880. val /= 10
  881. }
  882. if e > 9 {
  883. ok = false
  884. }
  885. m = uint8(val)
  886. return
  887. }
  888. func appendOrigin(name, origin string) string {
  889. if origin == "." {
  890. return name + origin
  891. }
  892. return name + "." + origin
  893. }
  894. // LOC record helper function
  895. func locCheckNorth(token string, latitude uint32) (uint32, bool) {
  896. switch token {
  897. case "n", "N":
  898. return LOC_EQUATOR + latitude, true
  899. case "s", "S":
  900. return LOC_EQUATOR - latitude, true
  901. }
  902. return latitude, false
  903. }
  904. // LOC record helper function
  905. func locCheckEast(token string, longitude uint32) (uint32, bool) {
  906. switch token {
  907. case "e", "E":
  908. return LOC_EQUATOR + longitude, true
  909. case "w", "W":
  910. return LOC_EQUATOR - longitude, true
  911. }
  912. return longitude, false
  913. }
  914. // "Eat" the rest of the "line". Return potential comments
  915. func slurpRemainder(c chan lex, f string) (*ParseError, string) {
  916. l := <-c
  917. com := ""
  918. switch l.value {
  919. case zBlank:
  920. l = <-c
  921. com = l.comment
  922. if l.value != zNewline && l.value != zEOF {
  923. return &ParseError{f, "garbage after rdata", l}, ""
  924. }
  925. case zNewline:
  926. com = l.comment
  927. case zEOF:
  928. default:
  929. return &ParseError{f, "garbage after rdata", l}, ""
  930. }
  931. return nil, com
  932. }
  933. // Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64"
  934. // Used for NID and L64 record.
  935. func stringToNodeID(l lex) (uint64, *ParseError) {
  936. if len(l.token) < 19 {
  937. return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
  938. }
  939. // There must be three colons at fixes postitions, if not its a parse error
  940. if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' {
  941. return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
  942. }
  943. s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
  944. u, err := strconv.ParseUint(s, 16, 64)
  945. if err != nil {
  946. return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
  947. }
  948. return u, nil
  949. }