| author | Felix Hanley <felix@monk.local> | 2017-07-12 13:41:54 +0000 |
|---|---|---|
| committer | Felix Hanley <felix@monk.local> | 2017-07-12 13:41:54 +0000 |
| commit | d257530cb0278c71b6edbbd0d5a60531f4536d09 | |
| tree | 2b55fe5ac959a0d19340498b030c08216bce13dc | |
147 files changed, 36655 insertions, 0 deletions
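The config.go added below layers three sources of settings (built-in defaults, a TOML file, and command-line flags), merging each pass into a shared cfg struct with mergo; the struct uses pointer fields via the ptrStr/ptrInt/ptrBool helpers so that an unset value can be told apart from a zero value. The following is a minimal, editor-added sketch of that pattern rather than the file itself; it assumes mergo's MergeWithOverwrite(dst, src) call (config.go spells it MergeWitOverwrite) and uses made-up field names.

```go
// Sketch of the layered-configuration pattern used by config.go below:
// defaults first, then values from a file, then flags, with later layers
// overwriting earlier ones via mergo. Field names here are illustrative.
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type settings struct {
	// Pointer fields let mergo distinguish "not set" (nil) from a zero value.
	Address *string
	Port    *int
}

func strp(s string) *string { return &s }
func intp(i int) *int       { return &i }

func main() {
	cfg := settings{Address: strp("127.0.0.1"), Port: intp(6881)} // defaults
	fromFile := settings{Port: intp(5353)}                        // e.g. decoded from TOML
	fromFlags := settings{Address: strp("0.0.0.0")}               // e.g. parsed from flags

	// Assumed API: mergo.MergeWithOverwrite overwrites dst with the fields
	// that are set (non-nil) in src and leaves the rest of dst untouched.
	if err := mergo.MergeWithOverwrite(&cfg, fromFile); err != nil {
		panic(err)
	}
	if err := mergo.MergeWithOverwrite(&cfg, fromFlags); err != nil {
		panic(err)
	}
	fmt.Println(*cfg.Address, *cfg.Port)
}
```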
diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..84fdc72 --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,33 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/BurntSushi/toml" + packages = ["."] + revision = "b26d9c308763d68093482582cea63d69be07a0f0" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/golang-lru" + packages = [".","simplelru"] + revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" + +[[projects]] + name = "github.com/imdario/mergo" + packages = ["."] + revision = "3e95a51e0639b4cf372f2ccf74c86749d747fbdc" + version = "0.2.2" + +[[projects]] + branch = "master" + name = "github.com/miekg/dns" + packages = ["."] + revision = "e46719b2fef404d2e531c0dd9055b1c95ff01e2e" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e6b61cbe7f7e6623c17df76feea460869b2440e85d3b6a6b92a36daa95d6af80" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..b4e12bf --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,83 @@ + +## Gopkg.toml example (these lines may be deleted) + +## "metadata" defines metadata about the project that could be used by other independent +## systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + +## "required" lists a set of packages (not projects) that must be included in +## Gopkg.lock. This list is merged with the set of packages imported by the current +## project. Use it when your project needs a package it doesn't explicitly import - +## including "main" packages. +# required = ["github.com/user/thing/cmd/thing"] + +## "ignored" lists a set of packages (not projects) that are ignored when +## dep statically analyzes source code. Ignored packages can be in this project, +## or in a dependency. +# ignored = ["github.com/user/project/badpkg"] + +## Constraints are rules for how directly imported projects +## may be incorporated into the depgraph. They are respected by +## dep whether coming from the Gopkg.toml of the current project or a dependency. +# [[constraint]] +## Required: the root import path of the project being constrained. +# name = "github.com/user/project" +# +## Recommended: the version constraint to enforce for the project. +## Only one of "branch", "version" or "revision" can be specified. +# version = "1.0.0" +# branch = "master" +# revision = "abc123" +# +## Optional: an alternate location (URL or import path) for the project's source. +# source = "https://github.com/myfork/package.git" +# +## "metadata" defines metadata about the dependency or override that could be used +## by other independent systems. The metadata defined here will be ignored by dep. +# [metadata] +# key1 = "value that convey data to other systems" +# system1-data = "value that is used by a system" +# system2-data = "value that is used by another system" + +## Overrides have the same structure as [[constraint]], but supersede all +## [[constraint]] declarations from all projects. Only [[override]] from +## the current project's are applied. +## +## Overrides are a sledgehammer. Use them only as a last resort. +# [[override]] +## Required: the root import path of the project being constrained. 
+# name = "github.com/user/project" +# +## Optional: specifying a version constraint override will cause all other +## constraints on this project to be ignored; only the overridden constraint +## need be satisfied. +## Again, only one of "branch", "version" or "revision" can be specified. +# version = "1.0.0" +# branch = "master" +# revision = "abc123" +# +## Optional: specifying an alternate source location as an override will +## enforce that the alternate location is used for that project, regardless of +## what source location any dependent projects specify. +# source = "https://github.com/myfork/package.git" + + + +[[constraint]] + branch = "master" + name = "github.com/BurntSushi/toml" + +[[constraint]] + name = "github.com/imdario/mergo" + version = "0.2.2" + +[[constraint]] + branch = "master" + name = "github.com/hashicorp/golang-lru" + +[[constraint]] + branch = "master" + name = "github.com/miekg/dns" diff --git a/config.go b/config.go new file mode 100644 index 0000000..78bdc31 --- /dev/null +++ b/config.go @@ -0,0 +1,90 @@ +package main + +import ( + "flag" + "fmt" + "github.com/BurntSushi/toml" + "github.com/imdario/mergo" + "os" + "path/filepath" + "sync" +) + +type config struct { + Address *string + Port *int + Debug *bool + Quiet *bool + CfgFile *string + Servers *[]string + NoTcp *bool `toml:"no-tcp"` + NoHttp *bool `toml:"no-http"` + HttpAddress *string `toml:"http-address"` + HttpPort *int `toml:"http-port"` + UdpTimeout *int `toml:"udp-timeout"` + TcpTimeout *int `toml:"tcp-timeout"` + sync.RWMutex +} + +// Global +var cfg config + +// Defaults, overwritten by config and then flags +// TODO pick better nameservers +var defaultCfg = config{ + Address: ptrStr("127.0.0.1"), + Port: ptrInt(6881), + Debug: ptrBool(false), + CfgFile: ptrStr("/etc/nomoreads/config"), + Servers: &[]string{"8.8.8.8", "8.8.4.4"}, + Quiet: ptrBool(false), + NoTcp: ptrBool(false), + NoHttp: ptrBool(false), + HttpAddress: ptrStr("localhost"), + HttpPort: ptrInt(8080), + UdpTimeout: ptrInt(10), + TcpTimeout: ptrInt(10), +} + +func loadConfigFlags() config { + newCfg := config{} + flag.StringVar(&newCfg.Address, "address", defaultCfg.Address, "listen address") + flag.IntVar(&newCfg.Port, "port", defaultCfg.BasePort, "listen port") + flag.BoolVar(&newCfg.Debug, "debug", defaultCfg.Debug, "provide debug output") + flag.BoolVar(&newCfg.Quiet, "quiet", defaultCfg.Quiet, "log only errors") + flag.BoolVar(&newCfg.NoTcp, "no-tcp", defaultCfg.NoTcp, "no TCP listener") + flag.BoolVar(&newCfg.NoHttp, "no-http", defaultCfg.NoHttp, "no HTTP service") + flag.StringVar(&newCfg.HttpAddress, "http-address", defaultCfg.HttpAddress, "HTTP listen address") + flag.IntVar(&newCfg.HttpPort, "http-port", defaultCfg.HttpPort, "HTTP listen port") + flag.IntVar(&newCfg.UdpTimeout, "udp-timeout", defaultCfg.UdpTimeout, "UDP timeout in seconds") + flag.IntVar(&newCfg.TcpTimeout, "tcp-timeout", defaultCfg.TcpTimeout, "TCP timeout in seconds") + + flag.Parse() + + if cfg.Debug { + fmt.Printf("Configuration flags read: %v\n", newCfg) + } + mergo.MergeWitOverwrite(&cfg, newCfg) +} + +func loadConfigFile() config { + newCfg := config{} + // Read current value of configuration file + cfgPath, _ := filepath.Abs(cfg.CfgFile) + if _, err := os.Stat(cfgPath); !os.IsNotExist(err) { + // fmt.Printf("Using configuration from %s\n", cfgPath) + md, err := toml.DecodeFile(cfgPath, &newCfg) + if err != nil { + fmt.Printf("Failed to read configuration: %q\n", err) + os.Exit(1) + } + if len(md.Undecoded()) > 0 { + fmt.Printf("Extraneous 
configuration keys: %q\n", md.Undecoded()) + } + } + + if cfg.Debug { + fmt.Printf("Configuration file read: %v\n", newCfg) + } + mergo.MergeWitOverwrite(&cfg, newCfg) +} diff --git a/handler.go b/handler.go new file mode 100644 index 0000000..0cdddfa --- /dev/null +++ b/handler.go @@ -0,0 +1,21 @@ +package main + +import ( + //"fmt" + //"github.com/miekg/dns" + //"net" + //"time" + "github.com/hashicorp/golang-lru" +) + +type handler struct { + resolver *Resolver + cache *lru.Cache +} + +// Create a handler for miekg/dns +func newHandler(net string) *handler { + resolver := &Resolver{} + cache := NewARC(MaxCaches) + return &handler{resolver, cache} +} @@ -0,0 +1,78 @@ +package main + +import ( + "expvar" + "fmt" + "syscall" + "time" + //"github.com/pkg/profile" + "net" + "net/http" + "os" + "os/signal" +) + +// Exported vars +var ( + queriesTotal = expvar.NewInt("queriesTotal") + start = time.Now() +) + +func uptime() interface{} { + return int64(time.Since(start).Seconds()) +} + +func main() { + //defer profile.Start(profile.CPUProfile).Stop() + expvar.Publish("uptime", expvar.Func(uptime)) + + loadConfig() + + server := &Server{ + host: host, + port: port, + } + server.Run() + + // HTTP Server + if !Config.NoHttp { + http.HandleFunc("/", indexHandler) + httpPort, err := strconv.Itoa(Config.HttpPort) + if err != nil { + fmt.Println("Error parsing HTTP port, using default 8080") + httpPort = "8080" + } + sock, _ := net.Listen("tcp", Config.HttpAddrress+":"+httpPort) + go func() { + if !Config.Quiet { + fmt.Printf("HTTP now available at %s:%s\n", Config.HttpAddress, httpPort) + } + http.Serve(sock, nil) + }() + } + + // Notify on all signals, one at a time + signals := make(chan os.Signal, 1) + signal.Notify(signals) + + // Trigger for list updates + ticker := time.Tick(time.Second * cfg.ListUpdate) + + // Persistent loop to catch signals etc. 
+ for { + select { + case sig = <-signals: + select { + case sig == syscall.SIGKILL: + os.Exit(1) + case sig == syscall.SIGINT || sig == syscall.SIGTERM: + os.Exit(0) + case sig == syscall.SIGHUP: + loadConfigFile() + } + fmt.Printf("Received signal %v\n", sig) + case <-ticker: + // Reload lists + } + } +} diff --git a/nameserver.go b/nameserver.go new file mode 100644 index 0000000..653eaa4 --- /dev/null +++ b/nameserver.go @@ -0,0 +1,8 @@ +package main + +type nameserver struct { + host string + bad bool +} + +// type upstream []nameserver diff --git a/query.go b/query.go new file mode 100644 index 0000000..e8f44bd --- /dev/null +++ b/query.go @@ -0,0 +1,11 @@ +package main + +type query struct { + qname string + qtype string + qclass string +} + +func (q *query) String() string { + return q.qname + " " + q.qclass + " " + q.qtype +} diff --git a/resolver.go b/resolver.go new file mode 100644 index 0000000..3628ddd --- /dev/null +++ b/resolver.go @@ -0,0 +1,104 @@ +package main + +import ( + "fmt" + "github.com/miekg/dns" + "strings" +) + +func (r *resolver) lookup(qname string) error { + c := new(dns.Client) + t := new(dns.Transfer) + c.Net = "udp" + if *four { + c.Net = "udp4" + } + if *six { + c.Net = "udp6" + } + if *tcp { + c.Net = "tcp" + if *four { + c.Net = "tcp4" + } + if *six { + c.Net = "tcp6" + } + } + + m := &dns.Msg{ + MsgHdr: dns.MsgHdr{ + Authoritative: false, + AuthenticatedData: false, + CheckingDisabled: false, + RecursionDesired: true, + Opcode: "query", + }, + Question: make([]dns.Question, 1), + } + if op, ok := dns.StringToOpcode[strings.ToUpper(*opcode)]; ok { + m.Opcode = op + } + m.Rcode = dns.RcodeSuccess + if rc, ok := dns.StringToRcode[strings.ToUpper(*rcode)]; ok { + m.Rcode = rc + } + +Query: + for i, v := range qname { + if i < len(qtype) { + qt = qtype[i] + } + if i < len(qclass) { + qc = qclass[i] + } + m.Question[0] = dns.Question{Name: dns.Fqdn(v), Qtype: qt, Qclass: qc} + m.Id = dns.Id() + if *query { + fmt.Printf("%s", m.String()) + fmt.Printf("\n;; size: %d bytes\n\n", m.Len()) + } + r, rtt, err := c.Exchange(m, nameserver) + + Redo: + switch err { + case nil: + //do nothing + case dns.ErrTruncated: + if *fallback { + if !*dnssec { + fmt.Printf(";; Truncated, trying %d bytes bufsize\n", dns.DefaultMsgSize) + o := new(dns.OPT) + o.Hdr.Name = "." 
+ o.Hdr.Rrtype = dns.TypeOPT + o.SetUDPSize(dns.DefaultMsgSize) + m.Extra = append(m.Extra, o) + r, rtt, err = c.Exchange(m, nameserver) + *dnssec = true + goto Redo + } else { + // First EDNS, then TCP + fmt.Printf(";; Truncated, trying TCP\n") + c.Net = "tcp" + r, rtt, err = c.Exchange(m, nameserver) + *fallback = false + goto Redo + } + } + fmt.Printf(";; Truncated\n") + default: + fmt.Printf(";; %s\n", err.Error()) + continue + } + if r.Id != m.Id { + fmt.Fprintf(os.Stderr, "Id mismatch\n") + return + } + if *short { + // shortenMsg(r) + } + + fmt.Printf("%v", r) + fmt.Printf("\n;; query time: %.3d µs, server: %s(%s), size: %d bytes\n", rtt/1e3, nameserver, c.Net, r.Len()) + } +} diff --git a/server.go b/server.go new file mode 100644 index 0000000..9761e71 --- /dev/null +++ b/server.go @@ -0,0 +1,52 @@ +package main + +import ( + "fmt" + "github.com/miekg/dns" + //"strconv" + //"time" +) + +type server struct { + address string + port int +} + +func (s *server) Run() { + udpHandler := dns.NewServeMux() + udpHandler.HandleFunc(".", Handler.DoUDP) + + udpServer := &dns.Server{ + Addr: s.Addr(), + Net: "udp", + Handler: udpHandler, + UDPSize: 65535, + ReadTimeout: Config.UdpTimeout, + WriteTimeout: Config.UdpTimeout, + } + go s.start(udpServer) + + if !Config.NoTcp { + tcpHandler := dns.NewServeMux() + tcpHandler.HandleFunc(".", Handler.DoTCP) + + tcpServer := &dns.Server{ + Addr: s.Addr(), + Net: "tcp", + Handler: tcpHandler, + ReadTimeout: Config.TcpTimeout, + WriteTimeout: Config.TcpTimeout, + } + go s.start(tcpServer) + } +} + +func (s *Server) start(ds *dns.Server) { + if !Config.Quiet { + fmt.Println("Listening on ", ds.Net, ":", s.Addr()) + } + err := ds.ListenAndServe() + if err != nil { + fmt.Println("Failed to listen on ", ds.Net, ":", s.Addr(), ": ", err.Error()) + } +} @@ -0,0 +1,13 @@ +package main + +func ptrStr(str string) *string { + return &str +} + +func ptrInt(i int) *int { + return &i +} + +func ptrBool(b bool) *bool { + return &b +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000..0cd3800 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 0000000..8b8afc4 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... 
+ - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 0000000..6efcfd0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000..5a8e332 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 0000000..3600848 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000..7c1b37e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
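The loose mapping described in the README above is what config.go earlier in this commit leans on: it decodes into a struct and then inspects the returned MetaData for keys that went nowhere. Below is a minimal, editor-added sketch of that check; the struct and TOML snippet are made up, while toml.Decode and MetaData.Undecoded are as documented above.

```go
// Sketch: warn about TOML keys that did not decode into any struct field,
// the same md.Undecoded() check loadConfigFile in config.go performs.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type Config struct {
	Age  int
	Cats []string
}

func main() {
	blob := `
Age = 25
Cats = ["Cauchy", "Plato"]
Typo_key = "never read"
`
	var conf Config
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	// Typo_key is present in the document but has no matching field.
	for _, key := range md.Undecoded() {
		fmt.Printf("extraneous configuration key: %q\n", key.String())
	}
}
```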
diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.go b/vendor/github.com/BurntSushi/toml/_examples/example.go new file mode 100644 index 0000000..79f31f2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/example.go @@ -0,0 +1,61 @@ +package main + +import ( + "fmt" + "time" + + "github.com/BurntSushi/toml" +) + +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} + +func main() { + var config tomlConfig + if _, err := toml.DecodeFile("example.toml", &config); err != nil { + fmt.Println(err) + return + } + + fmt.Printf("Title: %s\n", config.Title) + fmt.Printf("Owner: %s (%s, %s), Born: %s\n", + config.Owner.Name, config.Owner.Org, config.Owner.Bio, + config.Owner.DOB) + fmt.Printf("Database: %s %v (Max conn. %d), Enabled? %v\n", + config.DB.Server, config.DB.Ports, config.DB.ConnMax, + config.DB.Enabled) + for serverName, server := range config.Servers { + fmt.Printf("Server: %s (%s, %s)\n", serverName, server.IP, server.DC) + } + fmt.Printf("Client data: %v\n", config.Clients.Data) + fmt.Printf("Client hosts: %v\n", config.Clients.Hosts) +} diff --git a/vendor/github.com/BurntSushi/toml/_examples/example.toml b/vendor/github.com/BurntSushi/toml/_examples/example.toml new file mode 100644 index 0000000..32c7a4f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/example.toml @@ -0,0 +1,35 @@ +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] diff --git a/vendor/github.com/BurntSushi/toml/_examples/hard.toml b/vendor/github.com/BurntSushi/toml/_examples/hard.toml new file mode 100644 index 0000000..26145d2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/hard.toml @@ -0,0 +1,22 @@ +# Test file for TOML +# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate +# This part you'll really hate + +[the] +test_string = "You'll hate me after this - #" # " Annoying, isn't it? + + [the.hard] + test_array = [ "] ", " # "] # ] There you go, parse this! + test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ] + # You didn't think it'd as easy as chucking out the last #, did you? + another_test_string = " Same thing, but with a string #" + harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too" + # Things will get harder + + [the.hard.bit#] + what? = "You don't think some user won't do that?" 
+ multi_line_array = [ + "]", + # ] Oh yes I did + ] + diff --git a/vendor/github.com/BurntSushi/toml/_examples/implicit.toml b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml new file mode 100644 index 0000000..1dea5ce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/implicit.toml @@ -0,0 +1,4 @@ +# [x] you +# [x.y] don't +# [x.y.z] need these +[x.y.z.w] # for this to work diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml new file mode 100644 index 0000000..74e9e33 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/invalid-apples.toml @@ -0,0 +1,6 @@ +# DO NOT WANT +[fruit] +type = "apple" + +[fruit.type] +apple = "yes" diff --git a/vendor/github.com/BurntSushi/toml/_examples/invalid.toml b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml new file mode 100644 index 0000000..beb1dba --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/invalid.toml @@ -0,0 +1,35 @@ +# This is an INVALID TOML document. Boom. +# Can you spot the error without help? + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T7:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme1.toml b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml new file mode 100644 index 0000000..3e1261d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/readme1.toml @@ -0,0 +1,5 @@ +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z diff --git a/vendor/github.com/BurntSushi/toml/_examples/readme2.toml b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml new file mode 100644 index 0000000..b51cd93 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/_examples/readme2.toml @@ -0,0 +1 @@ +some_key_NAME = "wat" diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING new file mode 100644 index 0000000..5a8e332 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. 
+ diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md new file mode 100644 index 0000000..93f4e3a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md @@ -0,0 +1,13 @@ +# Implements the TOML test suite interface + +This is an implementation of the interface expected by +[toml-test](https://github.com/BurntSushi/toml-test) for my +[toml parser written in Go](https://github.com/BurntSushi/toml). +In particular, it maps TOML data on `stdin` to a JSON format on `stdout`. + + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Compatible with `toml-test` version +[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go new file mode 100644 index 0000000..14e7557 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go @@ -0,0 +1,90 @@ +// Command toml-test-decoder satisfies the toml-test interface for testing +// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout. +package main + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "os" + "path" + "time" + + "github.com/BurntSushi/toml" +) + +func init() { + log.SetFlags(0) + + flag.Usage = usage + flag.Parse() +} + +func usage() { + log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0])) + flag.PrintDefaults() + + os.Exit(1) +} + +func main() { + if flag.NArg() != 0 { + flag.Usage() + } + + var tmp interface{} + if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil { + log.Fatalf("Error decoding TOML: %s", err) + } + + typedTmp := translate(tmp) + if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil { + log.Fatalf("Error encoding JSON: %s", err) + } +} + +func translate(tomlData interface{}) interface{} { + switch orig := tomlData.(type) { + case map[string]interface{}: + typed := make(map[string]interface{}, len(orig)) + for k, v := range orig { + typed[k] = translate(v) + } + return typed + case []map[string]interface{}: + typed := make([]map[string]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v).(map[string]interface{}) + } + return typed + case []interface{}: + typed := make([]interface{}, len(orig)) + for i, v := range orig { + typed[i] = translate(v) + } + + // We don't really need to tag arrays, but let's be future proof. + // (If TOML ever supports tuples, we'll need this.) 
+ return tag("array", typed) + case time.Time: + return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) + case bool: + return tag("bool", fmt.Sprintf("%v", orig)) + case int64: + return tag("integer", fmt.Sprintf("%d", orig)) + case float64: + return tag("float", fmt.Sprintf("%v", orig)) + case string: + return tag("string", orig) + } + + panic(fmt.Sprintf("Unknown type: %T", tomlData)) +} + +func tag(typeName string, data interface{}) map[string]interface{} { + return map[string]interface{}{ + "type": typeName, + "value": data, + } +} diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING new file mode 100644 index 0000000..5a8e332 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. + diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md new file mode 100644 index 0000000..a45bd4d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md @@ -0,0 +1,13 @@ +# Implements the TOML test suite interface for TOML encoders + +This is an implementation of the interface expected by +[toml-test](https://github.com/BurntSushi/toml-test) for the +[TOML encoder](https://github.com/BurntSushi/toml). +In particular, it maps JSON data on `stdin` to a TOML format on `stdout`. + + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Compatible with `toml-test` version +[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) diff --git a/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go new file mode 100644 index 0000000..092cc68 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go @@ -0,0 +1,131 @@ +// Command toml-test-encoder satisfies the toml-test interface for testing +// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout. 
+package main + +import ( + "encoding/json" + "flag" + "log" + "os" + "path" + "strconv" + "time" + + "github.com/BurntSushi/toml" +) + +func init() { + log.SetFlags(0) + + flag.Usage = usage + flag.Parse() +} + +func usage() { + log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) + flag.PrintDefaults() + + os.Exit(1) +} + +func main() { + if flag.NArg() != 0 { + flag.Usage() + } + + var tmp interface{} + if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { + log.Fatalf("Error decoding JSON: %s", err) + } + + tomlData := translate(tmp) + if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { + log.Fatalf("Error encoding TOML: %s", err) + } +} + +func translate(typedJson interface{}) interface{} { + switch v := typedJson.(type) { + case map[string]interface{}: + if len(v) == 2 && in("type", v) && in("value", v) { + return untag(v) + } + m := make(map[string]interface{}, len(v)) + for k, v2 := range v { + m[k] = translate(v2) + } + return m + case []interface{}: + tabArray := make([]map[string]interface{}, len(v)) + for i := range v { + if m, ok := translate(v[i]).(map[string]interface{}); ok { + tabArray[i] = m + } else { + log.Fatalf("JSON arrays may only contain objects. This " + + "corresponds to only tables being allowed in " + + "TOML table arrays.") + } + } + return tabArray + } + log.Fatalf("Unrecognized JSON format '%T'.", typedJson) + panic("unreachable") +} + +func untag(typed map[string]interface{}) interface{} { + t := typed["type"].(string) + v := typed["value"] + switch t { + case "string": + return v.(string) + case "integer": + v := v.(string) + n, err := strconv.Atoi(v) + if err != nil { + log.Fatalf("Could not parse '%s' as integer: %s", v, err) + } + return n + case "float": + v := v.(string) + f, err := strconv.ParseFloat(v, 64) + if err != nil { + log.Fatalf("Could not parse '%s' as float64: %s", v, err) + } + return f + case "datetime": + v := v.(string) + t, err := time.Parse("2006-01-02T15:04:05Z", v) + if err != nil { + log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) + } + return t + case "bool": + v := v.(string) + switch v { + case "true": + return true + case "false": + return false + } + log.Fatalf("Could not parse '%s' as a boolean.", v) + case "array": + v := v.([]interface{}) + array := make([]interface{}, len(v)) + for i := range v { + if m, ok := v[i].(map[string]interface{}); ok { + array[i] = untag(m) + } else { + log.Fatalf("Arrays may only contain other arrays or "+ + "primitive values, but found a '%T'.", m) + } + } + return array + } + log.Fatalf("Unrecognized tag type '%s'.", t) + panic("unreachable") +} + +func in(key string, m map[string]interface{}) bool { + _, ok := m[key] + return ok +} diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING new file mode 100644 index 0000000..5a8e332 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING @@ -0,0 +1,14 @@ + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + Version 2, December 2004 + + Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> + + Everyone is permitted to copy and distribute verbatim or modified + copies of this license document, and changing it is allowed as long + as the name is changed. + + DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. You just DO WHAT THE FUCK YOU WANT TO. 
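The two toml-test shims above exchange data in a tagged JSON form: tag() wraps every primitive as an object with "type" and "value" fields, the value carried as a string, and untag() reverses the wrapping. A small editor-added illustration of that format, standalone rather than taken from the shims:

```go
// Sketch of the tagged-JSON wrapping used by the toml-test shims above.
package main

import (
	"encoding/json"
	"fmt"
)

// tag mirrors the helper in toml-test-decoder/main.go above.
func tag(typeName string, data interface{}) map[string]interface{} {
	return map[string]interface{}{"type": typeName, "value": data}
}

func main() {
	doc := map[string]interface{}{
		"answer": tag("integer", "42"), // primitive values travel as strings
	}
	out, _ := json.Marshal(doc)
	fmt.Println(string(out)) // {"answer":{"type":"integer","value":"42"}}
}
```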
+ diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md new file mode 100644 index 0000000..51231e2 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/README.md @@ -0,0 +1,21 @@ +# TOML Validator + +If Go is installed, it's simple to try it out: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +You can see the types of every key in a TOML file with: + +```bash +tomlv -types some-toml-file.toml +``` + +At the moment, only one error message is reported at a time. Error messages +include line numbers. No output means that the files given are valid TOML, or +there is a bug in `tomlv`. + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) diff --git a/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go new file mode 100644 index 0000000..c7d689a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/cmd/tomlv/main.go @@ -0,0 +1,61 @@ +// Command tomlv validates TOML documents and prints each key's type. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "path" + "strings" + "text/tabwriter" + + "github.com/BurntSushi/toml" +) + +var ( + flagTypes = false +) + +func init() { + log.SetFlags(0) + + flag.BoolVar(&flagTypes, "types", flagTypes, + "When set, the types of every defined key will be shown.") + + flag.Usage = usage + flag.Parse() +} + +func usage() { + log.Printf("Usage: %s toml-file [ toml-file ... ]\n", + path.Base(os.Args[0])) + flag.PrintDefaults() + + os.Exit(1) +} + +func main() { + if flag.NArg() < 1 { + flag.Usage() + } + for _, f := range flag.Args() { + var tmp interface{} + md, err := toml.DecodeFile(f, &tmp) + if err != nil { + log.Fatalf("Error in '%s': %s", f, err) + } + if flagTypes { + printTypes(md) + } + } +} + +func printTypes(md toml.MetaData) { + tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + for _, key := range md.Keys() { + fmt.Fprintf(tabw, "%s%s\t%s\n", + strings.Repeat(" ", len(key)-1), key, md.Type(key...)) + } + tabw.Flush() +} diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000..b0fd51d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. 
+type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. 
+func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. 
+ if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! + return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch 
rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. + case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). 
+func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 0000000..b9914a6 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. 
+// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go new file mode 100644 index 0000000..0c36b33 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_test.go @@ -0,0 +1,1447 @@ +package toml + +import ( + "fmt" + "log" + "math" + "reflect" + "strings" + "testing" + "time" +) + +func TestDecodeSimple(t *testing.T) { + var testSimple = ` +age = 250 +andrew = "gallant" +kait = "brady" +now = 1987-07-05T05:45:00Z +yesOrNo = true +pi = 3.14 +colors = [ + ["red", "green", "blue"], + ["cyan", "magenta", "yellow", "black"], +] + +[My.Cats] +plato = "cat 1" +cauchy = "cat 2" +` + + type cats struct { + Plato string + Cauchy string + } + type simple struct { + Age int + Colors [][]string + Pi float64 + YesOrNo bool + Now time.Time + Andrew string + Kait string + My map[string]cats + } + + var val simple + _, err := Decode(testSimple, &val) + if err != nil { + t.Fatal(err) + } + + now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") + if err != nil { + panic(err) + } + var answer = simple{ + Age: 250, + Andrew: "gallant", + Kait: "brady", + Now: now, + YesOrNo: true, + Pi: 3.14, + Colors: [][]string{ + {"red", "green", "blue"}, + {"cyan", "magenta", "yellow", "black"}, + }, + My: map[string]cats{ + "Cats": {Plato: "cat 1", Cauchy: "cat 2"}, + }, + } + if !reflect.DeepEqual(val, answer) { + t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", + answer, val) + } +} + +func TestDecodeEmbedded(t *testing.T) { + type Dog struct{ Name string } + type Age int + type cat struct{ Name string } + + for _, test := range []struct { + label string + input string + decodeInto interface{} + wantDecoded interface{} + }{ + { + label: "embedded struct", + input: `Name = "milton"`, + decodeInto: &struct{ Dog }{}, + wantDecoded: &struct{ Dog }{Dog{"milton"}}, + }, + { + label: "embedded non-nil pointer to struct", + input: `Name = "milton"`, + decodeInto: &struct{ *Dog }{}, + wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, + }, + { + label: "embedded nil pointer to struct", + input: ``, + decodeInto: &struct{ *Dog }{}, + wantDecoded: &struct{ *Dog }{nil}, + }, + { + label: "unexported embedded struct", + input: `Name = "socks"`, + decodeInto: &struct{ cat }{}, + wantDecoded: &struct{ cat }{cat{"socks"}}, + }, + { + label: "embedded int", + input: `Age = -5`, + decodeInto: &struct{ Age }{}, + wantDecoded: &struct{ Age }{-5}, + }, + } { + _, err := Decode(test.input, test.decodeInto) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { + t.Errorf("%s: want decoded == %+v, got %+v", + test.label, test.wantDecoded, test.decodeInto) + } + } +} + +func TestDecodeIgnoredFields(t *testing.T) { + type simple struct { + Number int `toml:"-"` + } + const input = ` +Number = 123 +- = 234 +` + var s simple + if _, err := Decode(input, &s); err != nil { + 
t.Fatal(err) + } + if s.Number != 0 { + t.Errorf("got: %d; want 0", s.Number) + } +} + +func TestTableArrays(t *testing.T) { + var tomlTableArrays = ` +[[albums]] +name = "Born to Run" + + [[albums.songs]] + name = "Jungleland" + + [[albums.songs]] + name = "Meeting Across the River" + +[[albums]] +name = "Born in the USA" + + [[albums.songs]] + name = "Glory Days" + + [[albums.songs]] + name = "Dancing in the Dark" +` + + type Song struct { + Name string + } + + type Album struct { + Name string + Songs []Song + } + + type Music struct { + Albums []Album + } + + expected := Music{[]Album{ + {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, + {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, + }} + var got Music + if _, err := Decode(tomlTableArrays, &got); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, got) { + t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) + } +} + +func TestTableNesting(t *testing.T) { + for _, tt := range []struct { + t string + want []string + }{ + {"[a.b.c]", []string{"a", "b", "c"}}, + {`[a."b.c"]`, []string{"a", "b.c"}}, + {`[a.'b.c']`, []string{"a", "b.c"}}, + {`[a.' b ']`, []string{"a", " b "}}, + {"[ d.e.f ]", []string{"d", "e", "f"}}, + {"[ g . h . i ]", []string{"g", "h", "i"}}, + {`[ j . "ʞ" . 'l' ]`, []string{"j", "ʞ", "l"}}, + } { + var m map[string]interface{} + if _, err := Decode(tt.t, &m); err != nil { + t.Errorf("Decode(%q): got error: %s", tt.t, err) + continue + } + if keys := extractNestedKeys(m); !reflect.DeepEqual(keys, tt.want) { + t.Errorf("Decode(%q): got nested keys %#v; want %#v", + tt.t, keys, tt.want) + } + } +} + +func extractNestedKeys(v map[string]interface{}) []string { + var result []string + for { + if len(v) != 1 { + return result + } + for k, m := range v { + result = append(result, k) + var ok bool + v, ok = m.(map[string]interface{}) + if !ok { + return result + } + } + + } +} + +// Case insensitive matching tests. +// A bit more comprehensive than needed given the current implementation, +// but implementations change. +// Probably still missing demonstrations of some ugly corner cases regarding +// case insensitive matching and multiple fields. 
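+//
+// Roughly: a TOML key such as "tOpString" can fill the struct field
+// TopString, while keys that match a field name exactly ("Match", "MatcH")
+// go to that exact field, as the expected values below demonstrate.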
+func TestCase(t *testing.T) { + var caseToml = ` +tOpString = "string" +tOpInt = 1 +tOpFloat = 1.1 +tOpBool = true +tOpdate = 2006-01-02T15:04:05Z +tOparray = [ "array" ] +Match = "i should be in Match only" +MatcH = "i should be in MatcH only" +once = "just once" +[nEst.eD] +nEstedString = "another string" +` + + type InsensitiveEd struct { + NestedString string + } + + type InsensitiveNest struct { + Ed InsensitiveEd + } + + type Insensitive struct { + TopString string + TopInt int + TopFloat float64 + TopBool bool + TopDate time.Time + TopArray []string + Match string + MatcH string + Once string + OncE string + Nest InsensitiveNest + } + + tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) + if err != nil { + panic(err) + } + expected := Insensitive{ + TopString: "string", + TopInt: 1, + TopFloat: 1.1, + TopBool: true, + TopDate: tme, + TopArray: []string{"array"}, + MatcH: "i should be in MatcH only", + Match: "i should be in Match only", + Once: "just once", + OncE: "", + Nest: InsensitiveNest{ + Ed: InsensitiveEd{NestedString: "another string"}, + }, + } + var got Insensitive + if _, err := Decode(caseToml, &got); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expected, got) { + t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) + } +} + +func TestPointers(t *testing.T) { + type Object struct { + Type string + Description string + } + + type Dict struct { + NamedObject map[string]*Object + BaseObject *Object + Strptr *string + Strptrs []*string + } + s1, s2, s3 := "blah", "abc", "def" + expected := &Dict{ + Strptr: &s1, + Strptrs: []*string{&s2, &s3}, + NamedObject: map[string]*Object{ + "foo": {"FOO", "fooooo!!!"}, + "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, + }, + BaseObject: &Object{"BASE", "da base"}, + } + + ex1 := ` +Strptr = "blah" +Strptrs = ["abc", "def"] + +[NamedObject.foo] +Type = "FOO" +Description = "fooooo!!!" + +[NamedObject.bar] +Type = "BAR" +Description = "ba-ba-ba-ba-barrrr!!!" 
+ +[BaseObject] +Type = "BASE" +Description = "da base" +` + dict := new(Dict) + _, err := Decode(ex1, dict) + if err != nil { + t.Errorf("Decode error: %v", err) + } + if !reflect.DeepEqual(expected, dict) { + t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) + } +} + +func TestDecodeDatetime(t *testing.T) { + const noTimestamp = "2006-01-02T15:04:05" + for _, tt := range []struct { + s string + t string + format string + }{ + {"1979-05-27T07:32:00Z", "1979-05-27T07:32:00Z", time.RFC3339}, + {"1979-05-27T00:32:00-07:00", "1979-05-27T00:32:00-07:00", time.RFC3339}, + { + "1979-05-27T00:32:00.999999-07:00", + "1979-05-27T00:32:00.999999-07:00", + time.RFC3339, + }, + {"1979-05-27T07:32:00", "1979-05-27T07:32:00", noTimestamp}, + { + "1979-05-27T00:32:00.999999", + "1979-05-27T00:32:00.999999", + noTimestamp, + }, + {"1979-05-27", "1979-05-27T00:00:00", noTimestamp}, + } { + var x struct{ D time.Time } + input := "d = " + tt.s + if _, err := Decode(input, &x); err != nil { + t.Errorf("Decode(%q): got error: %s", input, err) + continue + } + want, err := time.ParseInLocation(tt.format, tt.t, time.Local) + if err != nil { + panic(err) + } + if !x.D.Equal(want) { + t.Errorf("Decode(%q): got %s; want %s", input, x.D, want) + } + } +} + +func TestDecodeBadDatetime(t *testing.T) { + var x struct{ T time.Time } + for _, s := range []string{ + "123", + "2006-01-50T00:00:00Z", + "2006-01-30T00:00", + "2006-01-30T", + } { + input := "T = " + s + if _, err := Decode(input, &x); err == nil { + t.Errorf("Expected invalid DateTime error for %q", s) + } + } +} + +func TestDecodeMultilineStrings(t *testing.T) { + var x struct { + S string + } + const s0 = `s = """ +a b \n c +d e f +"""` + if _, err := Decode(s0, &x); err != nil { + t.Fatal(err) + } + if want := "a b \n c\nd e f\n"; x.S != want { + t.Errorf("got: %q; want: %q", x.S, want) + } + const s1 = `s = """a b c\ +"""` + if _, err := Decode(s1, &x); err != nil { + t.Fatal(err) + } + if want := "a b c"; x.S != want { + t.Errorf("got: %q; want: %q", x.S, want) + } +} + +type sphere struct { + Center [3]float64 + Radius float64 +} + +func TestDecodeSimpleArray(t *testing.T) { + var s1 sphere + if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { + t.Fatal(err) + } +} + +func TestDecodeArrayWrongSize(t *testing.T) { + var s1 sphere + if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { + t.Fatal("Expected array type mismatch error") + } +} + +func TestDecodeLargeIntoSmallInt(t *testing.T) { + type table struct { + Value int8 + } + var tab table + if _, err := Decode(`value = 500`, &tab); err == nil { + t.Fatal("Expected integer out-of-bounds error.") + } +} + +func TestDecodeSizedInts(t *testing.T) { + type table struct { + U8 uint8 + U16 uint16 + U32 uint32 + U64 uint64 + U uint + I8 int8 + I16 int16 + I32 int32 + I64 int64 + I int + } + answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} + toml := ` + u8 = 1 + u16 = 1 + u32 = 1 + u64 = 1 + u = 1 + i8 = -1 + i16 = -1 + i32 = -1 + i64 = -1 + i = -1 + ` + var tab table + if _, err := Decode(toml, &tab); err != nil { + t.Fatal(err.Error()) + } + if answer != tab { + t.Fatalf("Expected %#v but got %#v", answer, tab) + } +} + +func TestDecodeInts(t *testing.T) { + for _, tt := range []struct { + s string + want int64 + }{ + {"0", 0}, + {"+99", 99}, + {"-10", -10}, + {"1_234_567", 1234567}, + {"1_2_3_4", 1234}, + {"-9_223_372_036_854_775_808", math.MinInt64}, + {"9_223_372_036_854_775_807", math.MaxInt64}, + } { + var x struct{ N int64 } + input := "n = " + tt.s + if _, err := Decode(input, 
&x); err != nil { + t.Errorf("Decode(%q): got error: %s", input, err) + continue + } + if x.N != tt.want { + t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want) + } + } +} + +func TestDecodeFloats(t *testing.T) { + for _, tt := range []struct { + s string + want float64 + }{ + {"+1.0", 1}, + {"3.1415", 3.1415}, + {"-0.01", -0.01}, + {"5e+22", 5e22}, + {"1e6", 1e6}, + {"-2E-2", -2e-2}, + {"6.626e-34", 6.626e-34}, + {"9_224_617.445_991_228_313", 9224617.445991228313}, + {"9_876.54_32e1_0", 9876.5432e10}, + } { + var x struct{ N float64 } + input := "n = " + tt.s + if _, err := Decode(input, &x); err != nil { + t.Errorf("Decode(%q): got error: %s", input, err) + continue + } + if x.N != tt.want { + t.Errorf("Decode(%q): got %f; want %f", input, x.N, tt.want) + } + } +} + +func TestDecodeMalformedNumbers(t *testing.T) { + for _, tt := range []struct { + s string + want string + }{ + {"++99", "expected a digit"}, + {"0..1", "must be followed by one or more digits"}, + {"0.1.2", "Invalid float value"}, + {"1e2.3", "Invalid float value"}, + {"1e2e3", "Invalid float value"}, + {"_123", "expected value"}, + {"123_", "surrounded by digits"}, + {"1._23", "surrounded by digits"}, + {"1e__23", "surrounded by digits"}, + {"123.", "must be followed by one or more digits"}, + {"1.e2", "must be followed by one or more digits"}, + } { + var x struct{ N interface{} } + input := "n = " + tt.s + _, err := Decode(input, &x) + if err == nil { + t.Errorf("Decode(%q): got nil, want error containing %q", + input, tt.want) + continue + } + if !strings.Contains(err.Error(), tt.want) { + t.Errorf("Decode(%q): got %q, want error containing %q", + input, err, tt.want) + } + } +} + +func TestDecodeBadValues(t *testing.T) { + for _, tt := range []struct { + v interface{} + want string + }{ + {3, "non-pointer int"}, + {(*int)(nil), "nil"}, + } { + _, err := Decode(`x = 3`, tt.v) + if err == nil { + t.Errorf("Decode(%v): got nil; want error containing %q", + tt.v, tt.want) + continue + } + if !strings.Contains(err.Error(), tt.want) { + t.Errorf("Decode(%v): got %q; want error containing %q", + tt.v, err, tt.want) + } + } +} + +func TestUnmarshaler(t *testing.T) { + + var tomlBlob = ` +[dishes.hamboogie] +name = "Hamboogie with fries" +price = 10.99 + +[[dishes.hamboogie.ingredients]] +name = "Bread Bun" + +[[dishes.hamboogie.ingredients]] +name = "Lettuce" + +[[dishes.hamboogie.ingredients]] +name = "Real Beef Patty" + +[[dishes.hamboogie.ingredients]] +name = "Tomato" + +[dishes.eggsalad] +name = "Egg Salad with rice" +price = 3.99 + +[[dishes.eggsalad.ingredients]] +name = "Egg" + +[[dishes.eggsalad.ingredients]] +name = "Mayo" + +[[dishes.eggsalad.ingredients]] +name = "Rice" +` + m := &menu{} + if _, err := Decode(tomlBlob, m); err != nil { + t.Fatal(err) + } + + if len(m.Dishes) != 2 { + t.Log("two dishes should be loaded with UnmarshalTOML()") + t.Errorf("expected %d but got %d", 2, len(m.Dishes)) + } + + eggSalad := m.Dishes["eggsalad"] + if _, ok := interface{}(eggSalad).(dish); !ok { + t.Errorf("expected a dish") + } + + if eggSalad.Name != "Egg Salad with rice" { + t.Errorf("expected the dish to be named 'Egg Salad with rice'") + } + + if len(eggSalad.Ingredients) != 3 { + t.Log("dish should be loaded with UnmarshalTOML()") + t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients)) + } + + found := false + for _, i := range eggSalad.Ingredients { + if i.Name == "Rice" { + found = true + break + } + } + if !found { + t.Error("Rice was not loaded in UnmarshalTOML()") + } + + // test on a value - 
must be passed as * + o := menu{} + if _, err := Decode(tomlBlob, &o); err != nil { + t.Fatal(err) + } + +} + +func TestDecodeInlineTable(t *testing.T) { + input := ` +[CookieJar] +Types = {Chocolate = "yummy", Oatmeal = "best ever"} + +[Seasons] +Locations = {NY = {Temp = "not cold", Rating = 4}, MI = {Temp = "freezing", Rating = 9}} +` + type cookieJar struct { + Types map[string]string + } + type properties struct { + Temp string + Rating int + } + type seasons struct { + Locations map[string]properties + } + type wrapper struct { + CookieJar cookieJar + Seasons seasons + } + var got wrapper + + meta, err := Decode(input, &got) + if err != nil { + t.Fatal(err) + } + want := wrapper{ + CookieJar: cookieJar{ + Types: map[string]string{ + "Chocolate": "yummy", + "Oatmeal": "best ever", + }, + }, + Seasons: seasons{ + Locations: map[string]properties{ + "NY": { + Temp: "not cold", + Rating: 4, + }, + "MI": { + Temp: "freezing", + Rating: 9, + }, + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("after decode, got:\n\n%#v\n\nwant:\n\n%#v", got, want) + } + if len(meta.keys) != 12 { + t.Errorf("after decode, got %d meta keys; want 12", len(meta.keys)) + } + if len(meta.types) != 12 { + t.Errorf("after decode, got %d meta types; want 12", len(meta.types)) + } +} + +func TestDecodeInlineTableArray(t *testing.T) { + type point struct { + X, Y, Z int + } + var got struct { + Points []point + } + // Example inline table array from the spec. + const in = ` +points = [ { x = 1, y = 2, z = 3 }, + { x = 7, y = 8, z = 9 }, + { x = 2, y = 4, z = 8 } ] + +` + if _, err := Decode(in, &got); err != nil { + t.Fatal(err) + } + want := []point{ + {X: 1, Y: 2, Z: 3}, + {X: 7, Y: 8, Z: 9}, + {X: 2, Y: 4, Z: 8}, + } + if !reflect.DeepEqual(got.Points, want) { + t.Errorf("got %#v; want %#v", got.Points, want) + } +} + +func TestDecodeMalformedInlineTable(t *testing.T) { + for _, tt := range []struct { + s string + want string + }{ + {"{,}", "unexpected comma"}, + {"{x = 3 y = 4}", "expected a comma or an inline table terminator"}, + {"{x=3,,y=4}", "unexpected comma"}, + {"{x=3,\ny=4}", "newlines not allowed"}, + {"{x=3\n,y=4}", "newlines not allowed"}, + } { + var x struct{ A map[string]int } + input := "a = " + tt.s + _, err := Decode(input, &x) + if err == nil { + t.Errorf("Decode(%q): got nil, want error containing %q", + input, tt.want) + continue + } + if !strings.Contains(err.Error(), tt.want) { + t.Errorf("Decode(%q): got %q, want error containing %q", + input, err, tt.want) + } + } +} + +type menu struct { + Dishes map[string]dish +} + +func (m *menu) UnmarshalTOML(p interface{}) error { + m.Dishes = make(map[string]dish) + data, _ := p.(map[string]interface{}) + dishes := data["dishes"].(map[string]interface{}) + for n, v := range dishes { + if d, ok := v.(map[string]interface{}); ok { + nd := dish{} + nd.UnmarshalTOML(d) + m.Dishes[n] = nd + } else { + return fmt.Errorf("not a dish") + } + } + return nil +} + +type dish struct { + Name string + Price float32 + Ingredients []ingredient +} + +func (d *dish) UnmarshalTOML(p interface{}) error { + data, _ := p.(map[string]interface{}) + d.Name, _ = data["name"].(string) + d.Price, _ = data["price"].(float32) + ingredients, _ := data["ingredients"].([]map[string]interface{}) + for _, e := range ingredients { + n, _ := interface{}(e).(map[string]interface{}) + name, _ := n["name"].(string) + i := ingredient{name} + d.Ingredients = append(d.Ingredients, i) + } + return nil +} + +type ingredient struct { + Name string +} + +func TestDecodeSlices(t 
*testing.T) { + type T struct { + S []string + } + for i, tt := range []struct { + v T + input string + want T + }{ + {T{}, "", T{}}, + {T{[]string{}}, "", T{[]string{}}}, + {T{[]string{"a", "b"}}, "", T{[]string{"a", "b"}}}, + {T{}, "S = []", T{[]string{}}}, + {T{[]string{}}, "S = []", T{[]string{}}}, + {T{[]string{"a", "b"}}, "S = []", T{[]string{}}}, + {T{}, `S = ["x"]`, T{[]string{"x"}}}, + {T{[]string{}}, `S = ["x"]`, T{[]string{"x"}}}, + {T{[]string{"a", "b"}}, `S = ["x"]`, T{[]string{"x"}}}, + } { + if _, err := Decode(tt.input, &tt.v); err != nil { + t.Errorf("[%d] %s", i, err) + continue + } + if !reflect.DeepEqual(tt.v, tt.want) { + t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) + } + } +} + +func TestDecodePrimitive(t *testing.T) { + type S struct { + P Primitive + } + type T struct { + S []int + } + slicep := func(s []int) *[]int { return &s } + arrayp := func(a [2]int) *[2]int { return &a } + mapp := func(m map[string]int) *map[string]int { return &m } + for i, tt := range []struct { + v interface{} + input string + want interface{} + }{ + // slices + {slicep(nil), "", slicep(nil)}, + {slicep([]int{}), "", slicep([]int{})}, + {slicep([]int{1, 2, 3}), "", slicep([]int{1, 2, 3})}, + {slicep(nil), "P = [1,2]", slicep([]int{1, 2})}, + {slicep([]int{}), "P = [1,2]", slicep([]int{1, 2})}, + {slicep([]int{1, 2, 3}), "P = [1,2]", slicep([]int{1, 2})}, + + // arrays + {arrayp([2]int{2, 3}), "", arrayp([2]int{2, 3})}, + {arrayp([2]int{2, 3}), "P = [3,4]", arrayp([2]int{3, 4})}, + + // maps + {mapp(nil), "", mapp(nil)}, + {mapp(map[string]int{}), "", mapp(map[string]int{})}, + {mapp(map[string]int{"a": 1}), "", mapp(map[string]int{"a": 1})}, + {mapp(nil), "[P]\na = 2", mapp(map[string]int{"a": 2})}, + {mapp(map[string]int{}), "[P]\na = 2", mapp(map[string]int{"a": 2})}, + {mapp(map[string]int{"a": 1, "b": 3}), "[P]\na = 2", mapp(map[string]int{"a": 2, "b": 3})}, + + // structs + {&T{nil}, "[P]", &T{nil}}, + {&T{[]int{}}, "[P]", &T{[]int{}}}, + {&T{[]int{1, 2, 3}}, "[P]", &T{[]int{1, 2, 3}}}, + {&T{nil}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, + {&T{[]int{}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, + {&T{[]int{1, 2, 3}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}}, + } { + var s S + md, err := Decode(tt.input, &s) + if err != nil { + t.Errorf("[%d] Decode error: %s", i, err) + continue + } + if err := md.PrimitiveDecode(s.P, tt.v); err != nil { + t.Errorf("[%d] PrimitiveDecode error: %s", i, err) + continue + } + if !reflect.DeepEqual(tt.v, tt.want) { + t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want) + } + } +} + +func TestDecodeErrors(t *testing.T) { + for _, s := range []string{ + `x="`, + `x='`, + `x='''`, + + // Cases found by fuzzing in + // https://github.com/BurntSushi/toml/issues/155. + `""�`, // used to panic with index out of range + `e="""`, // used to hang + } { + var x struct{} + _, err := Decode(s, &x) + if err == nil { + t.Errorf("Decode(%q): got nil error", s) + } + } +} + +// Test for https://github.com/BurntSushi/toml/pull/166. 
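+// Booleans inside arrays and inline tables should decode into []bool and
+// map[string]bool respectively, as exercised below.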
+func TestDecodeBoolArray(t *testing.T) { + for _, tt := range []struct { + s string + got interface{} + want interface{} + }{ + { + "a = [true, false]", + &struct{ A []bool }{}, + &struct{ A []bool }{[]bool{true, false}}, + }, + { + "a = {a = true, b = false}", + &struct{ A map[string]bool }{}, + &struct{ A map[string]bool }{map[string]bool{"a": true, "b": false}}, + }, + } { + if _, err := Decode(tt.s, tt.got); err != nil { + t.Errorf("Decode(%q): %s", tt.s, err) + continue + } + if !reflect.DeepEqual(tt.got, tt.want) { + t.Errorf("Decode(%q): got %#v; want %#v", tt.s, tt.got, tt.want) + } + } +} + +func ExampleMetaData_PrimitiveDecode() { + var md MetaData + var err error + + var tomlBlob = ` +ranking = ["Springsteen", "J Geils"] + +[bands.Springsteen] +started = 1973 +albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] + +[bands."J Geils"] +started = 1970 +albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] +` + + type band struct { + Started int + Albums []string + } + type classics struct { + Ranking []string + Bands map[string]Primitive + } + + // Do the initial decode. Reflection is delayed on Primitive values. + var music classics + if md, err = Decode(tomlBlob, &music); err != nil { + log.Fatal(err) + } + + // MetaData still includes information on Primitive values. + fmt.Printf("Is `bands.Springsteen` defined? %v\n", + md.IsDefined("bands", "Springsteen")) + + // Decode primitive data into Go values. + for _, artist := range music.Ranking { + // A band is a primitive value, so we need to decode it to get a + // real `band` value. + primValue := music.Bands[artist] + + var aBand band + if err = md.PrimitiveDecode(primValue, &aBand); err != nil { + log.Fatal(err) + } + fmt.Printf("%s started in %d.\n", artist, aBand.Started) + } + // Check to see if there were any fields left undecoded. + // Note that this won't be empty before decoding the Primitive value! + fmt.Printf("Undecoded: %q\n", md.Undecoded()) + + // Output: + // Is `bands.Springsteen` defined? true + // Springsteen started in 1973. + // J Geils started in 1970. + // Undecoded: [] +} + +func ExampleDecode() { + var tomlBlob = ` +# Some comments. +[alpha] +ip = "10.0.0.1" + + [alpha.config] + Ports = [ 8001, 8002 ] + Location = "Toronto" + Created = 1987-07-05T05:45:00Z + +[beta] +ip = "10.0.0.2" + + [beta.config] + Ports = [ 9001, 9002 ] + Location = "New Jersey" + Created = 1887-01-05T05:55:00Z +` + + type serverConfig struct { + Ports []int + Location string + Created time.Time + } + + type server struct { + IP string `toml:"ip,omitempty"` + Config serverConfig `toml:"config"` + } + + type servers map[string]server + + var config servers + if _, err := Decode(tomlBlob, &config); err != nil { + log.Fatal(err) + } + + for _, name := range []string{"alpha", "beta"} { + s := config[name] + fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", + name, s.IP, s.Config.Location, + s.Config.Created.Format("2006-01-02")) + fmt.Printf("Ports: %v\n", s.Config.Ports) + } + + // Output: + // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 + // Ports: [8001 8002] + // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 + // Ports: [9001 9002] +} + +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} + +// Example Unmarshaler shows how to decode TOML strings into your own +// custom data type. 
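+// The duration type defined above implements UnmarshalText, so a TOML string
+// such as "4m49s" is handed to it and parsed with time.ParseDuration.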
+func Example_unmarshaler() { + blob := ` +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +` + type song struct { + Name string + Duration duration + } + type songs struct { + Song []song + } + var favorites songs + if _, err := Decode(blob, &favorites); err != nil { + log.Fatal(err) + } + + // Code to implement the TextUnmarshaler interface for `duration`: + // + // type duration struct { + // time.Duration + // } + // + // func (d *duration) UnmarshalText(text []byte) error { + // var err error + // d.Duration, err = time.ParseDuration(string(text)) + // return err + // } + + for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) + } + // Output: + // Thunder Road (4m49s) + // Stairway to Heaven (8m3s) +} + +// Example StrictDecoding shows how to detect whether there are keys in the +// TOML document that weren't decoded into the value given. This is useful +// for returning an error to the user if they've included extraneous fields +// in their configuration. +func Example_strictDecoding() { + var blob = ` +key1 = "value1" +key2 = "value2" +key3 = "value3" +` + type config struct { + Key1 string + Key3 string + } + + var conf config + md, err := Decode(blob, &conf) + if err != nil { + log.Fatal(err) + } + fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) + // Output: + // Undecoded keys: ["key2"] +} + +// Example UnmarshalTOML shows how to implement a struct type that knows how to +// unmarshal itself. The struct must take full responsibility for mapping the +// values passed into the struct. The method may be used with interfaces in a +// struct in cases where the actual type is not known until the data is +// examined. +func Example_unmarshalTOML() { + + var blob = ` +[[parts]] +type = "valve" +id = "valve-1" +size = 1.2 +rating = 4 + +[[parts]] +type = "valve" +id = "valve-2" +size = 2.1 +rating = 5 + +[[parts]] +type = "pipe" +id = "pipe-1" +length = 2.1 +diameter = 12 + +[[parts]] +type = "cable" +id = "cable-1" +length = 12 +rating = 3.1 +` + o := &order{} + err := Unmarshal([]byte(blob), o) + if err != nil { + log.Fatal(err) + } + + fmt.Println(len(o.parts)) + + for _, part := range o.parts { + fmt.Println(part.Name()) + } + + // Code to implement UmarshalJSON. + + // type order struct { + // // NOTE `order.parts` is a private slice of type `part` which is an + // // interface and may only be loaded from toml using the + // // UnmarshalTOML() method of the Umarshaler interface. + // parts parts + // } + + // func (o *order) UnmarshalTOML(data interface{}) error { + + // // NOTE the example below contains detailed type casting to show how + // // the 'data' is retrieved. In operational use, a type cast wrapper + // // may be preferred e.g. 
+ // // + // // func AsMap(v interface{}) (map[string]interface{}, error) { + // // return v.(map[string]interface{}) + // // } + // // + // // resulting in: + // // d, _ := AsMap(data) + // // + + // d, _ := data.(map[string]interface{}) + // parts, _ := d["parts"].([]map[string]interface{}) + + // for _, p := range parts { + + // typ, _ := p["type"].(string) + // id, _ := p["id"].(string) + + // // detect the type of part and handle each case + // switch p["type"] { + // case "valve": + + // size := float32(p["size"].(float64)) + // rating := int(p["rating"].(int64)) + + // valve := &valve{ + // Type: typ, + // ID: id, + // Size: size, + // Rating: rating, + // } + + // o.parts = append(o.parts, valve) + + // case "pipe": + + // length := float32(p["length"].(float64)) + // diameter := int(p["diameter"].(int64)) + + // pipe := &pipe{ + // Type: typ, + // ID: id, + // Length: length, + // Diameter: diameter, + // } + + // o.parts = append(o.parts, pipe) + + // case "cable": + + // length := int(p["length"].(int64)) + // rating := float32(p["rating"].(float64)) + + // cable := &cable{ + // Type: typ, + // ID: id, + // Length: length, + // Rating: rating, + // } + + // o.parts = append(o.parts, cable) + + // } + // } + + // return nil + // } + + // type parts []part + + // type part interface { + // Name() string + // } + + // type valve struct { + // Type string + // ID string + // Size float32 + // Rating int + // } + + // func (v *valve) Name() string { + // return fmt.Sprintf("VALVE: %s", v.ID) + // } + + // type pipe struct { + // Type string + // ID string + // Length float32 + // Diameter int + // } + + // func (p *pipe) Name() string { + // return fmt.Sprintf("PIPE: %s", p.ID) + // } + + // type cable struct { + // Type string + // ID string + // Length int + // Rating float32 + // } + + // func (c *cable) Name() string { + // return fmt.Sprintf("CABLE: %s", c.ID) + // } + + // Output: + // 4 + // VALVE: valve-1 + // VALVE: valve-2 + // PIPE: pipe-1 + // CABLE: cable-1 + +} + +type order struct { + // NOTE `order.parts` is a private slice of type `part` which is an + // interface and may only be loaded from toml using the UnmarshalTOML() + // method of the Umarshaler interface. + parts parts +} + +func (o *order) UnmarshalTOML(data interface{}) error { + + // NOTE the example below contains detailed type casting to show how + // the 'data' is retrieved. In operational use, a type cast wrapper + // may be preferred e.g. 
+ // + // func AsMap(v interface{}) (map[string]interface{}, error) { + // return v.(map[string]interface{}) + // } + // + // resulting in: + // d, _ := AsMap(data) + // + + d, _ := data.(map[string]interface{}) + parts, _ := d["parts"].([]map[string]interface{}) + + for _, p := range parts { + + typ, _ := p["type"].(string) + id, _ := p["id"].(string) + + // detect the type of part and handle each case + switch p["type"] { + case "valve": + + size := float32(p["size"].(float64)) + rating := int(p["rating"].(int64)) + + valve := &valve{ + Type: typ, + ID: id, + Size: size, + Rating: rating, + } + + o.parts = append(o.parts, valve) + + case "pipe": + + length := float32(p["length"].(float64)) + diameter := int(p["diameter"].(int64)) + + pipe := &pipe{ + Type: typ, + ID: id, + Length: length, + Diameter: diameter, + } + + o.parts = append(o.parts, pipe) + + case "cable": + + length := int(p["length"].(int64)) + rating := float32(p["rating"].(float64)) + + cable := &cable{ + Type: typ, + ID: id, + Length: length, + Rating: rating, + } + + o.parts = append(o.parts, cable) + + } + } + + return nil +} + +type parts []part + +type part interface { + Name() string +} + +type valve struct { + Type string + ID string + Size float32 + Rating int +} + +func (v *valve) Name() string { + return fmt.Sprintf("VALVE: %s", v.ID) +} + +type pipe struct { + Type string + ID string + Length float32 + Diameter int +} + +func (p *pipe) Name() string { + return fmt.Sprintf("PIPE: %s", p.ID) +} + +type cable struct { + Type string + ID string + Length int + Rating float32 +} + +func (c *cable) Name() string { + return fmt.Sprintf("CABLE: %s", c.ID) +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000..b371f39 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. 
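+
+As a minimal, illustrative sketch (the struct and document here are examples
+only, not part of the API), decoding into a struct looks like:
+
+	var conf struct{ Age int }
+	if _, err := Decode(`age = 25`, &conf); err != nil {
+		// handle error
+	}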
+*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000..d905c21 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) +func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. 
Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
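+// strconv.FormatFloat renders 20.0 as "20", for example, so floatAddDecimal
+// rewrites it to "20.0"; values that already contain a '.' are returned
+// unchanged.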
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
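+					// Anonymous fields that are not handled above (for example
+					// an embedded "NonStruct int", exercised by the "embedded
+					// non-struct" case in encode_test.go) end up encoded as
+					// ordinary keys by the code below the switch.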
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
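+	// For example, [][]int is fine, but [][]map[string]string is rejected
+	// with errArrayNoTable (compare the Encode documentation above).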
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. 
Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go new file mode 100644 index 0000000..673b7b0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode_test.go @@ -0,0 +1,615 @@ +package toml + +import ( + "bytes" + "fmt" + "log" + "net" + "testing" + "time" +) + +func TestEncodeRoundTrip(t *testing.T) { + type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time + Ipaddress net.IP + } + + var inputs = Config{ + 13, + []string{"one", "two", "three"}, + 3.145, + []int{11, 2, 3, 4}, + time.Now(), + net.ParseIP("192.168.59.254"), + } + + var firstBuffer bytes.Buffer + e := NewEncoder(&firstBuffer) + err := e.Encode(inputs) + if err != nil { + t.Fatal(err) + } + var outputs Config + if _, err := Decode(firstBuffer.String(), &outputs); err != nil { + t.Logf("Could not decode:\n-----\n%s\n-----\n", + firstBuffer.String()) + t.Fatal(err) + } + + // could test each value individually, but I'm lazy + var secondBuffer bytes.Buffer + e2 := NewEncoder(&secondBuffer) + err = e2.Encode(outputs) + if err != nil { + t.Fatal(err) + } + if firstBuffer.String() != secondBuffer.String() { + t.Error( + firstBuffer.String(), + "\n\n is not identical to\n\n", + secondBuffer.String()) + } +} + +// XXX(burntsushi) +// I think these tests probably should be removed. They are good, but they +// ought to be obsolete by toml-test. +func TestEncode(t *testing.T) { + type Embedded struct { + Int int `toml:"_int"` + } + type NonStruct int + + date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) + dateStr := "2014-05-11T19:30:40Z" + + tests := map[string]struct { + input interface{} + wantOutput string + wantError error + }{ + "bool field": { + input: struct { + BoolTrue bool + BoolFalse bool + }{true, false}, + wantOutput: "BoolTrue = true\nBoolFalse = false\n", + }, + "int fields": { + input: struct { + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + }{1, 2, 3, 4, 5}, + wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", + }, + "uint fields": { + input: struct { + Uint uint + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + }{1, 2, 3, 4, 5}, + wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + + "\nUint64 = 5\n", + }, + "float fields": { + input: struct { + Float32 float32 + Float64 float64 + }{1.5, 2.5}, + wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", + }, + "string field": { + input: struct{ String string }{"foo"}, + wantOutput: "String = \"foo\"\n", + }, + "string field and unexported field": { + input: struct { + String string + unexported int + }{"foo", 0}, + wantOutput: "String = \"foo\"\n", + }, + "datetime field in UTC": { + input: struct{ Date time.Time }{date}, + wantOutput: fmt.Sprintf("Date = %s\n", dateStr), + }, + "datetime field as primitive": { + // Using a map here to fail if isStructOrMap() returns true for + // time.Time. 
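+			// Note that eElement always converts datetimes to UTC, which is
+			// why the +01:00 zone on date comes out as dateStr's trailing Z.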
+ input: map[string]interface{}{ + "Date": date, + "Int": 1, + }, + wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), + }, + "array fields": { + input: struct { + IntArray0 [0]int + IntArray3 [3]int + }{[0]int{}, [3]int{1, 2, 3}}, + wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", + }, + "slice fields": { + input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ + nil, []int{}, []int{1, 2, 3}, + }, + wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", + }, + "datetime slices": { + input: struct{ DatetimeSlice []time.Time }{ + []time.Time{date, date}, + }, + wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", + dateStr, dateStr), + }, + "nested arrays and slices": { + input: struct { + SliceOfArrays [][2]int + ArrayOfSlices [2][]int + SliceOfArraysOfSlices [][2][]int + ArrayOfSlicesOfArrays [2][][2]int + SliceOfMixedArrays [][2]interface{} + ArrayOfMixedSlices [2][]interface{} + }{ + [][2]int{{1, 2}, {3, 4}}, + [2][]int{{1, 2}, {3, 4}}, + [][2][]int{ + { + {1, 2}, {3, 4}, + }, + { + {5, 6}, {7, 8}, + }, + }, + [2][][2]int{ + { + {1, 2}, {3, 4}, + }, + { + {5, 6}, {7, 8}, + }, + }, + [][2]interface{}{ + {1, 2}, {"a", "b"}, + }, + [2][]interface{}{ + {1, 2}, {"a", "b"}, + }, + }, + wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] +ArrayOfSlices = [[1, 2], [3, 4]] +SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] +ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] +SliceOfMixedArrays = [[1, 2], ["a", "b"]] +ArrayOfMixedSlices = [[1, 2], ["a", "b"]] +`, + }, + "empty slice": { + input: struct{ Empty []interface{} }{[]interface{}{}}, + wantOutput: "Empty = []\n", + }, + "(error) slice with element type mismatch (string and integer)": { + input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, + wantError: errArrayMixedElementTypes, + }, + "(error) slice with element type mismatch (integer and float)": { + input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, + wantError: errArrayMixedElementTypes, + }, + "slice with elems of differing Go types, same TOML types": { + input: struct { + MixedInts []interface{} + MixedFloats []interface{} + }{ + []interface{}{ + int(1), int8(2), int16(3), int32(4), int64(5), + uint(1), uint8(2), uint16(3), uint32(4), uint64(5), + }, + []interface{}{float32(1.5), float64(2.5)}, + }, + wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + + "MixedFloats = [1.5, 2.5]\n", + }, + "(error) slice w/ element type mismatch (one is nested array)": { + input: struct{ Mixed []interface{} }{ + []interface{}{1, []interface{}{2}}, + }, + wantError: errArrayMixedElementTypes, + }, + "(error) slice with 1 nil element": { + input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, + wantError: errArrayNilElement, + }, + "(error) slice with 1 nil element (and other non-nil elements)": { + input: struct{ NilElement []interface{} }{ + []interface{}{1, nil}, + }, + wantError: errArrayNilElement, + }, + "simple map": { + input: map[string]int{"a": 1, "b": 2}, + wantOutput: "a = 1\nb = 2\n", + }, + "map with interface{} value type": { + input: map[string]interface{}{"a": 1, "b": "c"}, + wantOutput: "a = 1\nb = \"c\"\n", + }, + "map with interface{} value type, some of which are structs": { + input: map[string]interface{}{ + "a": struct{ Int int }{2}, + "b": 1, + }, + wantOutput: "b = 1\n\n[a]\n Int = 2\n", + }, + "nested map": { + input: map[string]map[string]int{ + "a": {"b": 1}, + "c": {"d": 2}, + }, + wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", + }, + "nested struct": { + input: struct{ Struct struct{ Int int } }{ + 
struct{ Int int }{1}, + }, + wantOutput: "[Struct]\n Int = 1\n", + }, + "nested struct and non-struct field": { + input: struct { + Struct struct{ Int int } + Bool bool + }{struct{ Int int }{1}, true}, + wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", + }, + "2 nested structs": { + input: struct{ Struct1, Struct2 struct{ Int int } }{ + struct{ Int int }{1}, struct{ Int int }{2}, + }, + wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", + }, + "deeply nested structs": { + input: struct { + Struct1, Struct2 struct{ Struct3 *struct{ Int int } } + }{ + struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, + struct{ Struct3 *struct{ Int int } }{nil}, + }, + wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + + "\n\n[Struct2]\n", + }, + "nested struct with nil struct elem": { + input: struct { + Struct struct{ Inner *struct{ Int int } } + }{ + struct{ Inner *struct{ Int int } }{nil}, + }, + wantOutput: "[Struct]\n", + }, + "nested struct with no fields": { + input: struct { + Struct struct{ Inner struct{} } + }{ + struct{ Inner struct{} }{struct{}{}}, + }, + wantOutput: "[Struct]\n [Struct.Inner]\n", + }, + "struct with tags": { + input: struct { + Struct struct { + Int int `toml:"_int"` + } `toml:"_struct"` + Bool bool `toml:"_bool"` + }{ + struct { + Int int `toml:"_int"` + }{1}, true, + }, + wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", + }, + "embedded struct": { + input: struct{ Embedded }{Embedded{1}}, + wantOutput: "_int = 1\n", + }, + "embedded *struct": { + input: struct{ *Embedded }{&Embedded{1}}, + wantOutput: "_int = 1\n", + }, + "nested embedded struct": { + input: struct { + Struct struct{ Embedded } `toml:"_struct"` + }{struct{ Embedded }{Embedded{1}}}, + wantOutput: "[_struct]\n _int = 1\n", + }, + "nested embedded *struct": { + input: struct { + Struct struct{ *Embedded } `toml:"_struct"` + }{struct{ *Embedded }{&Embedded{1}}}, + wantOutput: "[_struct]\n _int = 1\n", + }, + "embedded non-struct": { + input: struct{ NonStruct }{5}, + wantOutput: "NonStruct = 5\n", + }, + "array of tables": { + input: struct { + Structs []*struct{ Int int } `toml:"struct"` + }{ + []*struct{ Int int }{{1}, {3}}, + }, + wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", + }, + "array of tables order": { + input: map[string]interface{}{ + "map": map[string]interface{}{ + "zero": 5, + "arr": []map[string]int{ + { + "friend": 5, + }, + }, + }, + }, + wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", + }, + "(error) top-level slice": { + input: []struct{ Int int }{{1}, {2}, {3}}, + wantError: errNoKey, + }, + "(error) slice of slice": { + input: struct { + Slices [][]struct{ Int int } + }{ + [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, + }, + wantError: errArrayNoTable, + }, + "(error) map no string key": { + input: map[int]string{1: ""}, + wantError: errNonString, + }, + "(error) empty key name": { + input: map[string]int{"": 1}, + wantError: errAnything, + }, + "(error) empty map name": { + input: map[string]interface{}{ + "": map[string]int{"v": 1}, + }, + wantError: errAnything, + }, + } + for label, test := range tests { + encodeExpected(t, label, test.input, test.wantOutput, test.wantError) + } +} + +func TestEncodeNestedTableArrays(t *testing.T) { + type song struct { + Name string `toml:"name"` + } + type album struct { + Name string `toml:"name"` + Songs []song `toml:"songs"` + } + type springsteen struct { + Albums []album `toml:"albums"` + } + value := springsteen{ + []album{ + {"Born to Run", + []song{{"Jungleland"}, {"Meeting Across the 
River"}}}, + {"Born in the USA", + []song{{"Glory Days"}, {"Dancing in the Dark"}}}, + }, + } + expected := `[[albums]] + name = "Born to Run" + + [[albums.songs]] + name = "Jungleland" + + [[albums.songs]] + name = "Meeting Across the River" + +[[albums]] + name = "Born in the USA" + + [[albums.songs]] + name = "Glory Days" + + [[albums.songs]] + name = "Dancing in the Dark" +` + encodeExpected(t, "nested table arrays", value, expected, nil) +} + +func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { + type Alpha struct { + V int + } + type Beta struct { + V int + } + type Conf struct { + V int + A Alpha + B []Beta + } + + val := Conf{ + V: 1, + A: Alpha{2}, + B: []Beta{{3}}, + } + expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" + encodeExpected(t, "array hash with normal hash order", val, expected, nil) +} + +func TestEncodeWithOmitEmpty(t *testing.T) { + type simple struct { + Bool bool `toml:"bool,omitempty"` + String string `toml:"string,omitempty"` + Array [0]byte `toml:"array,omitempty"` + Slice []int `toml:"slice,omitempty"` + Map map[string]string `toml:"map,omitempty"` + } + + var v simple + encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil) + v = simple{ + Bool: true, + String: " ", + Slice: []int{2, 3, 4}, + Map: map[string]string{"foo": "bar"}, + } + expected := `bool = true +string = " " +slice = [2, 3, 4] + +[map] + foo = "bar" +` + encodeExpected(t, "fields with omitempty are not omitted when non-empty", + v, expected, nil) +} + +func TestEncodeWithOmitZero(t *testing.T) { + type simple struct { + Number int `toml:"number,omitzero"` + Real float64 `toml:"real,omitzero"` + Unsigned uint `toml:"unsigned,omitzero"` + } + + value := simple{0, 0.0, uint(0)} + expected := "" + + encodeExpected(t, "simple with omitzero, all zero", value, expected, nil) + + value.Number = 10 + value.Real = 20 + value.Unsigned = 5 + expected = `number = 10 +real = 20.0 +unsigned = 5 +` + encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil) +} + +func TestEncodeOmitemptyWithEmptyName(t *testing.T) { + type simple struct { + S []int `toml:",omitempty"` + } + v := simple{[]int{1, 2, 3}} + expected := "S = [1, 2, 3]\n" + encodeExpected(t, "simple with omitempty, no name, non-empty field", + v, expected, nil) +} + +func TestEncodeAnonymousStruct(t *testing.T) { + type Inner struct{ N int } + type Outer0 struct{ Inner } + type Outer1 struct { + Inner `toml:"inner"` + } + + v0 := Outer0{Inner{3}} + expected := "N = 3\n" + encodeExpected(t, "embedded anonymous untagged struct", v0, expected, nil) + + v1 := Outer1{Inner{3}} + expected = "[inner]\n N = 3\n" + encodeExpected(t, "embedded anonymous tagged struct", v1, expected, nil) +} + +func TestEncodeAnonymousStructPointerField(t *testing.T) { + type Inner struct{ N int } + type Outer0 struct{ *Inner } + type Outer1 struct { + *Inner `toml:"inner"` + } + + v0 := Outer0{} + expected := "" + encodeExpected(t, "nil anonymous untagged struct pointer field", v0, expected, nil) + + v0 = Outer0{&Inner{3}} + expected = "N = 3\n" + encodeExpected(t, "non-nil anonymous untagged struct pointer field", v0, expected, nil) + + v1 := Outer1{} + expected = "" + encodeExpected(t, "nil anonymous tagged struct pointer field", v1, expected, nil) + + v1 = Outer1{&Inner{3}} + expected = "[inner]\n N = 3\n" + encodeExpected(t, "non-nil anonymous tagged struct pointer field", v1, expected, nil) +} + +func TestEncodeIgnoredFields(t *testing.T) { + type simple struct { + Number int `toml:"-"` + } + value := simple{} + 
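+	// The toml:"-" tag makes getOptions return skip=true, so the field is
+	// never written and the encoded output is empty.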
expected := "" + encodeExpected(t, "ignored field", value, expected, nil) +} + +func encodeExpected( + t *testing.T, label string, val interface{}, wantStr string, wantErr error, +) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.Encode(val) + if err != wantErr { + if wantErr != nil { + if wantErr == errAnything && err != nil { + return + } + t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) + } else { + t.Errorf("%s: Encode failed: %s", label, err) + } + } + if err != nil { + return + } + if got := buf.String(); wantStr != got { + t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", + label, wantStr, got) + } +} + +func ExampleEncoder_Encode() { + date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") + var config = map[string]interface{}{ + "date": date, + "counts": []int{1, 1, 2, 3, 5, 8}, + "hash": map[string]string{ + "key1": "val1", + "key2": "val2", + }, + } + buf := new(bytes.Buffer) + if err := NewEncoder(buf).Encode(config); err != nil { + log.Fatal(err) + } + fmt.Println(buf.String()) + + // Output: + // counts = [1, 1, 2, 3, 5, 8] + // date = 2010-03-14T18:00:00Z + // + // [hash] + // key1 = "val1" + // key2 = "val2" +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 0000000..d36e1dd --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 0000000..e8d503d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000..6dee7fc --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. 
Can be called only twice between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. 
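// ---- editorial sketch: not part of the vendored file ----
// The lexer is pull-driven: callers repeatedly ask for the next item until
// itemEOF or itemError. Since lex, item and the item constants are
// unexported, a sketch like this would have to live in a hypothetical
// _test.go file inside package toml; it is shown only to illustrate the
// shape of the item stream that the table/key states below emit.
package toml

import "testing"

func TestLexItemStreamSketch(t *testing.T) {
	lx := lex("[server]\nport = 8080\n")
	for {
		it := lx.nextItem()
		// Typical stream for this input: (TableStart, [), (Text, server),
		// (TableEnd, ]), (KeyStart, ), (Text, port), (Integer, 8080), (EOF, ).
		t.Logf("%s", it)
		if it.typ == itemEOF || it.typ == itemError {
			break
		}
	}
}
// ---- end sketch ----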
+func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed. +func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. 
+ // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
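// ---- editorial sketch: not part of the vendored file ----
// The inline-table states below feed the same parser as regular tables, so
// from the public API an inline table decodes like any other table. This
// uses toml.Decode, which lives in decode.go outside this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Point struct {
			X int `toml:"x"`
			Y int `toml:"y"`
		} `toml:"point"`
	}
	if _, err := toml.Decode(`point = { x = 1, y = 2 }`, &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Println(conf.Point.X, conf.Point.Y) // 1 2
}
// ---- end sketch ----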
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. 
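// ---- editorial sketch: not part of the vendored file ----
// Double-quoted strings go through the escape states; single-quoted raw
// strings, including the multiline form below, are taken verbatim. Seen
// from the decoding side:
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	input := `
basic = "line1\nline2"
raw   = 'C:\Users\toml'
`
	var v struct {
		Basic string `toml:"basic"`
		Raw   string `toml:"raw"`
	}
	if _, err := toml.Decode(input, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", v.Basic) // "line1\nline2"   (escape expanded to a newline)
	fmt.Printf("%q\n", v.Raw)   // "C:\\Users\\toml" (backslashes kept verbatim)
}
// ---- end sketch ----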
+func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. 
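// ---- editorial sketch: not part of the vendored file ----
// lexNumber and lexFloat below accept '_' separators; the parser later
// checks that every underscore is surrounded by digits (numUnderscoresOK)
// and strips them before calling strconv. For example:
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Max int64   `toml:"max"`
		Pi  float64 `toml:"pi"`
	}
	if _, err := toml.Decode("max = 1_000_000\npi = 3.141_59\n", &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Max, v.Pi) // 1000000 3.14159

	// "max = 1__000" or "max = _1000" would instead fail with
	// "underscores must be surrounded by digits".
}
// ---- end sketch ----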
+func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec. 
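// ---- editorial sketch: not part of the vendored file ----
// isBareKeyChar below is what decides whether a key needs quoting: letters,
// digits, '-' and '_' may appear bare; anything else (dots, spaces, and so
// on) must be written as a quoted key.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	input := `
server-1    = "bare key"
"127.0.0.1" = "quoted key"
`
	var v map[string]string
	if _, err := toml.Decode(input, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v["server-1"], v["127.0.0.1"]) // bare key quoted key
}
// ---- end sketch ----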
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000..50869ef --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
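// ---- editorial sketch: not part of the vendored file ----
// value() below tries three datetime layouts in order:
// "2006-01-02T15:04:05Z07:00", "2006-01-02T15:04:05" (interpreted in
// time.Local) and "2006-01-02". All three forms therefore decode into
// time.Time fields:
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	input := `
full  = 1979-05-27T07:32:00Z
local = 1979-05-27T07:32:00
date  = 1979-05-27
`
	var v struct {
		Full  time.Time `toml:"full"`
		Local time.Time `toml:"local"`
		Date  time.Time `toml:"date"`
	}
	if _, err := toml.Decode(input, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Full.UTC(), v.Local, v.Date)
}
// ---- end sketch ----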
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ + "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. 
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). 
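// ---- editorial sketch: not part of the vendored file ----
// The implicit-table bookkeeping in establishContext and setValue above is
// what makes the first document here legal ([a.b] creates "a" implicitly,
// so a later explicit [a] may fill it in), while redefining the same
// explicit table stays an error.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	ok := "[a.b]\nx = 1\n\n[a]\ny = 2\n"
	var v map[string]interface{}
	if _, err := toml.Decode(ok, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v["a"]) // "a" holds both b.x and y, e.g. map[b:map[x:1] y:2]

	dup := "[a]\nx = 1\n\n[a]\ny = 2\n"
	if _, err := toml.Decode(dup, &v); err != nil {
		fmt.Println("rejected:", err) // Key 'a' has already been defined.
	}
}
// ---- end sketch ----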
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) 
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 0000000..562164b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 0000000..c73f8af --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000..608997c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/hashicorp/golang-lru/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 0000000..337d963 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,212 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. +type TwoQueueCache struct { + size int + recentSize int + + recent *simplelru.LRU + frequent *simplelru.LRU + recentEvict *simplelru.LRU + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. 
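// ---- editorial sketch: not part of the vendored file ----
// Typical use of the 2Q cache defined in this file. New2Q applies the
// default ratios declared above; New2QParams, which follows, lets callers
// tune them. The key and value here are arbitrary placeholders.
package main

import (
	"fmt"
	"log"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		log.Fatal(err)
	}

	cache.Add("answer", 42) // lands in the "recent" LRU first

	if v, ok := cache.Get("answer"); ok {
		fmt.Println("hit:", v) // a Get promotes the entry to the "frequent" LRU
	}

	fmt.Println(cache.Len(), cache.Contains("missing")) // 1 false
}
// ---- end sketch ----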
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) 
+} + +func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/2q_test.go b/vendor/github.com/hashicorp/golang-lru/2q_test.go new file mode 100644 index 0000000..1b0f351 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q_test.go @@ -0,0 +1,306 @@ +package lru + +import ( + "math/rand" + "testing" +) + +func Benchmark2Q_Rand(b *testing.B) { + l, err := New2Q(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + trace[i] = rand.Int63() % 32768 + } + + b.ResetTimer() + + var hit, miss int + for i := 0; i < 2*b.N; i++ { + if i%2 == 0 { + l.Add(trace[i], trace[i]) + } else { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func Benchmark2Q_Freq(b *testing.B) { + l, err := New2Q(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + if i%2 == 0 { + trace[i] = rand.Int63() % 16384 + } else { + trace[i] = rand.Int63() % 32768 + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + l.Add(trace[i], trace[i]) + } + var hit, miss int + for i := 0; i < b.N; i++ { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func Test2Q_RandomOps(t *testing.T) { + size := 128 + l, err := New2Q(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + n := 200000 + for i := 0; i < n; i++ { + key := rand.Int63() % 512 + r := rand.Int63() + switch r % 3 { + case 0: + l.Add(key, key) + case 1: + l.Get(key) + case 2: + l.Remove(key) + } + + if l.recent.Len()+l.frequent.Len() > size { + t.Fatalf("bad: recent: %d freq: %d", + l.recent.Len(), l.frequent.Len()) + } + } +} + +func Test2Q_Get_RecentToFrequent(t *testing.T) { + l, err := New2Q(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Touch all the entries, should be in t1 + for i := 0; i < 128; i++ { + l.Add(i, i) + } + if n := l.recent.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + + // Get should upgrade to t2 + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("missing: %d", i) + } + } + if n := l.recent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } + + // Get be from t2 + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("missing: %d", i) + } + } + if n := l.recent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } +} + +func Test2Q_Add_RecentToFrequent(t *testing.T) { + l, err := New2Q(128) + if err != nil { + t.Fatalf("err: %v", err) + } 
+ + // Add initially to recent + l.Add(1, 1) + if n := l.recent.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + + // Add should upgrade to frequent + l.Add(1, 1) + if n := l.recent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + + // Add should remain in frequent + l.Add(1, 1) + if n := l.recent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } +} + +func Test2Q_Add_RecentEvict(t *testing.T) { + l, err := New2Q(4) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Add 1,2,3,4,5 -> Evict 1 + l.Add(1, 1) + l.Add(2, 2) + l.Add(3, 3) + l.Add(4, 4) + l.Add(5, 5) + if n := l.recent.Len(); n != 4 { + t.Fatalf("bad: %d", n) + } + if n := l.recentEvict.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + + // Pull in the recently evicted + l.Add(1, 1) + if n := l.recent.Len(); n != 3 { + t.Fatalf("bad: %d", n) + } + if n := l.recentEvict.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + + // Add 6, should cause another recent evict + l.Add(6, 6) + if n := l.recent.Len(); n != 3 { + t.Fatalf("bad: %d", n) + } + if n := l.recentEvict.Len(); n != 2 { + t.Fatalf("bad: %d", n) + } + if n := l.frequent.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } +} + +func Test2Q(t *testing.T) { + l, err := New2Q(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + for i := 0; i < 256; i++ { + l.Add(i, i) + } + if l.Len() != 128 { + t.Fatalf("bad len: %v", l.Len()) + } + + for i, k := range l.Keys() { + if v, ok := l.Get(k); !ok || v != k || v != i+128 { + t.Fatalf("bad key: %v", k) + } + } + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if ok { + t.Fatalf("should be evicted") + } + } + for i := 128; i < 256; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("should not be evicted") + } + } + for i := 128; i < 192; i++ { + l.Remove(i) + _, ok := l.Get(i) + if ok { + t.Fatalf("should be deleted") + } + } + + l.Purge() + if l.Len() != 0 { + t.Fatalf("bad len: %v", l.Len()) + } + if _, ok := l.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +// Test that Contains doesn't update recent-ness +func Test2Q_Contains(t *testing.T) { + l, err := New2Q(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if !l.Contains(1) { + t.Errorf("1 should be contained") + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Contains should not have updated recent-ness of 1") + } +} + +// Test that Peek doesn't update recent-ness +func Test2Q_Peek(t *testing.T) { + l, err := New2Q(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if v, ok := l.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1: %v, %v", v, ok) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 0000000..be2cc4d --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 0000000..33e58cf --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 0000000..a2a2528 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. 
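+//
+// Illustrative usage (an editor's sketch, not part of the upstream file; it
+// assumes only the exported NewARC, Add and Get defined below):
+//
+//	cache, err := NewARC(128)
+//	if err != nil {
+//		panic(err)
+//	}
+//	cache.Add("answer", 42)
+//	if v, ok := cache.Get("answer"); ok {
+//		_ = v // 42; "answer" is promoted from the recent (t1) to the frequent (t2) list
+//	}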
+type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 *simplelru.LRU // T1 is the LRU for recently accessed items + b1 *simplelru.LRU // B1 is the LRU for evictions from t1 + + t2 *simplelru.LRU // T2 is the LRU for frequently accessed items + b2 *simplelru.LRU // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Ff the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. +func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequntly used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func (c *ARCCache) replace(b2ContainsKey bool) { + t1Len 
:= c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/arc_test.go b/vendor/github.com/hashicorp/golang-lru/arc_test.go new file mode 100644 index 0000000..e2d9b68 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc_test.go @@ -0,0 +1,377 @@ +package lru + +import ( + "math/rand" + "testing" + "time" +) + +func init() { + rand.Seed(time.Now().Unix()) +} + +func BenchmarkARC_Rand(b *testing.B) { + l, err := NewARC(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + trace[i] = rand.Int63() % 32768 + } + + b.ResetTimer() + + var hit, miss int + for i := 0; i < 2*b.N; i++ { + if i%2 == 0 { + l.Add(trace[i], trace[i]) + } else { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func BenchmarkARC_Freq(b *testing.B) { + l, err := NewARC(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + if i%2 == 0 { + trace[i] = rand.Int63() % 16384 + } else { + trace[i] = rand.Int63() % 32768 + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + l.Add(trace[i], trace[i]) + } + var hit, miss int + for i := 0; i < b.N; i++ { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func TestARC_RandomOps(t *testing.T) { + size := 128 + l, err := NewARC(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + n := 200000 + for i := 0; i < n; i++ { + key := rand.Int63() % 512 + r := rand.Int63() + switch r % 3 { + case 0: + l.Add(key, key) + case 1: + l.Get(key) + case 2: + l.Remove(key) + } + + if l.t1.Len()+l.t2.Len() > size { + t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d", + l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p) + } + if l.b1.Len()+l.b2.Len() > size { + t.Fatalf("bad: t1: %d t2: 
%d b1: %d b2: %d p: %d", + l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p) + } + } +} + +func TestARC_Get_RecentToFrequent(t *testing.T) { + l, err := NewARC(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Touch all the entries, should be in t1 + for i := 0; i < 128; i++ { + l.Add(i, i) + } + if n := l.t1.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + + // Get should upgrade to t2 + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("missing: %d", i) + } + } + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } + + // Get be from t2 + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("missing: %d", i) + } + } + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 128 { + t.Fatalf("bad: %d", n) + } +} + +func TestARC_Add_RecentToFrequent(t *testing.T) { + l, err := NewARC(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Add initially to t1 + l.Add(1, 1) + if n := l.t1.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + + // Add should upgrade to t2 + l.Add(1, 1) + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + + // Add should remain in t2 + l.Add(1, 1) + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } +} + +func TestARC_Adaptive(t *testing.T) { + l, err := NewARC(4) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Fill t1 + for i := 0; i < 4; i++ { + l.Add(i, i) + } + if n := l.t1.Len(); n != 4 { + t.Fatalf("bad: %d", n) + } + + // Move to t2 + l.Get(0) + l.Get(1) + if n := l.t2.Len(); n != 2 { + t.Fatalf("bad: %d", n) + } + + // Evict from t1 + l.Add(4, 4) + if n := l.b1.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + + // Current state + // t1 : (MRU) [4, 3] (LRU) + // t2 : (MRU) [1, 0] (LRU) + // b1 : (MRU) [2] (LRU) + // b2 : (MRU) [] (LRU) + + // Add 2, should cause hit on b1 + l.Add(2, 2) + if n := l.b1.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if l.p != 1 { + t.Fatalf("bad: %d", l.p) + } + if n := l.t2.Len(); n != 3 { + t.Fatalf("bad: %d", n) + } + + // Current state + // t1 : (MRU) [4] (LRU) + // t2 : (MRU) [2, 1, 0] (LRU) + // b1 : (MRU) [3] (LRU) + // b2 : (MRU) [] (LRU) + + // Add 4, should migrate to t2 + l.Add(4, 4) + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 4 { + t.Fatalf("bad: %d", n) + } + + // Current state + // t1 : (MRU) [] (LRU) + // t2 : (MRU) [4, 2, 1, 0] (LRU) + // b1 : (MRU) [3] (LRU) + // b2 : (MRU) [] (LRU) + + // Add 4, should evict to b2 + l.Add(5, 5) + if n := l.t1.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 3 { + t.Fatalf("bad: %d", n) + } + if n := l.b2.Len(); n != 1 { + t.Fatalf("bad: %d", n) + } + + // Current state + // t1 : (MRU) [5] (LRU) + // t2 : (MRU) [4, 2, 1] (LRU) + // b1 : (MRU) [3] (LRU) + // b2 : (MRU) [0] (LRU) + + // Add 0, should decrease p + l.Add(0, 0) + if n := l.t1.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if n := l.t2.Len(); n != 4 { + t.Fatalf("bad: %d", n) + } + if n := l.b1.Len(); n != 2 { + t.Fatalf("bad: %d", n) + } + if n := l.b2.Len(); n != 0 { + t.Fatalf("bad: %d", n) + } + if l.p != 0 { + t.Fatalf("bad: %d", l.p) + } + + // Current state + // t1 : (MRU) [] (LRU) + // t2 : (MRU) [0, 4, 2, 1] (LRU) + // b1 : (MRU) [5, 3] (LRU) 
+ // b2 : (MRU) [0] (LRU) +} + +func TestARC(t *testing.T) { + l, err := NewARC(128) + if err != nil { + t.Fatalf("err: %v", err) + } + + for i := 0; i < 256; i++ { + l.Add(i, i) + } + if l.Len() != 128 { + t.Fatalf("bad len: %v", l.Len()) + } + + for i, k := range l.Keys() { + if v, ok := l.Get(k); !ok || v != k || v != i+128 { + t.Fatalf("bad key: %v", k) + } + } + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if ok { + t.Fatalf("should be evicted") + } + } + for i := 128; i < 256; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("should not be evicted") + } + } + for i := 128; i < 192; i++ { + l.Remove(i) + _, ok := l.Get(i) + if ok { + t.Fatalf("should be deleted") + } + } + + l.Purge() + if l.Len() != 0 { + t.Fatalf("bad len: %v", l.Len()) + } + if _, ok := l.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +// Test that Contains doesn't update recent-ness +func TestARC_Contains(t *testing.T) { + l, err := NewARC(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if !l.Contains(1) { + t.Errorf("1 should be contained") + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Contains should not have updated recent-ness of 1") + } +} + +// Test that Peek doesn't update recent-ness +func TestARC_Peek(t *testing.T) { + l, err := NewARC(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if v, ok := l.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1: %v, %v", v, ok) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 0000000..a6285f9 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,114 @@ +// This package provides a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru *simplelru.LRU + lock sync.RWMutex +} + +// New creates an LRU of the given size +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. +func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Check if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. 
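+//
+// Illustrative note (an editor's sketch, not upstream code): with a cache of
+// size 2,
+//
+//	c, _ := New(2)
+//	c.Add("a", 1)
+//	c.Add("b", 2)
+//	v, ok := c.Peek("a") // v == 1, ok == true; "a" is still the oldest entry
+//
+// a following c.Add("c", 3) evicts "a", whereas c.Get("a") would have
+// refreshed it and "b" would be evicted instead.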
+func (c *Cache) Peek(key interface{}) (interface{}, bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } else { + evict := c.lru.Add(key, value) + return false, evict + } +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/lru_test.go new file mode 100644 index 0000000..2b31218 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru_test.go @@ -0,0 +1,221 @@ +package lru + +import ( + "math/rand" + "testing" +) + +func BenchmarkLRU_Rand(b *testing.B) { + l, err := New(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + trace[i] = rand.Int63() % 32768 + } + + b.ResetTimer() + + var hit, miss int + for i := 0; i < 2*b.N; i++ { + if i%2 == 0 { + l.Add(trace[i], trace[i]) + } else { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func BenchmarkLRU_Freq(b *testing.B) { + l, err := New(8192) + if err != nil { + b.Fatalf("err: %v", err) + } + + trace := make([]int64, b.N*2) + for i := 0; i < b.N*2; i++ { + if i%2 == 0 { + trace[i] = rand.Int63() % 16384 + } else { + trace[i] = rand.Int63() % 32768 + } + } + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + l.Add(trace[i], trace[i]) + } + var hit, miss int + for i := 0; i < b.N; i++ { + _, ok := l.Get(trace[i]) + if ok { + hit++ + } else { + miss++ + } + } + b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss)) +} + +func TestLRU(t *testing.T) { + evictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + if k != v { + t.Fatalf("Evict values not equal (%v!=%v)", k, v) + } + evictCounter += 1 + } + l, err := NewWithEvict(128, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + for i := 0; i < 256; i++ { + l.Add(i, i) + } + if l.Len() != 128 { + t.Fatalf("bad len: %v", l.Len()) + } + + if evictCounter != 128 { + t.Fatalf("bad evict count: %v", evictCounter) + } + + for i, k := range l.Keys() { + if v, ok := l.Get(k); !ok || v != k || v != i+128 { + t.Fatalf("bad key: %v", k) + } + } + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if ok { + t.Fatalf("should be evicted") + } + } + for i := 128; i < 256; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("should not be evicted") + } + } + for i := 128; i < 192; i++ { + l.Remove(i) + _, ok := l.Get(i) + if ok { + t.Fatalf("should be deleted") + } + } + + l.Get(192) // expect 192 to be last key in 
l.Keys() + + for i, k := range l.Keys() { + if (i < 63 && k != i+193) || (i == 63 && k != 192) { + t.Fatalf("out of order key: %v", k) + } + } + + l.Purge() + if l.Len() != 0 { + t.Fatalf("bad len: %v", l.Len()) + } + if _, ok := l.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +// test that Add returns true/false if an eviction occurred +func TestLRUAdd(t *testing.T) { + evictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + evictCounter += 1 + } + + l, err := NewWithEvict(1, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + if l.Add(1, 1) == true || evictCounter != 0 { + t.Errorf("should not have an eviction") + } + if l.Add(2, 2) == false || evictCounter != 1 { + t.Errorf("should have an eviction") + } +} + +// test that Contains doesn't update recent-ness +func TestLRUContains(t *testing.T) { + l, err := New(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if !l.Contains(1) { + t.Errorf("1 should be contained") + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Contains should not have updated recent-ness of 1") + } +} + +// test that Contains doesn't update recent-ness +func TestLRUContainsOrAdd(t *testing.T) { + l, err := New(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + contains, evict := l.ContainsOrAdd(1, 1) + if !contains { + t.Errorf("1 should be contained") + } + if evict { + t.Errorf("nothing should be evicted here") + } + + l.Add(3, 3) + contains, evict = l.ContainsOrAdd(1, 1) + if contains { + t.Errorf("1 should not have been contained") + } + if !evict { + t.Errorf("an eviction should have occurred") + } + if !l.Contains(1) { + t.Errorf("now 1 should be contained") + } +} + +// test that Peek doesn't update recent-ness +func TestLRUPeek(t *testing.T) { + l, err := New(2) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if v, ok := l.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1: %v, %v", v, ok) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 0000000..cb416b3 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,160 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
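+// For example (an editor's sketch, not upstream code), with a one-entry cache:
+//
+//	c, _ := NewLRU(1, nil)
+//	c.Add("a", 1) // returns false: nothing evicted
+//	c.Add("b", 2) // returns true: "a" was evicted to make room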
+func (c *LRU) Add(key, value interface{}) bool { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Check if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) bool { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (interface{}, interface{}, bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. 
+func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go new file mode 100644 index 0000000..a958934 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go @@ -0,0 +1,167 @@ +package simplelru + +import "testing" + +func TestLRU(t *testing.T) { + evictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + if k != v { + t.Fatalf("Evict values not equal (%v!=%v)", k, v) + } + evictCounter += 1 + } + l, err := NewLRU(128, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + for i := 0; i < 256; i++ { + l.Add(i, i) + } + if l.Len() != 128 { + t.Fatalf("bad len: %v", l.Len()) + } + + if evictCounter != 128 { + t.Fatalf("bad evict count: %v", evictCounter) + } + + for i, k := range l.Keys() { + if v, ok := l.Get(k); !ok || v != k || v != i+128 { + t.Fatalf("bad key: %v", k) + } + } + for i := 0; i < 128; i++ { + _, ok := l.Get(i) + if ok { + t.Fatalf("should be evicted") + } + } + for i := 128; i < 256; i++ { + _, ok := l.Get(i) + if !ok { + t.Fatalf("should not be evicted") + } + } + for i := 128; i < 192; i++ { + ok := l.Remove(i) + if !ok { + t.Fatalf("should be contained") + } + ok = l.Remove(i) + if ok { + t.Fatalf("should not be contained") + } + _, ok = l.Get(i) + if ok { + t.Fatalf("should be deleted") + } + } + + l.Get(192) // expect 192 to be last key in l.Keys() + + for i, k := range l.Keys() { + if (i < 63 && k != i+193) || (i == 63 && k != 192) { + t.Fatalf("out of order key: %v", k) + } + } + + l.Purge() + if l.Len() != 0 { + t.Fatalf("bad len: %v", l.Len()) + } + if _, ok := l.Get(200); ok { + t.Fatalf("should contain nothing") + } +} + +func TestLRU_GetOldest_RemoveOldest(t *testing.T) { + l, err := NewLRU(128, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + for i := 0; i < 256; i++ { + l.Add(i, i) + } + k, _, ok := l.GetOldest() + if !ok { + t.Fatalf("missing") + } + if k.(int) != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = l.RemoveOldest() + if !ok { + t.Fatalf("missing") + } + if k.(int) != 128 { + t.Fatalf("bad: %v", k) + } + + k, _, ok = l.RemoveOldest() + if !ok { + t.Fatalf("missing") + } + if k.(int) != 129 { + t.Fatalf("bad: %v", k) + } +} + +// Test that Add returns true/false if an eviction occurred +func TestLRU_Add(t *testing.T) { + evictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + evictCounter += 1 + } + + l, err := NewLRU(1, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + if l.Add(1, 1) == true || evictCounter != 0 { + t.Errorf("should not have an eviction") + } + if l.Add(2, 2) == false || evictCounter != 1 { + t.Errorf("should have an eviction") + } +} + +// Test that Contains doesn't update recent-ness +func TestLRU_Contains(t *testing.T) { + l, err := NewLRU(2, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if !l.Contains(1) { + t.Errorf("1 should be contained") + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Contains should not have updated recent-ness of 1") + } +} + +// Test that Peek doesn't update recent-ness +func TestLRU_Peek(t *testing.T) { + l, err := NewLRU(2, nil) + if 
err != nil { + t.Fatalf("err: %v", err) + } + + l.Add(1, 1) + l.Add(2, 2) + if v, ok := l.Peek(1); !ok || v != 1 { + t.Errorf("1 should be set to 1: %v, %v", v, ok) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("should not have updated recent-ness of 1") + } +} diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml new file mode 100644 index 0000000..9d91c63 --- /dev/null +++ b/vendor/github.com/imdario/mergo/.travis.yml @@ -0,0 +1,2 @@ +language: go +install: go get -t diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 0000000..6866802 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 0000000..f4769cd --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,128 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. + + + +## Status + +It is ready for production use. It works fine after extensive use in the wild. + +[![Build Status][1]][2] +[![GoDoc][3]][4] +[![GoCard][5]][6] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo + +### Important note + +Mergo is intended to assign **only** zero value fields on destination with source value. Since April 6th it works like this. Before it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. 
Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected. + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). + +### Mergo in the wild + +- [docker/docker](https://github.com/docker/docker/) +- [GoogleCloudPlatform/kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) +- [imdario/zas](https://github.com/imdario/zas) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + + if err := mergo.Merge(&dst, src); err != nil { + // ... + } + +Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. + + if err := mergo.Map(&dst, srcMap); err != nil { + // ... + } + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). 
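+
+A minimal sketch of the map-to-struct direction (an editor's illustration, not
+part of the upstream README; the `Config` struct is hypothetical, only
+`mergo.Map` is assumed):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+// Config is a hypothetical destination struct for this sketch.
+type Config struct {
+	Host string
+	Port int
+}
+
+func main() {
+	dst := Config{}
+	src := map[string]interface{}{
+		"host": "localhost", // map keys are capitalized to match exported fields
+		"port": 8080,
+	}
+	if err := mergo.Map(&dst, src); err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(dst) // {localhost 8080}
+}
+```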
+ +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + } + + dest := Foo{ + A: "two", + B: 2, + } + + mergo.Merge(&dest, src) + + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v1 + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 0000000..6e9aa7b --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/issue17_test.go b/vendor/github.com/imdario/mergo/issue17_test.go new file mode 100644 index 0000000..0ee96f3 --- /dev/null +++ b/vendor/github.com/imdario/mergo/issue17_test.go @@ -0,0 +1,25 @@ +package mergo + +import ( + "encoding/json" + "testing" +) + +var ( + request = `{"timestamp":null, "name": "foo"}` + maprequest = map[string]interface{}{ + "timestamp": nil, + "name": "foo", + "newStuff": "foo", + } +) + +func TestIssue17MergeWithOverwrite(t *testing.T) { + var something map[string]interface{} + if err := json.Unmarshal([]byte(request), &something); err != nil { + t.Errorf("Error while Unmarshalling maprequest %s", err) + } + if err := MergeWithOverwrite(&something, maprequest); err != nil { + t.Errorf("Error while merging %s", err) + } +} diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 0000000..8e8c4ba --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,156 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. 
+ +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. 
+func Map(dst, src interface{}) error { + return _map(dst, src, false) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +func MapWithOverwrite(dst, src interface{}) error { + return _map(dst, src, true) +} + +func _map(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 0000000..11e55b1 --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,123 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "reflect" +) + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + switch dst.Kind() { + case reflect.Struct: + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { + return + } + } + case reflect.Map: + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { + return + } + } + } + if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } else if dst.IsNil() { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}) error { + return merge(dst, src, false) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +func MergeWithOverwrite(dst, src interface{}) error { + return merge(dst, src, true) +} + +func merge(dst, src interface{}, overwrite bool) error { + var ( + vDst, vSrc reflect.Value + err error + ) + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 0000000..f8a0991 --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,90 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. 
+var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/imdario/mergo/mergo_test.go b/vendor/github.com/imdario/mergo/mergo_test.go new file mode 100644 index 0000000..dd2651b --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo_test.go @@ -0,0 +1,525 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mergo + +import ( + "io/ioutil" + "reflect" + "testing" + "time" + + "gopkg.in/yaml.v1" +) + +type simpleTest struct { + Value int +} + +type complexTest struct { + St simpleTest + sz int + ID string +} + +type moreComplextText struct { + Ct complexTest + St simpleTest + Nt simpleTest +} + +type pointerTest struct { + C *simpleTest +} + +type sliceTest struct { + S []int +} + +func TestKb(t *testing.T) { + type testStruct struct { + Name string + KeyValue map[string]interface{} + } + + akv := make(map[string]interface{}) + akv["Key1"] = "not value 1" + akv["Key2"] = "value2" + a := testStruct{} + a.Name = "A" + a.KeyValue = akv + + bkv := make(map[string]interface{}) + bkv["Key1"] = "value1" + bkv["Key3"] = "value3" + b := testStruct{} + b.Name = "B" + b.KeyValue = bkv + + ekv := make(map[string]interface{}) + ekv["Key1"] = "value1" + ekv["Key2"] = "value2" + ekv["Key3"] = "value3" + expected := testStruct{} + expected.Name = "B" + expected.KeyValue = ekv + + Merge(&b, a) + + if !reflect.DeepEqual(b, expected) { + t.Errorf("Actual: %#v did not match \nExpected: %#v", b, expected) + } +} + +func TestNil(t *testing.T) { + if err := Merge(nil, nil); err != ErrNilArguments { + t.Fail() + } +} + +func TestDifferentTypes(t *testing.T) { + a := simpleTest{42} + b := 42 + if err := Merge(&a, b); err != ErrDifferentArgumentsTypes { + t.Fail() + } +} + +func TestSimpleStruct(t *testing.T) { + a := simpleTest{} + b := simpleTest{42} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%d)", a.Value, b.Value) + } + if !reflect.DeepEqual(a, b) { + t.FailNow() + } +} + +func TestComplexStruct(t *testing.T) { + a := complexTest{} + a.ID = "athing" + b := complexTest{simpleTest{42}, 1, "bthing"} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.St.Value != 42 { + t.Fatalf("b not merged in properly: a.St.Value(%d) != b.St.Value(%d)", a.St.Value, b.St.Value) + } + if a.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: a.sz(%d) == b.sz(%d)", a.sz, b.sz) + } + if a.ID == b.ID { + t.Fatalf("a's field ID merged unexpectedly: a.ID(%s) == b.ID(%s)", a.ID, b.ID) + } +} + +func TestComplexStructWithOverwrite(t *testing.T) { + a := complexTest{simpleTest{1}, 1, "do-not-overwrite-with-empty-value"} + b := complexTest{simpleTest{42}, 2, ""} + + expect := complexTest{simpleTest{42}, 1, "do-not-overwrite-with-empty-value"} + if err := MergeWithOverwrite(&a, b); err != nil { + t.FailNow() + } + + if !reflect.DeepEqual(a, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", a, expect) + } +} + +func TestPointerStruct(t *testing.T) { + s1 := simpleTest{} + s2 := simpleTest{19} + a := pointerTest{&s1} + b := pointerTest{&s2} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +type embeddingStruct struct { + embeddedStruct +} + +type embeddedStruct struct { + A string +} + +func TestEmbeddedStruct(t *testing.T) { + tests := []struct { + src embeddingStruct + dst embeddingStruct + expected embeddingStruct + }{ + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{""}, + }, + expected: embeddingStruct{ + embeddedStruct{"foo"}, + }, + }, + { + src: embeddingStruct{ + embeddedStruct{""}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + 
}, + { + src: embeddingStruct{ + embeddedStruct{"foo"}, + }, + dst: embeddingStruct{ + embeddedStruct{"bar"}, + }, + expected: embeddingStruct{ + embeddedStruct{"bar"}, + }, + }, + } + + for _, test := range tests { + err := Merge(&test.dst, test.src) + if err != nil { + t.Errorf("unexpected error: %v", err) + continue + } + if !reflect.DeepEqual(test.dst, test.expected) { + t.Errorf("unexpected output\nexpected:\n%+v\nsaw:\n%+v\n", test.expected, test.dst) + } + } +} + +func TestPointerStructNil(t *testing.T) { + a := pointerTest{nil} + b := pointerTest{&simpleTest{19}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if a.C.Value != b.C.Value { + t.Fatalf("b not merged in a properly: a.C.Value(%d) != b.C.Value(%d)", a.C.Value, b.C.Value) + } +} + +func TestSliceStruct(t *testing.T) { + a := sliceTest{} + b := sliceTest{[]int{1, 2, 3}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if len(b.S) != 3 { + t.FailNow() + } + if len(a.S) != len(b.S) { + t.Fatalf("b not merged in a proper way %d != %d", len(a.S), len(b.S)) + } + + a = sliceTest{[]int{1}} + b = sliceTest{[]int{1, 2, 3}} + if err := Merge(&a, b); err != nil { + t.FailNow() + } + if len(a.S) != 1 { + t.FailNow() + } + if len(a.S) == len(b.S) { + t.Fatalf("b merged unexpectedly %d != %d", len(a.S), len(b.S)) + } +} + +func TestMapsWithOverwrite(t *testing.T) { + m := map[string]simpleTest{ + "a": {}, // overwritten by 16 + "b": {42}, // not overwritten by empty value + "c": {13}, // overwritten by 12 + "d": {61}, + } + n := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "e": {14}, + } + expect := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "d": {61}, + "e": {14}, + } + + if err := MergeWithOverwrite(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } +} + +func TestMaps(t *testing.T) { + m := map[string]simpleTest{ + "a": {}, + "b": {42}, + "c": {13}, + "d": {61}, + } + n := map[string]simpleTest{ + "a": {16}, + "b": {}, + "c": {12}, + "e": {14}, + } + expect := map[string]simpleTest{ + "a": {0}, + "b": {42}, + "c": {13}, + "d": {61}, + "e": {14}, + } + + if err := Merge(&m, n); err != nil { + t.Fatalf(err.Error()) + } + + if !reflect.DeepEqual(m, expect) { + t.Fatalf("Test failed:\ngot :\n%#v\n\nwant :\n%#v\n\n", m, expect) + } + if m["a"].Value != 0 { + t.Fatalf(`n merged in m because I solved non-addressable map values TODO: m["a"].Value(%d) != n["a"].Value(%d)`, m["a"].Value, n["a"].Value) + } + if m["b"].Value != 42 { + t.Fatalf(`n wrongly merged in m: m["b"].Value(%d) != n["b"].Value(%d)`, m["b"].Value, n["b"].Value) + } + if m["c"].Value != 13 { + t.Fatalf(`n overwritten in m: m["c"].Value(%d) != n["c"].Value(%d)`, m["c"].Value, n["c"].Value) + } +} + +func TestYAMLMaps(t *testing.T) { + thing := loadYAML("testdata/thing.yml") + license := loadYAML("testdata/license.yml") + ft := thing["fields"].(map[interface{}]interface{}) + fl := license["fields"].(map[interface{}]interface{}) + expectedLength := len(ft) + len(fl) + if err := Merge(&license, thing); err != nil { + t.Fatal(err.Error()) + } + currentLength := len(license["fields"].(map[interface{}]interface{})) + if currentLength != expectedLength { + t.Fatalf(`thing not merged in license properly, license must have %d elements instead of %d`, expectedLength, currentLength) + } + fields := license["fields"].(map[interface{}]interface{}) + if _, ok := fields["id"]; !ok { + t.Fatalf(`thing not merged in license 
properly, license must have a new id field from thing`) + } +} + +func TestTwoPointerValues(t *testing.T) { + a := &simpleTest{} + b := &simpleTest{42} + if err := Merge(a, b); err != nil { + t.Fatalf(`Boom. You crossed the streams: %s`, err) + } +} + +func TestMap(t *testing.T) { + a := complexTest{} + a.ID = "athing" + c := moreComplextText{a, simpleTest{}, simpleTest{}} + b := map[string]interface{}{ + "ct": map[string]interface{}{ + "st": map[string]interface{}{ + "value": 42, + }, + "sz": 1, + "id": "bthing", + }, + "st": &simpleTest{144}, // Mapping a reference + "zt": simpleTest{299}, // Mapping a missing field (zt doesn't exist) + "nt": simpleTest{3}, + } + if err := Map(&c, b); err != nil { + t.FailNow() + } + m := b["ct"].(map[string]interface{}) + n := m["st"].(map[string]interface{}) + o := b["st"].(*simpleTest) + p := b["nt"].(simpleTest) + if c.Ct.St.Value != 42 { + t.Fatalf("b not merged in properly: c.Ct.St.Value(%d) != b.Ct.St.Value(%d)", c.Ct.St.Value, n["value"]) + } + if c.St.Value != 144 { + t.Fatalf("b not merged in properly: c.St.Value(%d) != b.St.Value(%d)", c.St.Value, o.Value) + } + if c.Nt.Value != 3 { + t.Fatalf("b not merged in properly: c.Nt.Value(%d) != b.Nt.Value(%d)", c.St.Value, p.Value) + } + if c.Ct.sz == 1 { + t.Fatalf("a's private field sz not preserved from merge: c.Ct.sz(%d) == b.Ct.sz(%d)", c.Ct.sz, m["sz"]) + } + if c.Ct.ID == m["id"] { + t.Fatalf("a's field ID merged unexpectedly: c.Ct.ID(%s) == b.Ct.ID(%s)", c.Ct.ID, m["id"]) + } +} + +func TestSimpleMap(t *testing.T) { + a := simpleTest{} + b := map[string]interface{}{ + "value": 42, + } + if err := Map(&a, b); err != nil { + t.FailNow() + } + if a.Value != 42 { + t.Fatalf("b not merged in properly: a.Value(%d) != b.Value(%v)", a.Value, b["value"]) + } +} + +type pointerMapTest struct { + A int + hidden int + B *simpleTest +} + +func TestBackAndForth(t *testing.T) { + pt := pointerMapTest{42, 1, &simpleTest{66}} + m := make(map[string]interface{}) + if err := Map(&m, pt); err != nil { + t.FailNow() + } + var ( + v interface{} + ok bool + ) + if v, ok = m["a"]; v.(int) != pt.A || !ok { + t.Fatalf("pt not merged in properly: m[`a`](%d) != pt.A(%d)", v, pt.A) + } + if v, ok = m["b"]; !ok { + t.Fatalf("pt not merged in properly: B is missing in m") + } + var st *simpleTest + if st = v.(*simpleTest); st.Value != 66 { + t.Fatalf("something went wrong while mapping pt on m, B wasn't copied") + } + bpt := pointerMapTest{} + if err := Map(&bpt, m); err != nil { + t.Fatal(err) + } + if bpt.A != pt.A { + t.Fatalf("pt not merged in properly: bpt.A(%d) != pt.A(%d)", bpt.A, pt.A) + } + if bpt.hidden == pt.hidden { + t.Fatalf("pt unexpectedly merged: bpt.hidden(%d) == pt.hidden(%d)", bpt.hidden, pt.hidden) + } + if bpt.B.Value != pt.B.Value { + t.Fatalf("pt not merged in properly: bpt.B.Value(%d) != pt.B.Value(%d)", bpt.B.Value, pt.B.Value) + } +} + +type structWithTimePointer struct { + Birth *time.Time +} + +func TestTime(t *testing.T) { + now := time.Now() + dataStruct := structWithTimePointer{ + Birth: &now, + } + dataMap := map[string]interface{}{ + "Birth": &now, + } + b := structWithTimePointer{} + if err := Merge(&b, dataStruct); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + if b.Birth != dataStruct.Birth { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataStruct['Birth'](%v)", b.Birth, dataStruct.Birth) + } + b = structWithTimePointer{} + if err := Map(&b, 
dataMap); err != nil { + t.FailNow() + } + if b.Birth.IsZero() { + t.Fatalf("time.Time not merged in properly: b.Birth(%v) != dataMap['Birth'](%v)", b.Birth, dataMap["Birth"]) + } +} + +type simpleNested struct { + A int +} + +type structWithNestedPtrValueMap struct { + NestedPtrValue map[string]*simpleNested +} + +func TestNestedPtrValueInMap(t *testing.T) { + src := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + "x": { + A: 1, + }, + }, + } + dst := &structWithNestedPtrValueMap{ + NestedPtrValue: map[string]*simpleNested{ + "x": {}, + }, + } + if err := Map(dst, src); err != nil { + t.FailNow() + } + if dst.NestedPtrValue["x"].A == 0 { + t.Fatalf("Nested Ptr value not merged in properly: dst.NestedPtrValue[\"x\"].A(%v) != src.NestedPtrValue[\"x\"].A(%v)", dst.NestedPtrValue["x"].A, src.NestedPtrValue["x"].A) + } +} + +func loadYAML(path string) (m map[string]interface{}) { + m = make(map[string]interface{}) + raw, _ := ioutil.ReadFile(path) + _ = yaml.Unmarshal(raw, &m) + return +} + +type structWithMap struct { + m map[string]structWithUnexportedProperty +} + +type structWithUnexportedProperty struct { + s string +} + +func TestUnexportedProperty(t *testing.T) { + a := structWithMap{map[string]structWithUnexportedProperty{ + "key": structWithUnexportedProperty{"hello"}, + }} + b := structWithMap{map[string]structWithUnexportedProperty{ + "key": structWithUnexportedProperty{"hi"}, + }} + defer func() { + if r := recover(); r != nil { + t.Errorf("Should not have panicked") + } + }() + Merge(&a, b) +} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 0000000..62fdb61 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,3 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string diff --git a/vendor/github.com/imdario/mergo/testdata/thing.yml b/vendor/github.com/imdario/mergo/testdata/thing.yml new file mode 100644 index 0000000..c28eab0 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/thing.yml @@ -0,0 +1,5 @@ +fields: + id: int + name: string + parent: ref "datu:thing" + status: enum(draft, public, private) diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore new file mode 100644 index 0000000..776cd95 --- /dev/null +++ b/vendor/github.com/miekg/dns/.gitignore @@ -0,0 +1,4 @@ +*.6 +tags +test.out +a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml new file mode 100644 index 0000000..bb8b8d4 --- /dev/null +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.7.x + - 1.8.x + - tip + +before_install: + # don't use the miekg/dns when testing forks + - mkdir -p $GOPATH/src/github.com/miekg + - ln -s $TRAVIS_BUILD_DIR $GOPATH/src/github.com/miekg/ || true + +script: + - go test -race -v -bench=. diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 0000000..1965683 --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben <miek@miek.nl> diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 0000000..f77e8a8 --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,9 @@ +Alex A. 
Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 0000000..35702b1 --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 0000000..5763fa7 --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md new file mode 100644 index 0000000..32a49cb --- /dev/null +++ b/vendor/github.com/miekg/dns/README.md @@ -0,0 +1,162 @@ +[](https://travis-ci.org/miekg/dns) +[](https://godoc.org/github.com/miekg/dns) + +# Alternative (more granular) approach to a DNS library + +> Less is more. + +Complete and usable DNS library. All widely used Resource Records are +supported, including the DNSSEC types. It follows a lean and mean philosophy. +If there is stuff you should know as a DNS programmer there isn't a convenience +function for it. Server side and client side programming is supported, i.e. you +can build servers and resolvers with it. 
+ +We try to keep the "master" branch as sane as possible and at the bleeding edge +of standards, avoiding breaking changes wherever reasonable. We support the last +two versions of Go, currently: 1.7 and 1.8. + +# Goals + +* KISS; +* Fast; +* Small API. If it's easy to code in Go, don't make a function for it. + +# Users + +A not-so-up-to-date-list-that-may-be-actually-current: + +* https://github.com/coredns/coredns +* https://cloudflare.com +* https://github.com/abh/geodns +* http://www.statdns.com/ +* http://www.dnsinspect.com/ +* https://github.com/chuangbo/jianbing-dictionary-dns +* http://www.dns-lg.com/ +* https://github.com/fcambus/rrda +* https://github.com/kenshinx/godns +* https://github.com/skynetservices/skydns +* https://github.com/hashicorp/consul +* https://github.com/DevelopersPL/godnsagent +* https://github.com/duedil-ltd/discodns +* https://github.com/StalkR/dns-reverse-proxy +* https://github.com/tianon/rawdns +* https://mesosphere.github.io/mesos-dns/ +* https://pulse.turbobytes.com/ +* https://play.google.com/store/apps/details?id=com.turbobytes.dig +* https://github.com/fcambus/statzone +* https://github.com/benschw/dns-clb-go +* https://github.com/corny/dnscheck for http://public-dns.info/ +* https://namesmith.io +* https://github.com/miekg/unbound +* https://github.com/miekg/exdns +* https://dnslookup.org +* https://github.com/looterz/grimd +* https://github.com/phamhongviet/serf-dns +* https://github.com/mehrdadrad/mylg +* https://github.com/bamarni/dockness +* https://github.com/fffaraz/microdns +* http://quilt.io +* https://github.com/ipdcode/hades (JD.COM) +* https://github.com/StackExchange/dnscontrol/ +* https://www.dnsperf.com/ +* https://dnssectest.net/ + +Send pull request if you want to be listed here. + +# Features + +* UDP/TCP queries, IPv4 and IPv6; +* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported; +* Fast: + * Reply speed around ~ 80K qps (faster hardware results in more qps); + * Parsing RRs ~ 100K RR/s, that's 5M records in about 50 seconds; +* Server side programming (mimicking the net/http package); +* Client side programming; +* DNSSEC: signing, validating and key generation for DSA, RSA and ECDSA; +* EDNS0, NSID, Cookies; +* AXFR/IXFR; +* TSIG, SIG(0); +* DNS over TLS: optional encrypted connection between client and server; +* DNS name compression; +* Depends only on the standard library. + +Have fun! + +Miek Gieben - 2010-2012 - <miek@miek.nl> + +# Building + +Building is done with the `go` tool. If you have setup your GOPATH +correctly, the following should work: + + go get github.com/miekg/dns + go build github.com/miekg/dns + +## Examples + +A short "how to use the API" is at the beginning of doc.go (this also will show +when you call `godoc github.com/miekg/dns`). + +Example programs can be found in the `github.com/miekg/exdns` repository. 
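As a quick sketch of the client-side pattern referenced above (not an upstream example; the resolver address 8.8.8.8:53 is only a placeholder), a one-shot query looks roughly like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Ask for the SOA record of miek.nl. Query names must be fully qualified.
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeSOA)

	// Exchange sends the query over UDP and waits for the reply.
	c := new(dns.Client)
	in, rtt, err := c.Exchange(m, "8.8.8.8:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rcode=%d answers=%d rtt=%v\n", in.Rcode, len(in.Answer), rtt)
}
```

Server-side code follows the net/http-like style used in the package's tests: register a handler with dns.HandleFunc and run a server for the zone you want to answer for.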
+ +## Supported RFCs + +*all of them* + +* 103{4,5} - DNS standard +* 1348 - NSAP record (removed the record) +* 1982 - Serial Arithmetic +* 1876 - LOC record +* 1995 - IXFR +* 1996 - DNS notify +* 2136 - DNS Update (dynamic updates) +* 2181 - RRset definition - there is no RRset type though, just []RR +* 2537 - RSAMD5 DNS keys +* 2065 - DNSSEC (updated in later RFCs) +* 2671 - EDNS record +* 2782 - SRV record +* 2845 - TSIG record +* 2915 - NAPTR record +* 2929 - DNS IANA Considerations +* 3110 - RSASHA1 DNS keys +* 3225 - DO bit (DNSSEC OK) +* 340{1,2,3} - NAPTR record +* 3445 - Limiting the scope of (DNS)KEY +* 3597 - Unknown RRs +* 403{3,4,5} - DNSSEC + validation functions +* 4255 - SSHFP record +* 4343 - Case insensitivity +* 4408 - SPF record +* 4509 - SHA256 Hash in DS +* 4592 - Wildcards in the DNS +* 4635 - HMAC SHA TSIG +* 4701 - DHCID +* 4892 - id.server +* 5001 - NSID +* 5155 - NSEC3 record +* 5205 - HIP record +* 5702 - SHA2 in the DNS +* 5936 - AXFR +* 5966 - TCP implementation recommendations +* 6605 - ECDSA +* 6725 - IANA Registry Update +* 6742 - ILNP DNS +* 6840 - Clarifications and Implementation Notes for DNS Security +* 6844 - CAA record +* 6891 - EDNS0 update +* 6895 - DNS IANA considerations +* 6975 - Algorithm Understanding in DNSSEC +* 7043 - EUI48/EUI64 records +* 7314 - DNS (EDNS) EXPIRE Option +* 7828 - edns-tcp-keepalive EDNS0 Option +* 7553 - URI record +* 7858 - DNS over TLS: Initiation and Performance Considerations (draft) +* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) +* xxxx - EDNS0 DNS Update Lease (draft) + +## Loosely based upon + +* `ldns` +* `NSD` +* `Net::DNS` +* `GRONG` diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 0000000..1c14a19 --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,531 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "io" + "net" + "time" +) + +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified + rtt time.Duration + t time.Time + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. 
+type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + var co *Conn + co, err = DialTimeout("udp", a, dnsTimeout) + if err != nil { + return nil, err + } + + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(time.Now().Add(dnsTimeout)) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(time.Now().Add(dnsTimeout)) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// ExchangeContext performs a synchronous UDP query, like Exchange. It +// additionally obeys deadlines from the passed Context. +func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { + // Combine context deadline with built-in timeout. Context chooses whichever + // is sooner. + timeoutCtx, cancel := context.WithTimeout(ctx, dnsTimeout) + defer cancel() + deadline, _ := timeoutCtx.Deadline() + + co := new(Conn) + dialer := net.Dialer{} + co.Conn, err = dialer.DialContext(timeoutCtx, "udp", a) + if err != nil { + return nil, err + } + + defer co.Conn.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + + co.SetWriteDeadline(deadline) + if err = co.WriteMsg(m); err != nil { + return nil, err + } + + co.SetReadDeadline(deadline) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// ExchangeConn performs a synchronous query. It sends the message m via the connection +// c and waits for a reply. The connection c is not closed by ExchangeConn. 
+// This function is going away, but can easily be mimicked: +// +// co := &dns.Conn{Conn: c} // c is your net.Conn +// co.WriteMsg(m) +// in, _ := co.ReadMsg() +// co.Close() +// +func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { + println("dns: this function is deprecated") + co := new(Conn) + co.Conn = c + if err = co.WriteMsg(m); err != nil { + return nil, err + } + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, err +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit +// of 512 bytes. +func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + return c.ExchangeContext(context.Background(), m, a) +} + +// ExchangeContext acts like Exchange, but honors the deadline on the provided +// context, if present. If there is both a context deadline and a configured +// timeout on the client, the earliest of the two takes effect. +func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) ( + r *Msg, + rtt time.Duration, + err error) { + if !c.SingleInflight { + return c.exchange(ctx, m, a) + } + // This adds a bunch of garbage, TODO(miek). + t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(ctx, m, a) + }) + if r != nil && shared { + r = r.Copy() + } + if err != nil { + return r, rtt, err + } + return r, rtt, nil +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +func (c *Client) exchange(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + network := "udp" + tls := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + tls = true + case "tcp4-tls": + network = "tcp4" + tls = true + case "tcp6-tls": + network = "tcp6" + tls = true + default: + if c.Net != "" { + network = c.Net + } + } + + var deadline time.Time + if c.Timeout != 0 { + deadline = time.Now().Add(c.Timeout) + } + + dialDeadline := deadlineOrTimeoutOrCtx(ctx, deadline, c.dialTimeout()) + dialTimeout := dialDeadline.Sub(time.Now()) + + if tls { + co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, dialTimeout) + } else { + co, err = DialTimeout(network, a, dialTimeout) + } + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. 
+ if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + co.SetWriteDeadline(deadlineOrTimeoutOrCtx(ctx, deadline, c.writeTimeout())) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(deadlineOrTimeoutOrCtx(ctx, deadline, c.readTimeout())) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + return r, co.rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction +// signature is verified. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If ErrTruncated was returned, we still want to allow the user to use + // the message, but naively they can just check err if they don't want + // to use a truncated message + if err == ErrTruncated { + return m, err + } + return nil, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. + l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + co.rtt = time.Since(co.t) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + co.rtt = time.Since(co.t) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + + // As seen with my local router/switch, retursn 1 byte on the above read, + // resulting a a ShortRead. Just write it out (instead of loop) and read the + // other byte. + if n == 1 { + n1, err := t.Read(p[1:]) + if err != nil { + return 0, err + } + n += n1 + } + + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. 
+func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + co.t = time.Now() + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) + n, err := io.Copy(w, bytes.NewReader(p)) + return int(n), err + } + n, err = co.Conn.Write(p) + return n, err +} + +// Dial connects to the address on the named network. +func Dial(network, address string) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.Dial(network, address) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeout acts like Dial but takes a timeout. +func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = net.DialTimeout(network, address, timeout) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialWithTLS connects to the address on the named network with TLS. +func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { + conn = new(Conn) + conn.Conn, err = tls.Dial(network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. +func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { + var dialer net.Dialer + dialer.Timeout = timeout + + conn = new(Conn) + conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig) + if err != nil { + return nil, err + } + return conn, nil +} + +// deadlineOrTimeout chooses between the provided deadline and timeout +// by always preferring the deadline so long as it's non-zero (regardless +// of which is bigger), and returns the equivalent deadline value. +func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time { + if deadline.IsZero() { + return time.Now().Add(timeout) + } + return deadline +} + +// deadlineOrTimeoutOrCtx returns the earliest of: a context deadline, or the +// output of deadlineOrtimeout. 
+func deadlineOrTimeoutOrCtx(ctx context.Context, deadline time.Time, timeout time.Duration) time.Time { + result := deadlineOrTimeout(deadline, timeout) + if ctxDeadline, ok := ctx.Deadline(); ok && ctxDeadline.Before(result) { + result = ctxDeadline + } + return result +} diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go new file mode 100644 index 0000000..d29e4e3 --- /dev/null +++ b/vendor/github.com/miekg/dns/client_test.go @@ -0,0 +1,526 @@ +package dns + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "strconv" + "sync" + "testing" + "time" +) + +func TestClientSync(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } + // And now with plain Exchange(). + r, err = Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r == nil || r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } +} + +func TestClientTLSSync(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + c.Net = "tcp-tls" + c.TLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } +} + +func TestClientSyncBadID(t *testing.T) { + HandleFunc("miek.nl.", HelloServerBadID) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + if _, _, err := c.Exchange(m, addrstr); err != ErrId { + t.Errorf("did not find a bad Id") + } + // And now with plain Exchange(). + if _, err := Exchange(m, addrstr); err != ErrId { + t.Errorf("did not find a bad Id") + } +} + +func TestClientEDNS0(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeDNSKEY) + + m.SetEdns0(2048, true) + + c := new(Client) + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } +} + +// Validates the transmission and parsing of local EDNS0 options. 
+func TestClientEDNS0Local(t *testing.T) { + optStr1 := "1979:0x0707" + optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601" + + handler := func(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1, 2) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}} + + // If the local options are what we expect, then reflect them back. + ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String() + ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String() + if ec1 == optStr1 && ec2 == optStr2 { + m.Extra = append(m.Extra, req.Extra[0]) + } + + w.WriteMsg(m) + } + + HandleFunc("miek.nl.", handler) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %s", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + + // Add two local edns options to the query. + ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}} + ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}} + o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}} + m.Extra = append(m.Extra, o) + + c := new(Client) + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %s", err) + } + + if r != nil && r.Rcode != RcodeSuccess { + t.Error("failed to get a valid answer") + t.Logf("%v\n", r) + } + + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello local edns" { + t.Error("Unexpected result for miek.nl", txt, "!= Hello local edns") + } + + // Validate the local options in the reply. + got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() + if got != optStr1 { + t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) + t.Logf("%v\n", r) + } + + got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() + if got != optStr2 { + t.Errorf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) + t.Logf("%v\n", r) + } +} + +// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG +func ExampleTsigSecret_updateLeaseTSIG() { + m := new(Msg) + m.SetUpdate("t.local.ip6.io.") + rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") + rrs := make([]RR, 1) + rrs[0] = rr + m.Insert(rrs) + + leaseRr := new(OPT) + leaseRr.Hdr.Name = "." 
+ leaseRr.Hdr.Rrtype = TypeOPT + e := new(EDNS0_UL) + e.Code = EDNS0UL + e.Lease = 120 + leaseRr.Option = append(leaseRr.Option, e) + m.Extra = append(m.Extra, leaseRr) + + c := new(Client) + m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) + c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} + + _, _, err := c.Exchange(m, "127.0.0.1:53") + if err != nil { + panic(err) + } +} + +func TestClientConn(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + // This uses TCP just to make it slightly different than TestClientSync + s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + cn, err := Dial("tcp", addrstr) + if err != nil { + t.Errorf("failed to dial %s: %v", addrstr, err) + } + + err = cn.WriteMsg(m) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + r, err := cn.ReadMsg() + if r == nil || r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } + + err = cn.WriteMsg(m) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + h := new(Header) + buf, err := cn.ReadMsgHeader(h) + if buf == nil { + t.Errorf("failed to get an valid answer\n%v", r) + } + if int(h.Bits&0xF) != RcodeSuccess { + t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r) + } + if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 { + t.Errorf("expected to have question and additional in response; got something else: %+v", h) + } + if err = r.Unpack(buf); err != nil { + t.Errorf("unable to unpack message fully: %v", err) + } +} + +func TestTruncatedMsg(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSRV) + cnt := 10 + for i := 0; i < cnt; i++ { + r := &SRV{ + Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeSRV, Class: ClassINET, Ttl: 0}, + Port: uint16(i + 8000), + Target: "target.miek.nl.", + } + m.Answer = append(m.Answer, r) + + re := &A{ + Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeA, Class: ClassINET, Ttl: 0}, + A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i)).To4(), + } + m.Extra = append(m.Extra, re) + } + buf, err := m.Pack() + if err != nil { + t.Errorf("failed to pack: %v", err) + } + + r := new(Msg) + if err = r.Unpack(buf); err != nil { + t.Errorf("unable to unpack message: %v", err) + } + if len(r.Answer) != cnt { + t.Errorf("answer count after regular unpack doesn't match: %d", len(r.Answer)) + } + if len(r.Extra) != cnt { + t.Errorf("extra count after regular unpack doesn't match: %d", len(r.Extra)) + } + + m.Truncated = true + buf, err = m.Pack() + if err != nil { + t.Errorf("failed to pack truncated: %v", err) + } + + r = new(Msg) + if err = r.Unpack(buf); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack truncated message: %v", err) + } + if !r.Truncated { + t.Errorf("truncated message wasn't unpacked as truncated") + } + if len(r.Answer) != cnt { + t.Errorf("answer count after truncated unpack doesn't match: %d", len(r.Answer)) + } + if len(r.Extra) != cnt { + t.Errorf("extra count after truncated unpack doesn't match: %d", len(r.Extra)) + } + + // Now we want to remove almost all of the extra records + // We're going to loop over the extra to get the count of the size of all + // of them + off := 0 + buf1 := make([]byte, m.Len()) + for i := 0; i < len(m.Extra); i++ { + off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) + if err != nil { + t.Errorf("failed to pack 
extra: %v", err) + } + } + + // Remove all of the extra bytes but 10 bytes from the end of buf + off -= 10 + buf1 = buf[:len(buf)-off] + + r = new(Msg) + if err = r.Unpack(buf1); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack cutoff message: %v", err) + } + if !r.Truncated { + t.Error("truncated cutoff message wasn't unpacked as truncated") + } + if len(r.Answer) != cnt { + t.Errorf("answer count after cutoff unpack doesn't match: %d", len(r.Answer)) + } + if len(r.Extra) != 0 { + t.Errorf("extra count after cutoff unpack is not zero: %d", len(r.Extra)) + } + + // Now we want to remove almost all of the answer records too + buf1 = make([]byte, m.Len()) + as := 0 + for i := 0; i < len(m.Extra); i++ { + off1 := off + off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) + as = off - off1 + if err != nil { + t.Errorf("failed to pack extra: %v", err) + } + } + + // Keep exactly one answer left + // This should still cause Answer to be nil + off -= as + buf1 = buf[:len(buf)-off] + + r = new(Msg) + if err = r.Unpack(buf1); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack cutoff message: %v", err) + } + if !r.Truncated { + t.Error("truncated cutoff message wasn't unpacked as truncated") + } + if len(r.Answer) != 0 { + t.Errorf("answer count after second cutoff unpack is not zero: %d", len(r.Answer)) + } + + // Now leave only 1 byte of the question + // Since the header is always 12 bytes, we just need to keep 13 + buf1 = buf[:13] + + r = new(Msg) + err = r.Unpack(buf1) + if err == nil || err == ErrTruncated { + t.Errorf("error should not be ErrTruncated from question cutoff unpack: %v", err) + } + + // Finally, if we only have the header, we should still return an error + buf1 = buf[:12] + + r = new(Msg) + if err = r.Unpack(buf1); err == nil || err != ErrTruncated { + t.Errorf("error not ErrTruncated from header-only unpack: %v", err) + } +} + +func TestTimeout(t *testing.T) { + // Set up a dummy UDP server that won't respond + addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unable to resolve local udp address: %v", err) + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer conn.Close() + addrstr := conn.LocalAddr().String() + + // Message to send + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + + // Use a channel + timeout to ensure we don't get stuck if the + // Client Timeout is not working properly + done := make(chan struct{}, 2) + + timeout := time.Millisecond + allowable := timeout + (10 * time.Millisecond) + abortAfter := timeout + (100 * time.Millisecond) + + start := time.Now() + + go func() { + c := &Client{Timeout: timeout} + _, _, err := c.Exchange(m, addrstr) + if err == nil { + t.Error("no timeout using Client.Exchange") + } + done <- struct{}{} + }() + + go func() { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + c := &Client{} + _, _, err := c.ExchangeContext(ctx, m, addrstr) + if err == nil { + t.Error("no timeout using Client.ExchangeContext") + } + done <- struct{}{} + }() + + // Wait for both the Exchange and ExchangeContext tests to be done. 
+ for i := 0; i < 2; i++ { + select { + case <-done: + case <-time.After(abortAfter): + } + } + + length := time.Since(start) + + if length > allowable { + t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout) + } +} + +// Check that responses from deduplicated requests aren't shared between callers +func TestConcurrentExchanges(t *testing.T) { + cases := make([]*Msg, 2) + cases[0] = new(Msg) + cases[1] = new(Msg) + cases[1].Truncated = true + for _, m := range cases { + block := make(chan struct{}) + waiting := make(chan struct{}) + + handler := func(w ResponseWriter, req *Msg) { + r := m.Copy() + r.SetReply(req) + + waiting <- struct{}{} + <-block + w.WriteMsg(r) + } + + HandleFunc("miek.nl.", handler) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %s", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSRV) + c := &Client{ + SingleInflight: true, + } + r := make([]*Msg, 2) + + var wg sync.WaitGroup + wg.Add(len(r)) + for i := 0; i < len(r); i++ { + go func(i int) { + r[i], _, _ = c.Exchange(m.Copy(), addrstr) + wg.Done() + }(i) + } + select { + case <-waiting: + case <-time.After(time.Second): + t.FailNow() + } + close(block) + wg.Wait() + + if r[0] == r[1] { + t.Log("Got same response object, expected non-shared responses") + t.Fail() + } + } +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 0000000..0a1f5a9 --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,131 @@ +package dns + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + c := new(ClientConfig) + scanner := bufio.NewScanner(file) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. 
+ name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 1 { + n = 1 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 8 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} + +// NameList returns all of the names that should be queried based on the +// config. It is based off of go's net/dns name building, but it does not +// check the length of the resulting names. +func (c *ClientConfig) NameList(name string) []string { + // if this domain is already fully qualified, no append needed. + if IsFqdn(name) { + return []string{name} + } + + // Check to see if the name has more labels than Ndots. Do this before making + // the domain fully qualified. + hasNdots := CountLabel(name) > c.Ndots + // Make the domain fully qualified. + name = Fqdn(name) + + // Make a list of names based off search. + names := []string{} + + // If name has enough dots, try that first. + if hasNdots { + names = append(names, name) + } + for _, s := range c.Search { + names = append(names, Fqdn(name+s)) + } + // If we didn't have enough dots, try after suffixes. + if !hasNdots { + names = append(names, name) + } + return names +} diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go new file mode 100644 index 0000000..7755a8a --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig_test.go @@ -0,0 +1,87 @@ +package dns + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const normal string = ` +# Comment +domain somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1 +` + +const missingNewline string = ` +domain somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1` // <- NOTE: NO newline. 
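ClientConfigFromFile and NameList together are what a stub resolver needs to turn a relative name plus a resolv.conf into an ordered list of candidate queries. A minimal sketch of that flow against the system resolver configuration (the path and the name "www" are only illustrative, not part of the vendored code):

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// Parse a resolv.conf(5)-style file into servers, search path, ndots, etc.
	cfg, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	// NameList applies the ndots/search rules: a name with enough dots is
	// tried as-is first, otherwise the search suffixes come first.
	for _, fqdn := range cfg.NameList("www") {
		fmt.Printf("try %s against %v port %s\n", fqdn, cfg.Servers, cfg.Port)
	}
}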
+ +func testConfig(t *testing.T, data string) { + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("tempDir: %v", err) + } + defer os.RemoveAll(tempDir) + + path := filepath.Join(tempDir, "resolv.conf") + if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { + t.Fatalf("writeFile: %v", err) + } + cc, err := ClientConfigFromFile(path) + if err != nil { + t.Errorf("error parsing resolv.conf: %v", err) + } + if l := len(cc.Servers); l != 2 { + t.Errorf("incorrect number of nameservers detected: %d", l) + } + if l := len(cc.Search); l != 1 { + t.Errorf("domain directive not parsed correctly: %v", cc.Search) + } else { + if cc.Search[0] != "somedomain.com" { + t.Errorf("domain is unexpected: %v", cc.Search[0]) + } + } +} + +func TestNameserver(t *testing.T) { testConfig(t, normal) } +func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } + +func TestNameList(t *testing.T) { + cfg := ClientConfig{ + Ndots: 1, + } + // fqdn should be only result returned + names := cfg.NameList("miek.nl.") + if len(names) != 1 { + t.Errorf("NameList returned != 1 names: %v", names) + } else if names[0] != "miek.nl." { + t.Errorf("NameList didn't return sent fqdn domain: %v", names[0]) + } + + cfg.Search = []string{ + "test", + } + // Sent domain has NDots and search + names = cfg.NameList("miek.nl") + if len(names) != 2 { + t.Errorf("NameList returned != 2 names: %v", names) + } else if names[0] != "miek.nl." { + t.Errorf("NameList didn't return sent domain first: %v", names[0]) + } else if names[1] != "miek.nl.test." { + t.Errorf("NameList didn't return search last: %v", names[1]) + } + + cfg.Ndots = 2 + // Sent domain has less than NDots and search + names = cfg.NameList("miek.nl") + if len(names) != 2 { + t.Errorf("NameList returned != 2 names: %v", names) + } else if names[0] != "miek.nl.test." { + t.Errorf("NameList didn't return search first: %v", names[0]) + } else if names[1] != "miek.nl." { + t.Errorf("NameList didn't return sent domain last: %v", names[1]) + } +} diff --git a/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/miekg/dns/compress_generate.go new file mode 100644 index 0000000..1a301e9 --- /dev/null +++ b/vendor/github.com/miekg/dns/compress_generate.go @@ -0,0 +1,184 @@ +//+build ignore + +// compression_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will look to see if there are (compressible) names, if so it will add that +// type to compressionLenHelperType and comressionLenSearchType which "fake" the +// compression so that Len() is fast. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" +) + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from compress_generate.go + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. 
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + domainTypes := map[string]bool{} // Types that have a domain name in them (either comressible or not). + cdomainTypes := map[string]bool{} // Types that have a compressible domain name in them (subset of domainType) + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + st, _ := getTypeStruct(o.Type(), scope) + if st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + for i := 1; i < st.NumFields(); i++ { + if _, ok := st.Field(i).Type().(*types.Slice); ok { + if st.Tag(i) == `dns:"domain-name"` { + domainTypes[o.Name()] = true + } + if st.Tag(i) == `dns:"cdomain-name"` { + cdomainTypes[o.Name()] = true + domainTypes[o.Name()] = true + } + continue + } + + switch { + case st.Tag(i) == `dns:"domain-name"`: + domainTypes[o.Name()] = true + case st.Tag(i) == `dns:"cdomain-name"`: + cdomainTypes[o.Name()] = true + domainTypes[o.Name()] = true + } + } + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names + + fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n") + fmt.Fprint(b, "switch x := r.(type) {\n") + for name, _ := range domainTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "case *%s:\n", name) + for i := 1; i < st.NumFields(); i++ { + out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"domain-name"`: + fallthrough + case `dns:"cdomain-name"`: + // For HIP we need to slice over the elements in this slice. + fmt.Fprintf(b, `for i := range x.%s { + compressionLenHelper(c, x.%s[i]) + } +`, st.Field(i).Name(), st.Field(i).Name()) + } + continue + } + + switch { + case st.Tag(i) == `dns:"cdomain-name"`: + fallthrough + case st.Tag(i) == `dns:"domain-name"`: + out(st.Field(i).Name()) + } + } + } + fmt.Fprintln(b, "}\n}\n\n") + + // compressionLenSearchType - search cdomain-tags types for compressible names. + + fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n") + fmt.Fprint(b, "switch x := r.(type) {\n") + for name, _ := range cdomainTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "case *%s:\n", name) + j := 1 + for i := 1; i < st.NumFields(); i++ { + out := func(s string, j int) { + fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name()) + } + + // There are no slice types with names that can be compressed. 
+ + switch { + case st.Tag(i) == `dns:"cdomain-name"`: + out(st.Field(i).Name(), j) + j++ + } + } + k := "k1" + ok := "ok1" + for i := 2; i < j; i++ { + k += fmt.Sprintf(" + k%d", i) + ok += fmt.Sprintf(" && ok%d", i) + } + fmt.Fprintf(b, "return %s, %s\n", k, ok) + } + fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + f, err := os.Create("zcompress.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 0000000..8c4a14e --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,43 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 0000000..c34890e --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,285 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.Response = true + dns.Opcode = request.Opcode + if dns.Opcode == OpcodeQuery { + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit + } + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. 
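Stepping back to CertificateToDANE above: its two numeric arguments mirror the TLSA/SMIMEA wire fields, where selector 0 hashes the whole certificate and 1 only the SubjectPublicKeyInfo, and matching type 0 means the exact data, 1 SHA-256, 2 SHA-512. A minimal sketch that produces the digest portion of a typical "3 1 1" TLSA record (the certificate path is a placeholder):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// "cert.pem" is a placeholder path; any PEM-encoded certificate works.
	pemBytes, err := ioutil.ReadFile("cert.pem")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// Selector 1 = SubjectPublicKeyInfo, matching type 1 = SHA-256,
	// i.e. the digest part of a typical "TLSA 3 1 1" record.
	digest, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("3 1 1", digest)
}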
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { + dns.SetReply(request) + dns.Rcode = rcode + return dns +} + +// SetRcodeFormatError creates a message with FormError set. +func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { + dns.Rcode = RcodeFormatError + dns.Opcode = OpcodeQuery + dns.Response = true + dns.Authoritative = false + dns.Id = request.Id + return dns +} + +// SetUpdate makes the message a dynamic update message. It +// sets the ZONE section to: z, TypeSOA, ClassINET. +func (dns *Msg) SetUpdate(z string) *Msg { + dns.Id = Id() + dns.Response = false + dns.Opcode = OpcodeUpdate + dns.Compress = false // BIND9 cannot handle compression + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetIxfr creates message for requesting an IXFR. +func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Ns = make([]RR, 1) + s := new(SOA) + s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} + s.Serial = serial + s.Ns = ns + s.Mbox = mbox + dns.Question[0] = Question{z, TypeIXFR, ClassINET} + dns.Ns[0] = s + return dns +} + +// SetAxfr creates message for requesting an AXFR. +func (dns *Msg) SetAxfr(z string) *Msg { + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeAXFR, ClassINET} + return dns +} + +// SetTsig appends a TSIG RR to the message. +// This is only a skeleton TSIG RR that is added as the last RR in the +// additional section. The Tsig is calculated when the message is being send. +func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { + t := new(TSIG) + t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} + t.Algorithm = algo + t.Fudge = fudge + t.TimeSigned = uint64(timesigned) + t.OrigId = dns.Id + dns.Extra = append(dns.Extra, t) + return dns +} + +// SetEdns0 appends a EDNS0 OPT RR to the message. +// TSIG should always the last RR in a message. +func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { + e := new(OPT) + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + e.SetUDPSize(udpsize) + if do { + e.SetDo() + } + dns.Extra = append(dns.Extra, e) + return dns +} + +// IsTsig checks if the message has a TSIG record as the last record +// in the additional section. It returns the TSIG record found or nil. +func (dns *Msg) IsTsig() *TSIG { + if len(dns.Extra) > 0 { + if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { + return dns.Extra[len(dns.Extra)-1].(*TSIG) + } + } + return nil +} + +// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 +// record in the additional section will do. It returns the OPT record +// found or nil. +func (dns *Msg) IsEdns0() *OPT { + // EDNS0 is at the end of the additional section, start there. + // We might want to change this to *only* look at the last two + // records. So we see TSIG and/or OPT - this a slightly bigger + // change though. + for i := len(dns.Extra) - 1; i >= 0; i-- { + if dns.Extra[i].Header().Rrtype == TypeOPT { + return dns.Extra[i].(*OPT) + } + } + return nil +} + +// IsDomainName checks if s is a valid domain name, it returns the number of +// labels and true, when a domain name is valid. Note that non fully qualified +// domain name is considered valid, in this case the last label is counted in +// the number of labels. When false is returned the number of labels is not +// defined. 
Also note that this function is extremely liberal; almost any +// string is a valid domain name as the DNS is 8 bit protocol. It checks if each +// label fits in 63 characters, but there is no length check for the entire +// string s. I.e. a domain name longer than 255 characters is considered valid. +func IsDomainName(s string) (labels int, ok bool) { + _, labels, err := packDomainName(s, nil, 0, nil, false) + return labels, err == nil +} + +// IsSubDomain checks if child is indeed a child of the parent. If child and parent +// are the same domain true is returned as well. +func IsSubDomain(parent, child string) bool { + // Entire child is contained in parent + return CompareDomainName(parent, child) == CountLabel(parent) +} + +// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. +// The checking is performed on the binary payload. +func IsMsg(buf []byte) error { + // Header + if len(buf) < 12 { + return errors.New("dns: bad message header") + } + // Header: Opcode + // TODO(miek): more checks here, e.g. check all header bits. + return nil +} + +// IsFqdn checks if a domain name is fully qualified. +func IsFqdn(s string) bool { + l := len(s) + if l == 0 { + return false + } + return s[l-1] == '.' +} + +// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. Returns true +// if the RR set is valid, otherwise false. +func IsRRset(rrset []RR) bool { + if len(rrset) == 0 { + return false + } + if len(rrset) == 1 { + return true + } + rrHeader := rrset[0].Header() + rrType := rrHeader.Rrtype + rrClass := rrHeader.Class + rrName := rrHeader.Name + + for _, rr := range rrset[1:] { + curRRHeader := rr.Header() + if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + // Mismatch between the records, so this is not a valid rrset for + //signing/verifying + return false + } + } + + return true +} + +// Fqdn return the fully qualified domain name from s. +// If s is already fully qualified, it behaves as the identity function. +func Fqdn(s string) string { + if IsFqdn(s) { + return s + } + return s + "." +} + +// Copied from the official Go code. + +// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP +// address suitable for reverse DNS (PTR) record lookups or an error if it fails +// to parse the IP address. +func ReverseAddr(addr string) (arpa string, err error) { + ip := net.ParseIP(addr) + if ip == nil { + return "", &Error{err: "unrecognized address: " + addr} + } + if ip.To4() != nil { + return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + + strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil + } + // Must be IPv6 + buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) + // Add it, in reverse, to the buffer + for i := len(ip) - 1; i >= 0; i-- { + v := ip[i] + buf = append(buf, hexDigit[v&0xF]) + buf = append(buf, '.') + buf = append(buf, hexDigit[v>>4]) + buf = append(buf, '.') + } + // Append "ip6.arpa." and return (buf already has the final .) + buf = append(buf, "ip6.arpa."...) + return string(buf), nil +} + +// String returns the string representation for the type t. +func (t Type) String() string { + if t1, ok := TypeToString[uint16(t)]; ok { + return t1 + } + return "TYPE" + strconv.Itoa(int(t)) +} + +// String returns the string representation for the class c. 
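ReverseAddr, defined a few lines up, is the helper for building PTR query names. A small sketch (the addresses are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	// e.g. "192.0.2.10" becomes "10.2.0.192.in-addr.arpa.";
	// IPv6 addresses come out nibble-reversed under ip6.arpa.
	for _, addr := range []string{"192.0.2.10", "2001:db8::1"} {
		owner, err := dns.ReverseAddr(addr)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("PTR owner for %s: %s\n", addr, owner)
	}
}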
+func (c Class) String() string { + if c1, ok := ClassToString[uint16(c)]; ok { + return c1 + } + return "CLASS" + strconv.Itoa(int(c)) +} + +// String returns the string representation for the name n. +func (n Name) String() string { + return sprintName(string(n)) +} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go new file mode 100644 index 0000000..b329228 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns.go @@ -0,0 +1,104 @@ +package dns + +import "strconv" + +const ( + year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. + defaultTtl = 3600 // Default internal TTL. + + DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes. + MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet. + MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet. +) + +// Error represents a DNS error. +type Error struct{ err string } + +func (e *Error) Error() string { + if e == nil { + return "dns: <nil>" + } + return "dns: " + e.err +} + +// An RR represents a resource record. +type RR interface { + // Header returns the header of an resource record. The header contains + // everything up to the rdata. + Header() *RR_Header + // String returns the text representation of the resource record. + String() string + + // copy returns a copy of the RR + copy() RR + // len returns the length (in octets) of the uncompressed RR in wire format. + len() int + // pack packs an RR into wire format. + pack([]byte, int, map[string]int, bool) (int, error) +} + +// RR_Header is the header all DNS resource records share. +type RR_Header struct { + Name string `dns:"cdomain-name"` + Rrtype uint16 + Class uint16 + Ttl uint32 + Rdlength uint16 // Length of data after header. +} + +// Header returns itself. This is here to make RR_Header implements the RR interface. +func (h *RR_Header) Header() *RR_Header { return h } + +// Just to implement the RR interface. +func (h *RR_Header) copy() RR { return nil } + +func (h *RR_Header) copyHeader() *RR_Header { + r := new(RR_Header) + r.Name = h.Name + r.Rrtype = h.Rrtype + r.Class = h.Class + r.Ttl = h.Ttl + r.Rdlength = h.Rdlength + return r +} + +func (h *RR_Header) String() string { + var s string + + if h.Rrtype == TypeOPT { + s = ";" + // and maybe other things + } + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += Class(h.Class).String() + "\t" + s += Type(h.Rrtype).String() + "\t" + return s +} + +func (h *RR_Header) len() int { + l := len(h.Name) + 1 + l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) + return l +} + +// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
+func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/miekg/dns/dns_bench_test.go new file mode 100644 index 0000000..bccc3d5 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns_bench_test.go @@ -0,0 +1,211 @@ +package dns + +import ( + "net" + "testing" +) + +func BenchmarkMsgLength(b *testing.B) { + b.StopTimer() + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + b.StartTimer() + for i := 0; i < b.N; i++ { + msg.Len() + } +} + +func BenchmarkMsgLengthPack(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = msg.Pack() + } +} + +func BenchmarkPackDomainName(b *testing.B) { + name1 := "12345678901234567890123456789012345.12345678.123." + buf := make([]byte, len(name1)+1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackDomainName(name1, buf, 0, nil, false) + } +} + +func BenchmarkUnpackDomainName(b *testing.B) { + name1 := "12345678901234567890123456789012345.12345678.123." + buf := make([]byte, len(name1)+1) + _, _ = PackDomainName(name1, buf, 0, nil, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackDomainName(buf, 0) + } +} + +func BenchmarkUnpackDomainNameUnprintable(b *testing.B) { + name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123." + buf := make([]byte, len(name1)+1) + _, _ = PackDomainName(name1, buf, 0, nil, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackDomainName(buf, 0) + } +} + +func BenchmarkCopy(b *testing.B) { + b.ReportAllocs() + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + rr, _ = NewRR("miek.nl. 
2311 IN A 127.0.0.1") + m.Extra = []RR{rr} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Copy() + } +} + +func BenchmarkPackA(b *testing.B) { + a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} + + buf := make([]byte, a.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(a, buf, 0, nil, false) + } +} + +func BenchmarkUnpackA(b *testing.B) { + a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} + + buf := make([]byte, a.len()) + PackRR(a, buf, 0, nil, false) + a = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackMX(b *testing.B) { + m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} + + buf := make([]byte, m.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(m, buf, 0, nil, false) + } +} + +func BenchmarkUnpackMX(b *testing.B) { + m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} + + buf := make([]byte, m.len()) + PackRR(m, buf, 0, nil, false) + m = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackAAAAA(b *testing.B) { + aaaa, _ := NewRR(". IN A ::1") + + buf := make([]byte, aaaa.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(aaaa, buf, 0, nil, false) + } +} + +func BenchmarkUnpackAAAA(b *testing.B) { + aaaa, _ := NewRR(". IN A ::1") + + buf := make([]byte, aaaa.len()) + PackRR(aaaa, buf, 0, nil, false) + aaaa = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackMsg(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + buf := make([]byte, 512) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = msg.PackBuffer(buf) + } +} + +func BenchmarkUnpackMsg(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." 
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + msgBuf, _ := msg.Pack() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = msg.Unpack(msgBuf) + } +} + +func BenchmarkIdGeneration(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = id() + } +} diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go new file mode 100644 index 0000000..dbfe253 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns_test.go @@ -0,0 +1,450 @@ +package dns + +import ( + "encoding/hex" + "net" + "testing" +) + +func TestPackUnpack(t *testing.T) { + out := new(Msg) + out.Answer = make([]RR, 1) + key := new(DNSKEY) + key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1} + key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600} + key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" + + out.Answer[0] = key + msg, err := out.Pack() + if err != nil { + t.Error("failed to pack msg with DNSKEY") + } + in := new(Msg) + if in.Unpack(msg) != nil { + t.Error("failed to unpack msg with DNSKEY") + } + + sig := new(RRSIG) + sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2, + OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.", + Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"} + sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600} + + out.Answer[0] = sig + msg, err = out.Pack() + if err != nil { + t.Error("failed to pack msg with RRSIG") + } + + if in.Unpack(msg) != nil { + t.Error("failed to unpack msg with RRSIG") + } +} + +func TestPackUnpack2(t *testing.T) { + m := new(Msg) + m.Extra = make([]RR, 1) + m.Answer = make([]RR, 1) + dom := "miek.nl." + rr := new(A) + rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} + rr.A = net.IPv4(127, 0, 0, 1) + + x := new(TXT) + x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x.Txt = []string{"heelalaollo"} + + m.Extra[0] = x + m.Answer[0] = rr + _, err := m.Pack() + if err != nil { + t.Error("Packing failed: ", err) + return + } +} + +func TestPackUnpack3(t *testing.T) { + m := new(Msg) + m.Extra = make([]RR, 2) + m.Answer = make([]RR, 1) + dom := "miek.nl." 
+ rr := new(A) + rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} + rr.A = net.IPv4(127, 0, 0, 1) + + x1 := new(TXT) + x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x1.Txt = []string{} + + x2 := new(TXT) + x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x2.Txt = []string{"heelalaollo"} + + m.Extra[0] = x1 + m.Extra[1] = x2 + m.Answer[0] = rr + b, err := m.Pack() + if err != nil { + t.Error("packing failed: ", err) + return + } + + var unpackMsg Msg + err = unpackMsg.Unpack(b) + if err != nil { + t.Error("unpacking failed") + return + } +} + +func TestBailiwick(t *testing.T) { + yes := map[string]string{ + "miek1.nl": "miek1.nl", + "miek.nl": "ns.miek.nl", + ".": "miek.nl", + } + for parent, child := range yes { + if !IsSubDomain(parent, child) { + t.Errorf("%s should be child of %s", child, parent) + t.Errorf("comparelabels %d", CompareDomainName(parent, child)) + t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) + } + } + no := map[string]string{ + "www.miek.nl": "ns.miek.nl", + "m\\.iek.nl": "ns.miek.nl", + "w\\.iek.nl": "w.iek.nl", + "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name + "miek.nl": ".", + } + for parent, child := range no { + if IsSubDomain(parent, child) { + t.Errorf("%s should not be child of %s", child, parent) + t.Errorf("comparelabels %d", CompareDomainName(parent, child)) + t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) + } + } +} + +func TestPack(t *testing.T) { + rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} + m := new(Msg) + var err error + m.Answer = make([]RR, 1) + for _, r := range rr { + m.Answer[0], err = NewRR(r) + if err != nil { + t.Errorf("failed to create RR: %v", err) + continue + } + if _, err := m.Pack(); err != nil { + t.Errorf("packing failed: %v", err) + } + } + x := new(Msg) + ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") + ns.(*NS).Ns = "a.ntpns.org" + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + // This crashes due to the fact the a.ntpns.org isn't a FQDN + // How to recover() from a remove panic()? + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Answer = make([]RR, 1) + x.Answer[0], err = NewRR(rr[0]) + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Question = make([]Question, 1) + x.Question[0] = Question{";sd#eddddséâèµââ
â¥âxzztsestxssweewwsssstx@s@Zåµe@cn.pool.ntp.org.", TypeA, ClassINET} + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } +} + +func TestPackNAPTR(t *testing.T) { + for _, n := range []string{ + `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, + `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, + `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, + } { + rr, _ := NewRR(n) + msg := make([]byte, rr.len()) + if off, err := PackRR(rr, msg, 0, nil, false); err != nil { + t.Errorf("packing failed: %v", err) + t.Errorf("length %d, need more than %d", rr.len(), off) + } else { + t.Logf("buf size needed: %d", off) + } + } +} + +func TestCompressLength(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl", TypeMX) + ul := m.Len() + m.Compress = true + if ul != m.Len() { + t.Fatalf("should be equal") + } +} + +// Does the predicted length match final packed length? +func TestMsgCompressLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", + msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) + } + } +} + +func TestMsgLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", + msg.Question[0].Name, predicted, len(buf)) + } + } +} + +func TestMsgLength2(t *testing.T) { + // Serialized replies + var testMessages = []string{ + // google.com. IN A? + "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", + // amazon.com. IN A? 
(reply has no EDNS0 record) + // TODO(miek): this one is off-by-one, need to find out why + //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", + // yahoo.com. IN A? + "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", + // microsoft.com. IN A? + "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", + // google.com. IN MX? + "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", + // reddit.com. IN A? 
+ "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", + } + + for i, hexData := range testMessages { + // we won't fail the decoding of the hex + input, _ := hex.DecodeString(hexData) + + m := new(Msg) + m.Unpack(input) + m.Compress = true + lenComp := m.Len() + b, _ := m.Pack() + pacComp := len(b) + m.Compress = false + lenUnComp := m.Len() + b, _ = m.Pack() + pacUnComp := len(b) + if pacComp+1 != lenComp { + t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) + } + if pacUnComp+1 != lenUnComp { + t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) + } + } +} + +func TestMsgLengthCompressionMalformed(t *testing.T) { + // SOA with empty hostmaster, which is illegal + soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, + Ns: ".", + Mbox: "", + Serial: 0, + Refresh: 28800, + Retry: 7200, + Expire: 604800, + Minttl: 60} + m := new(Msg) + m.Compress = true + m.Ns = []RR{soa} + m.Len() // Should not crash. +} + +func TestMsgCompressLength2(t *testing.T) { + msg := new(Msg) + msg.Compress = true + msg.SetQuestion(Fqdn("bliep."), TypeANY) + msg.Answer = append(msg.Answer, &SRV{Hdr: RR_Header{Name: "blaat.", Rrtype: 0x21, Class: 0x1, Ttl: 0x3c}, Port: 0x4c57, Target: "foo.bar."}) + msg.Extra = append(msg.Extra, &A{Hdr: RR_Header{Name: "foo.bar.", Rrtype: 0x1, Class: 0x1, Ttl: 0x3c}, A: net.IP{0xac, 0x11, 0x0, 0x3}}) + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted != len(buf) { + t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", + msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) + } +} + +func TestToRFC3597(t *testing.T) { + a, _ := NewRR("miek.nl. IN A 10.0.1.1") + x := new(RFC3597) + x.ToRFC3597(a) + if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { + t.Errorf("string mismatch, got: %s", x) + } + + b, _ := NewRR("miek.nl. IN MX 10 mx.miek.nl.") + x.ToRFC3597(b) + if x.String() != `miek.nl. 3600 CLASS1 TYPE15 \# 14 000a026d78046d69656b026e6c00` { + t.Errorf("string mismatch, got: %s", x) + } +} + +func TestNoRdataPack(t *testing.T) { + data := make([]byte, 1024) + for typ, fn := range TypeToRR { + r := fn() + *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} + _, err := PackRR(r, data, 0, nil, false) + if err != nil { + t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err) + } + } +} + +func TestNoRdataUnpack(t *testing.T) { + data := make([]byte, 1024) + for typ, fn := range TypeToRR { + if typ == TypeSOA || typ == TypeTSIG { + // SOA, TSIG will not be seen (like this) in dyn. updates? 
+ continue + } + r := fn() + *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} + off, err := PackRR(r, data, 0, nil, false) + if err != nil { + // Should always works, TestNoDataPack should have caught this + t.Errorf("failed to pack RR: %v", err) + continue + } + rr, _, err := UnpackRR(data[:off], 0) + if err != nil { + t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err) + } + t.Log(rr) + } +} + +func TestRdataOverflow(t *testing.T) { + rr := new(RFC3597) + rr.Hdr.Name = "." + rr.Hdr.Class = ClassINET + rr.Hdr.Rrtype = 65280 + rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF)) + buf := make([]byte, 0xFFFF*2) + if _, err := PackRR(rr, buf, 0, nil, false); err != nil { + t.Fatalf("maximum size rrdata pack failed: %v", err) + } + rr.Rdata += "00" + if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata { + t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err) + } +} + +func TestCopy(t *testing.T) { + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL + rr1 := Copy(rr) + if rr.String() != rr1.String() { + t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) + } +} + +func TestMsgCopy(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + + m1 := m.Copy() + if m.String() != m1.String() { + t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) + } + + m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") + if m.String() == m1.String() { + t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) + } + + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") + m1.Answer = append(m1.Answer, rr) + if m1.Ns[0].String() == m1.Answer[1].String() { + t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) + } +} + +func TestMsgPackBuffer(t *testing.T) { + var testMessages = []string{ + // news.ycombinator.com.in.escapemg.com. IN A, response + "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10", + + // news.ycombinator.com.in.escapemg.com. IN A, question + "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001", + + "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001", + } + + for i, hexData := range testMessages { + // we won't fail the decoding of the hex + input, _ := hex.DecodeString(hexData) + m := new(Msg) + if err := m.Unpack(input); err != nil { + t.Errorf("packet %d failed to unpack", i) + continue + } + t.Logf("packet %d %s", i, m.String()) + } +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 0000000..3bd5538 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,720 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" +) + +// DNSSEC encryption algorithm codes. 
+const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// AlgorithmToString is a map of algorithm IDs to algorithm names. +var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// HashToString is a map of hash IDs to names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// StringToHash is a map of names to hash IDs. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. +type dnskeyWireFmt struct { + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` + /* Nothing is left out */ +} + +func divRoundUp(a, b int) int { + return (a + b - 1) / b +} + +// KeyTag calculates the keytag (or key-id) of the DNSKEY. +func (k *DNSKEY) KeyTag() uint16 { + if k == nil { + return 0 + } + var keytag int + switch k.Algorithm { + case RSAMD5: + // Look at the bottom two bytes of the modules, which the last + // item in the pubkey. We could do this faster by looking directly + // at the base64 values. But I'm lazy. + modulus, _ := fromBase64([]byte(k.PublicKey)) + if len(modulus) > 1 { + x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) + keytag = int(x) + } + default: + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return 0 + } + wire = wire[:n] + for i, v := range wire { + if i&1 != 0 { + keytag += int(v) // must be larger than uint32 + } else { + keytag += int(v) << 8 + } + } + keytag += (keytag >> 16) & 0xFFFF + keytag &= 0xFFFF + } + return uint16(keytag) +} + +// ToDS converts a DNSKEY record to a DS record. 
+func (k *DNSKEY) ToDS(h uint8) *DS { + if k == nil { + return nil + } + ds := new(DS) + ds.Hdr.Name = k.Hdr.Name + ds.Hdr.Class = k.Hdr.Class + ds.Hdr.Rrtype = TypeDS + ds.Hdr.Ttl = k.Hdr.Ttl + ds.Algorithm = k.Algorithm + ds.DigestType = h + ds.KeyTag = k.KeyTag() + + keywire := new(dnskeyWireFmt) + keywire.Flags = k.Flags + keywire.Protocol = k.Protocol + keywire.Algorithm = k.Algorithm + keywire.PublicKey = k.PublicKey + wire := make([]byte, DefaultMsgSize) + n, err := packKeyWire(keywire, wire) + if err != nil { + return nil + } + wire = wire[:n] + + owner := make([]byte, 255) + off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) + if err1 != nil { + return nil + } + owner = owner[:off] + // RFC4034: + // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); + // "|" denotes concatenation + // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. + + var hash crypto.Hash + switch h { + case SHA1: + hash = crypto.SHA1 + case SHA256: + hash = crypto.SHA256 + case SHA384: + hash = crypto.SHA384 + case SHA512: + hash = crypto.SHA512 + default: + return nil + } + + s := hash.New() + s.Write(owner) + s.Write(wire) + ds.Digest = hex.EncodeToString(s.Sum(nil)) + return ds +} + +// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. +func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { + c := &CDNSKEY{DNSKEY: *k} + c.Hdr = *k.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDNSKEY + return c +} + +// ToCDS converts a DS record to a CDS record. +func (d *DS) ToCDS() *CDS { + c := &CDS{DS: *d} + c.Hdr = *d.Hdr.copyHeader() + c.Hdr.Rrtype = TypeCDS + return c +} + +// Sign signs an RRSet. The signature needs to be filled in with the values: +// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied +// from the RRset. Sign returns a non-nill error when the signing went OK. +// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non +// zero, it is used as-is, otherwise the TTL of the RRset is used as the +// OrigTTL. 
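The Sign documentation above spells out which RRSIG fields the caller must seed (Inception, Expiration, KeyTag, SignerName, Algorithm); a nil return means signing succeeded. Below is a minimal end-to-end sketch with an ECDSA P-256 zone key, re-checked with Verify and ValidityPeriod and finished with ToDS for the delegation side; it assumes the package's DNSKEY.Generate helper, which is defined outside this excerpt, and uses "example.org." purely as an example zone:

package main

import (
	"crypto"
	"fmt"
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	// A zone key for the purely illustrative zone "example.org.".
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256) // fills in key.PublicKey
	if err != nil {
		log.Fatal(err)
	}

	rr, _ := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	rrset := []dns.RR{rr}

	// Seed the fields Sign does not derive from the RRset itself.
	sig := &dns.RRSIG{
		Inception:  uint32(time.Now().Unix()),
		Expiration: uint32(time.Now().Add(14 * 24 * time.Hour).Unix()),
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
	}
	if err := sig.Sign(priv.(crypto.Signer), rrset); err != nil {
		log.Fatal(err)
	}

	// Verify covers only the cryptography; the validity window is a separate check.
	if err := sig.Verify(key, rrset); err != nil {
		log.Fatal(err)
	}
	if !sig.ValidityPeriod(time.Time{}) {
		log.Fatal("signature outside its validity period")
	}

	// The delegation-side record for the same key.
	fmt.Println(key.ToDS(dns.SHA256))
}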
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + h := hash.New() + h.Write(signdata) + h.Write(wire) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. +// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. +func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { + // First the easy checks + if !IsRRset(rrset) { + return ErrRRset + } + if rr.KeyTag != k.KeyTag() { + return ErrKey + } + if rr.Hdr.Class != k.Hdr.Class { + return ErrKey + } + if rr.Algorithm != k.Algorithm { + return ErrKey + } + if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) { + return ErrKey + } + if k.Protocol != 3 { + return ErrKey + } + + // IsRRset checked that we have at least one RR and that the RRs in + // the set have consistent type, class, and name. 
Also check that the type and
+	// class match the RRSIG record.
+	if rrset[0].Header().Class != rr.Hdr.Class {
+		return ErrRRset
+	}
+	if rrset[0].Header().Rrtype != rr.TypeCovered {
+		return ErrRRset
+	}
+
+	// RFC 4035 5.3.2. Reconstructing the Signed Data
+	// Copy the sig, except the rrsig data
+	sigwire := new(rrsigWireFmt)
+	sigwire.TypeCovered = rr.TypeCovered
+	sigwire.Algorithm = rr.Algorithm
+	sigwire.Labels = rr.Labels
+	sigwire.OrigTtl = rr.OrigTtl
+	sigwire.Expiration = rr.Expiration
+	sigwire.Inception = rr.Inception
+	sigwire.KeyTag = rr.KeyTag
+	sigwire.SignerName = strings.ToLower(rr.SignerName)
+	// Create the desired binary blob
+	signeddata := make([]byte, DefaultMsgSize)
+	n, err := packSigWire(sigwire, signeddata)
+	if err != nil {
+		return err
+	}
+	signeddata = signeddata[:n]
+	wire, err := rawSignatureData(rrset, rr)
+	if err != nil {
+		return err
+	}
+
+	sigbuf := rr.sigBuf() // Get the binary signature data
+	if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+		// TODO(miek)
+		// remove the domain name and assume it's ours?
+	}
+
+	hash, ok := AlgorithmToHash[rr.Algorithm]
+	if !ok {
+		return ErrAlg
+	}
+
+	switch rr.Algorithm {
+	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
+		// TODO(mg): this can be done quicker, i.e. cache the pubkey data somewhere??
+		pubkey := k.publicKeyRSA() // Get the key
+		if pubkey == nil {
+			return ErrKey
+		}
+
+		h := hash.New()
+		h.Write(signeddata)
+		h.Write(wire)
+		return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
+
+	case ECDSAP256SHA256, ECDSAP384SHA384:
+		pubkey := k.publicKeyECDSA()
+		if pubkey == nil {
+			return ErrKey
+		}
+
+		// Split sigbuf into the r and s coordinates
+		r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
+		s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
+
+		h := hash.New()
+		h.Write(signeddata)
+		h.Write(wire)
+		if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
+			return nil
+		}
+		return ErrSig
+
+	default:
+		return ErrAlg
+	}
+}
+
+// ValidityPeriod uses RFC 1982 serial arithmetic to calculate
+// if a signature period is valid. If t is the zero time, the
+// current time is used instead. Returns true if the signature
+// is valid at the given time, otherwise returns false.
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
+	var utc int64
+	if t.IsZero() {
+		utc = time.Now().UTC().Unix()
+	} else {
+		utc = t.UTC().Unix()
+	}
+	modi := (int64(rr.Inception) - utc) / year68
+	mode := (int64(rr.Expiration) - utc) / year68
+	ti := int64(rr.Inception) + (modi * year68)
+	te := int64(rr.Expiration) + (mode * year68)
+	return ti <= utc && utc <= te
+}
+
+// Return the signature's base64-decoded sigdata as a byte slice.
+func (rr *RRSIG) sigBuf() []byte {
+	sigbuf, err := fromBase64([]byte(rr.Signature))
+	if err != nil {
+		return nil
+	}
+	return sigbuf
+}
+
+// publicKeyRSA returns the RSA public key from a DNSKEY record.
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
+	keybuf, err := fromBase64([]byte(k.PublicKey))
+	if err != nil {
+		return nil
+	}
+
+	// RFC 2537/3110, section 2.
RSA Public KEY Resource Records + // Length is in the 0th byte, unless its zero, then it + // it in bytes 1 and 2 and its a 16 bit number + explen := uint16(keybuf[0]) + keyoff := 1 + if explen == 0 { + explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) + keyoff = 3 + } + pubkey := new(rsa.PublicKey) + + pubkey.N = big.NewInt(0) + shift := uint64((explen - 1) * 8) + expo := uint64(0) + for i := int(explen - 1); i > 0; i-- { + expo += uint64(keybuf[keyoff+i]) << shift + shift -= 8 + } + // Remainder + expo += uint64(keybuf[keyoff]) + if expo > (2<<31)+1 { + // Larger expo than supported. + // println("dns: F5 primes (or larger) are not supported") + return nil + } + pubkey.E = int(expo) + + pubkey.N.SetBytes(keybuf[keyoff+int(explen):]) + return pubkey +} + +// publicKeyECDSA returns the Curve public key from the DNSKEY record. +func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + pubkey := new(ecdsa.PublicKey) + switch k.Algorithm { + case ECDSAP256SHA256: + pubkey.Curve = elliptic.P256() + if len(keybuf) != 64 { + // wrongly encoded key + return nil + } + case ECDSAP384SHA384: + pubkey.Curve = elliptic.P384() + if len(keybuf) != 96 { + // Wrongly encoded key + return nil + } + } + pubkey.X = big.NewInt(0) + pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) + pubkey.Y = big.NewInt(0) + pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) + return pubkey +} + +func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { + keybuf, err := fromBase64([]byte(k.PublicKey)) + if err != nil { + return nil + } + if len(keybuf) < 22 { + return nil + } + t, keybuf := int(keybuf[0]), keybuf[1:] + size := 64 + t*8 + q, keybuf := keybuf[:20], keybuf[20:] + if len(keybuf) != 3*size { + return nil + } + p, keybuf := keybuf[:size], keybuf[size:] + g, y := keybuf[:size], keybuf[size:] + pubkey := new(dsa.PublicKey) + pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) + pubkey.Parameters.P = big.NewInt(0).SetBytes(p) + pubkey.Parameters.G = big.NewInt(0).SetBytes(g) + pubkey.Y = big.NewInt(0).SetBytes(y) + return pubkey +} + +type wireSlice [][]byte + +func (p wireSlice) Len() int { return len(p) } +func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p wireSlice) Less(i, j int) bool { + _, ioff, _ := UnpackDomainName(p[i], 0) + _, joff, _ := UnpackDomainName(p[j], 0) + return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 +} + +// Return the raw signature data. +func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { + wires := make(wireSlice, len(rrset)) + for i, r := range rrset { + r1 := r.copy() + r1.Header().Ttl = s.OrigTtl + labels := SplitDomainName(r1.Header().Name) + // 6.2. Canonical RR Form. (4) - wildcards + if len(labels) > int(s.Labels) { + // Wildcard + r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." + } + // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. 
+ switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) + } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 0000000..5e4b774 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,156 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)).Bytes() + if len(i) < 256 { + buf = make([]byte, 1, 1+len(i)) + buf[0] = uint8(len(i)) + } else { + buf = make([]byte, 3, 3+len(i)) + buf[0] = 0 + buf[1] = uint8(len(i) >> 8) + buf[2] = uint8(len(i)) + } + buf = append(buf, i...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) 
+ return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 0000000..4f8d830 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,249 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. 
+func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 0000000..56f3ea9 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,85 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). +// It needs some info from the key (the algorithm), so its a method of the DNSKEY +// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey +func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { + algorithm := strconv.Itoa(int(r.Algorithm)) + algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" + + switch p := p.(type) { + case *rsa.PrivateKey: + modulus := toBase64(p.PublicKey.N.Bytes()) + e := big.NewInt(int64(p.PublicKey.E)) + publicExponent := toBase64(e.Bytes()) + privateExponent := toBase64(p.D.Bytes()) + prime1 := toBase64(p.Primes[0].Bytes()) + prime2 := toBase64(p.Primes[1].Bytes()) + // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm + // and from: http://code.google.com/p/go/issues/detail?id=987 + one := big.NewInt(1) + p1 := big.NewInt(0).Sub(p.Primes[0], one) + q1 := big.NewInt(0).Sub(p.Primes[1], one) + exp1 := big.NewInt(0).Mod(p.D, p1) + exp2 := big.NewInt(0).Mod(p.D, q1) + coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) + + exponent1 := toBase64(exp1.Bytes()) + exponent2 := toBase64(exp2.Bytes()) + coefficient := toBase64(coeff.Bytes()) + + return format + + "Algorithm: " + algorithm + "\n" + + "Modulus: " + modulus + "\n" + + "PublicExponent: " + publicExponent + "\n" + + "PrivateExponent: " + privateExponent + "\n" + + "Prime1: " + prime1 + "\n" + + "Prime2: " + prime2 + "\n" + + "Exponent1: " + exponent1 + "\n" + + "Exponent2: " + exponent2 + "\n" + + "Coefficient: " + coefficient + "\n" + + case *ecdsa.PrivateKey: + var intlen int + switch r.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + private := toBase64(intToBytes(p.D, intlen)) + return format + + "Algorithm: " + algorithm + "\n" + + "PrivateKey: " + private + "\n" + + case *dsa.PrivateKey: + T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) + prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) + subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) + base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) + priv := toBase64(intToBytes(p.X, 20)) + pub := 
toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) + return format + + "Algorithm: " + algorithm + "\n" + + "Prime(p): " + prime + "\n" + + "Subprime(q): " + subprime + "\n" + + "Base(g): " + base + "\n" + + "Private_value(x): " + priv + "\n" + + "Public_value(y): " + pub + "\n" + + default: + return "" + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/miekg/dns/dnssec_test.go new file mode 100644 index 0000000..ca085ed --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_test.go @@ -0,0 +1,733 @@ +package dns + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "reflect" + "strings" + "testing" + "time" +) + +func getKey() *DNSKEY { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + return key +} + +func getSoa() *SOA { + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + return soa +} + +func TestGenerateEC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = ECDSAP256SHA256 + privkey, _ := key.Generate(256) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateDSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = DSA + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestSecure(t *testing.T) { + soa := getSoa() + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = TypeSOA + sig.Algorithm = RSASHA256 + sig.Labels = 2 + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.OrigTtl = 14400 + sig.KeyTag = 12051 + sig.SignerName = "miek.nl." + sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M=" + + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + // It should validate. Period is checked separately, so this will keep on working + if sig.Verify(key, []RR{soa}) != nil { + t.Error("failure to validate") + } +} + +func TestSignature(t *testing.T) { + sig := new(RRSIG) + sig.Hdr.Name = "miek.nl." + sig.Hdr.Class = ClassINET + sig.Hdr.Ttl = 3600 + sig.TypeCovered = TypeDNSKEY + sig.Algorithm = RSASHA1 + sig.Labels = 2 + sig.OrigTtl = 4000 + sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970 + sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970 + sig.KeyTag = 34641 + sig.SignerName = "miek.nl." + sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" + + // Should not be valid + if sig.ValidityPeriod(time.Now()) { + t.Error("should not be valid") + } + + sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980 + sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100 + if !sig.ValidityPeriod(time.Now()) { + t.Error("should be valid") + } +} + +func TestSignVerify(t *testing.T) { + // The record we want to sign + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + soa1 := new(SOA) + soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa1.Ns = "open.nlnetlabs.nl." + soa1.Mbox = "miekg.atoom.net." + soa1.Serial = 1293945905 + soa1.Refresh = 14400 + soa1.Retry = 3600 + soa1.Expire = 604800 + soa1.Minttl = 86400 + + srv := new(SRV) + srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0} + srv.Port = 1000 + srv.Weight = 800 + srv.Target = "web1.miek.nl." + + hinfo := &HINFO{ + Hdr: RR_Header{ + Name: "miek.nl.", + Rrtype: TypeHINFO, + Class: ClassINET, + Ttl: 3789, + }, + Cpu: "X", + Os: "Y", + } + + // With this key + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(512) + + // Fill in the values of the Sig, before signing + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = soa.Hdr.Rrtype + sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3 + sig.OrigTtl = soa.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() // Get the keyfrom the Key + sig.SignerName = key.Hdr.Name + sig.Algorithm = RSASHA256 + + for _, r := range []RR{soa, soa1, srv, hinfo} { + if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{r}); err != nil { + t.Error("failure to sign the record:", err) + continue + } + if err := sig.Verify(key, []RR{r}); err != nil { + t.Error("failure to validate") + continue + } + t.Logf("validated: %s", r.Header().Name) + } +} + +func Test65534(t *testing.T) { + t6 := new(RFC3597) + t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0} + t6.Rdata = "505D870001" + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
+ key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(1024) + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = t6.Hdr.Rrtype + sig.Labels = uint8(CountLabel(t6.Hdr.Name)) + sig.OrigTtl = t6.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() + sig.SignerName = key.Hdr.Name + sig.Algorithm = RSASHA256 + if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{t6}); err != nil { + t.Error(err) + t.Error("failure to sign the TYPE65534 record") + } + if err := sig.Verify(key, []RR{t6}); err != nil { + t.Error(err) + t.Error("failure to validate") + } else { + t.Logf("validated: %s", t6.Header().Name) + } +} + +func TestDnskey(t *testing.T) { + pubkey, err := ReadRR(strings.NewReader(` +miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} +`), "Kmiek.nl.+010+05240.key") + if err != nil { + t.Fatal(err) + } + privStr := `Private-key-format: v1.3 +Algorithm: 10 (RSASHA512) +Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= +PublicExponent: AQAB +PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE= +Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ== +Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw== +Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== +Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== +Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== +` + privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), + "Kmiek.nl.+010+05240.private") + if err != nil { + t.Fatal(err) + } + if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { + t.Error("pubkey is not what we've read") + } + if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { + t.Error("privkey is not what we've read") + t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) + } +} + +func TestTag(t *testing.T) { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + tag := key.KeyTag() + if tag != 12051 { + t.Errorf("wrong key tag: %d for key %v", tag, key) + } +} + +func TestKeyRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
+ key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + priv, _ := key.Generate(2048) + + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = TypeSOA + sig.Algorithm = RSASHA256 + sig.Labels = 2 + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.OrigTtl = soa.Hdr.Ttl + sig.KeyTag = key.KeyTag() + sig.SignerName = key.Hdr.Name + + if err := sig.Sign(priv.(*rsa.PrivateKey), []RR{soa}); err != nil { + t.Error("failed to sign") + return + } + if err := sig.Verify(key, []RR{soa}); err != nil { + t.Error("failed to verify") + } +} + +func TestKeyToDS(t *testing.T) { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + ds := key.ToDS(SHA1) + if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" { + t.Errorf("wrong DS digest for SHA1\n%v", ds) + } +} + +func TestSignRSA(t *testing.T) { + pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ" + + priv := `Private-key-format: v1.3 +Algorithm: 5 (RSASHA1) +Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k= +PublicExponent: AQAB +PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk= +Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw== +Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw== +Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ== +Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw== +Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw== +Created: 20110302104537 +Publish: 20110302104537 +Activate: 20110302104537` + + xk, _ := NewRR(pub) + k := xk.(*DNSKEY) + p, err := k.NewPrivateKey(priv) + if err != nil { + t.Error(err) + } + switch priv := p.(type) { + case *rsa.PrivateKey: + if 65537 != priv.PublicKey.E { + t.Error("exponenent should be 65537") + } + default: + t.Errorf("we should have read an RSA key: %v", priv) + } + if k.KeyTag() != 37350 { + t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k) + } + + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." 
+ soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = k.KeyTag() + sig.SignerName = k.Hdr.Name + sig.Algorithm = k.Algorithm + + sig.Sign(p.(*rsa.PrivateKey), []RR{soa}) + if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" { + t.Errorf("signature is not correct: %v", sig) + } +} + +func TestSignVerifyECDSA(t *testing.T) { + pub := `example.net. 3600 IN DNSKEY 257 3 14 ( + xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 + w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 + /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` + priv := `Private-key-format: v1.2 +Algorithm: 14 (ECDSAP384SHA384) +PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` + + eckey, err := NewRR(pub) + if err != nil { + t.Fatal(err) + } + privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + // TODO: Create separate test for this + ds := eckey.(*DNSKEY).ToDS(SHA384) + if ds.KeyTag != 10771 { + t.Fatal("wrong keytag on DS") + } + if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { + t.Fatal("wrong DS Digest") + } + a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") + sig := new(RRSIG) + sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} + sig.Expiration, _ = StringToTime("20100909102025") + sig.Inception, _ = StringToTime("20100812102025") + sig.KeyTag = eckey.(*DNSKEY).KeyTag() + sig.SignerName = eckey.(*DNSKEY).Hdr.Name + sig.Algorithm = eckey.(*DNSKEY).Algorithm + + if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{a}) != nil { + t.Fatal("failure to sign the record") + } + + if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil { + t.Fatalf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", + eckey.(*DNSKEY).String(), + a.String(), + sig.String(), + eckey.(*DNSKEY).PrivateKeyString(privkey), + err, + ) + } +} + +func TestSignVerifyECDSA2(t *testing.T) { + srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") + if err != nil { + t.Fatal(err) + } + srv := srv1.(*SRV) + + // With this key + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." 
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = ECDSAP256SHA256 + privkey, err := key.Generate(256) + if err != nil { + t.Fatal("failure to generate key") + } + + // Fill in the values of the Sig, before signing + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = srv.Hdr.Rrtype + sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 + sig.OrigTtl = srv.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() // Get the keyfrom the Key + sig.SignerName = key.Hdr.Name + sig.Algorithm = ECDSAP256SHA256 + + if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { + t.Fatal("failure to sign the record") + } + + err = sig.Verify(key, []RR{srv}) + if err != nil { + t.Logf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", + key.String(), + srv.String(), + sig.String(), + key.PrivateKeyString(privkey), + err, + ) + } +} + +// Here the test vectors from the relevant RFCs are checked. +// rfc6605 6.1 +func TestRFC6605P256(t *testing.T) { + exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 ( + GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb + krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )` + exPriv := `Private-key-format: v1.2 +Algorithm: 13 (ECDSAP256SHA256) +PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } + priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) + if err != nil { + t.Fatal(err) + } + + exDS := `example.net. 3600 IN DS 55648 13 2 ( + b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 + e2770ce6d6e37df61d17 )` + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } + ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) + if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { + t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) + } + + exA := `www.example.net. 3600 IN A 192.0.2.1` + exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 ( + 20100909100439 20100812100439 55648 example.net. + qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA + yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) + if err != nil { + t.Fatal(err) + } + if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate the spec RRSIG: %v", err) + } + + ourRRSIG := &RRSIG{ + Hdr: RR_Header{ + Ttl: rrA.Header().Ttl, + }, + KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), + SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, + Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, + } + ourRRSIG.Expiration, _ = StringToTime("20100909100439") + ourRRSIG.Inception, _ = StringToTime("20100812100439") + err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) + if err != nil { + t.Fatal(err) + } + + if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate our RRSIG: %v", err) + } + + // Signatures are randomized + rrRRSIG.(*RRSIG).Signature = "" + ourRRSIG.Signature = "" + if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { + t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) + } +} + +// rfc6605 6.2 +func TestRFC6605P384(t *testing.T) { + exDNSKEY := `example.net. 
3600 IN DNSKEY 257 3 14 ( + xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 + w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 + /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` + exPriv := `Private-key-format: v1.2 +Algorithm: 14 (ECDSAP384SHA384) +PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } + priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) + if err != nil { + t.Fatal(err) + } + + exDS := `example.net. 3600 IN DS 10771 14 4 ( + 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 + d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 + 6df983d6 )` + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } + ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) + if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { + t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) + } + + exA := `www.example.net. 3600 IN A 192.0.2.1` + exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 ( + 20100909102025 20100812102025 10771 example.net. + /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP + 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz + WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) + if err != nil { + t.Fatal(err) + } + if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate the spec RRSIG: %v", err) + } + + ourRRSIG := &RRSIG{ + Hdr: RR_Header{ + Ttl: rrA.Header().Ttl, + }, + KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), + SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, + Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, + } + ourRRSIG.Expiration, _ = StringToTime("20100909102025") + ourRRSIG.Inception, _ = StringToTime("20100812102025") + err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) + if err != nil { + t.Fatal(err) + } + + if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate our RRSIG: %v", err) + } + + // Signatures are randomized + rrRRSIG.(*RRSIG).Signature = "" + ourRRSIG.Signature = "" + if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { + t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) + } +} + +func TestInvalidRRSet(t *testing.T) { + goodRecords := make([]RR, 2) + goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + goodRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} + + // Generate key + keyname := "cloudflare.com." 
+ key := &DNSKEY{ + Hdr: RR_Header{Name: keyname, Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 0}, + Algorithm: ECDSAP256SHA256, + Flags: ZONE, + Protocol: 3, + } + privatekey, err := key.Generate(256) + if err != nil { + t.Fatal(err.Error()) + } + + // Need to fill in: Inception, Expiration, KeyTag, SignerName and Algorithm + curTime := time.Now() + signature := &RRSIG{ + Inception: uint32(curTime.Unix()), + Expiration: uint32(curTime.Add(time.Hour).Unix()), + KeyTag: key.KeyTag(), + SignerName: keyname, + Algorithm: ECDSAP256SHA256, + } + + // Inconsistent name between records + badRecords := make([]RR, 2) + badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + badRecords[1] = &TXT{Hdr: RR_Header{Name: "nama.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} + + if IsRRset(badRecords) { + t.Fatal("Record set with inconsistent names considered valid") + } + + badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + badRecords[1] = &A{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeA, Class: ClassINET, Ttl: 0}} + + if IsRRset(badRecords) { + t.Fatal("Record set with inconsistent record types considered valid") + } + + badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + badRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassCHAOS, Ttl: 0}, Txt: []string{"_o/"}} + + if IsRRset(badRecords) { + t.Fatal("Record set with inconsistent record class considered valid") + } + + // Sign the good record set and then make sure verification fails on the bad record set + if err := signature.Sign(privatekey.(crypto.Signer), goodRecords); err != nil { + t.Fatal("Signing good records failed") + } + + if err := signature.Verify(key, badRecords); err != ErrRRset { + t.Fatal("Verification did not return ErrRRset with inconsistent records") + } +} diff --git a/vendor/github.com/miekg/dns/dnsutil/util.go b/vendor/github.com/miekg/dns/dnsutil/util.go new file mode 100644 index 0000000..9ed03f2 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnsutil/util.go @@ -0,0 +1,79 @@ +// Package dnsutil contains higher-level methods useful with the dns +// package. While package dns implements the DNS protocols itself, +// these functions are related but not directly required for protocol +// processing. They are often useful in preparing input/output of the +// functions in package dns. +package dnsutil + +import ( + "strings" + + "github.com/miekg/dns" +) + +// AddDomain adds origin to s if s is not already a FQDN. +// Note that the result may not be a FQDN. If origin does not end +// with a ".", the result won't either. +// This implements the zonefile convention (specified in RFC 1035, +// Section "5.1. Format") that "@" represents the +// apex (bare) domain. i.e. AddOrigin("@", "foo.com.") returns "foo.com.". +func AddOrigin(s, origin string) string { + // ("foo.", "origin.") -> "foo." (already a FQDN) + // ("foo", "origin.") -> "foo.origin." + // ("foo"), "origin" -> "foo.origin" + // ("@", "origin.") -> "origin." (@ represents the apex (bare) domain) + // ("", "origin.") -> "origin." (not obvious) + // ("foo", "") -> "foo" (not obvious) + + if dns.IsFqdn(s) { + return s // s is already a FQDN, no need to mess with it. + } + if len(origin) == 0 { + return s // Nothing to append. 
+ } + if s == "@" || len(s) == 0 { + return origin // Expand apex. + } + + if origin == "." { + return s + origin // AddOrigin(s, ".") is an expensive way to add a ".". + } + + return s + "." + origin // The simple case. +} + +// TrimDomainName trims origin from s if s is a subdomain. +// This function will never return "", but returns "@" instead (@ represents the apex (bare) domain). +func TrimDomainName(s, origin string) string { + // An apex (bare) domain is always returned as "@". + // If the return value ends in a ".", the domain was not the suffix. + // origin can end in "." or not. Either way the results should be the same. + + if len(s) == 0 { + return "@" // Return the apex (@) rather than "". + } + // Someone is using TrimDomainName(s, ".") to remove a dot if it exists. + if origin == "." { + return strings.TrimSuffix(s, origin) + } + + // Dude, you aren't even if the right subdomain! + if !dns.IsSubDomain(origin, s) { + return s + } + + slabels := dns.Split(s) + olabels := dns.Split(origin) + m := dns.CompareDomainName(s, origin) + if len(olabels) == m { + if len(olabels) == len(slabels) { + return "@" // origin == s + } + if (s[0] == '.') && (len(slabels) == (len(olabels) + 1)) { + return "@" // TrimDomainName(".foo.", "foo.") + } + } + + // Return the first (len-m) labels: + return s[:slabels[len(slabels)-m]-1] +} diff --git a/vendor/github.com/miekg/dns/dnsutil/util_test.go b/vendor/github.com/miekg/dns/dnsutil/util_test.go new file mode 100644 index 0000000..0f1ecec --- /dev/null +++ b/vendor/github.com/miekg/dns/dnsutil/util_test.go @@ -0,0 +1,130 @@ +package dnsutil + +import "testing" + +func TestAddOrigin(t *testing.T) { + var tests = []struct{ e1, e2, expected string }{ + {"@", "example.com", "example.com"}, + {"foo", "example.com", "foo.example.com"}, + {"foo.", "example.com", "foo."}, + {"@", "example.com.", "example.com."}, + {"foo", "example.com.", "foo.example.com."}, + {"foo.", "example.com.", "foo."}, + // Oddball tests: + // In general origin should not be "" or "." but at least + // these tests verify we don't crash and will keep results + // from changing unexpectedly. + {"*.", "", "*."}, + {"@", "", "@"}, + {"foobar", "", "foobar"}, + {"foobar.", "", "foobar."}, + {"*.", ".", "*."}, + {"@", ".", "."}, + {"foobar", ".", "foobar."}, + {"foobar.", ".", "foobar."}, + } + for _, test := range tests { + actual := AddOrigin(test.e1, test.e2) + if test.expected != actual { + t.Errorf("AddOrigin(%#v, %#v) expected %#v, go %#v\n", test.e1, test.e2, test.expected, actual) + } + } +} + +func TestTrimDomainName(t *testing.T) { + + // Basic tests. + // Try trimming "example.com" and "example.com." from typical use cases. + var tests_examplecom = []struct{ experiment, expected string }{ + {"foo.example.com", "foo"}, + {"foo.example.com.", "foo"}, + {".foo.example.com", ".foo"}, + {".foo.example.com.", ".foo"}, + {"*.example.com", "*"}, + {"example.com", "@"}, + {"example.com.", "@"}, + {"com.", "com."}, + {"foo.", "foo."}, + {"serverfault.com.", "serverfault.com."}, + {"serverfault.com", "serverfault.com"}, + {".foo.ronco.com", ".foo.ronco.com"}, + {".foo.ronco.com.", ".foo.ronco.com."}, + } + for _, dom := range []string{"example.com", "example.com."} { + for i, test := range tests_examplecom { + actual := TrimDomainName(test.experiment, dom) + if test.expected != actual { + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) + } + } + } + + // Paranoid tests. 
+ // These test shouldn't be needed but I was weary of off-by-one errors. + // In theory, these can't happen because there are no single-letter TLDs, + // but it is good to exercize the code this way. + var tests = []struct{ experiment, expected string }{ + {"", "@"}, + {".", "."}, + {"a.b.c.d.e.f.", "a.b.c.d.e"}, + {"b.c.d.e.f.", "b.c.d.e"}, + {"c.d.e.f.", "c.d.e"}, + {"d.e.f.", "d.e"}, + {"e.f.", "e"}, + {"f.", "@"}, + {".a.b.c.d.e.f.", ".a.b.c.d.e"}, + {".b.c.d.e.f.", ".b.c.d.e"}, + {".c.d.e.f.", ".c.d.e"}, + {".d.e.f.", ".d.e"}, + {".e.f.", ".e"}, + {".f.", "@"}, + {"a.b.c.d.e.f", "a.b.c.d.e"}, + {"a.b.c.d.e.", "a.b.c.d.e."}, + {"a.b.c.d.e", "a.b.c.d.e"}, + {"a.b.c.d.", "a.b.c.d."}, + {"a.b.c.d", "a.b.c.d"}, + {"a.b.c.", "a.b.c."}, + {"a.b.c", "a.b.c"}, + {"a.b.", "a.b."}, + {"a.b", "a.b"}, + {"a.", "a."}, + {"a", "a"}, + {".a.b.c.d.e.f", ".a.b.c.d.e"}, + {".a.b.c.d.e.", ".a.b.c.d.e."}, + {".a.b.c.d.e", ".a.b.c.d.e"}, + {".a.b.c.d.", ".a.b.c.d."}, + {".a.b.c.d", ".a.b.c.d"}, + {".a.b.c.", ".a.b.c."}, + {".a.b.c", ".a.b.c"}, + {".a.b.", ".a.b."}, + {".a.b", ".a.b"}, + {".a.", ".a."}, + {".a", ".a"}, + } + for _, dom := range []string{"f", "f."} { + for i, test := range tests { + actual := TrimDomainName(test.experiment, dom) + if test.expected != actual { + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.experiment, dom, test.expected, actual) + } + } + } + + // Test cases for bugs found in the wild. + // These test cases provide both origin, s, and the expected result. + // If you find a bug in the while, this is probably the easiest place + // to add it as a test case. + var tests_wild = []struct{ e1, e2, expected string }{ + {"mathoverflow.net.", ".", "mathoverflow.net"}, + {"mathoverflow.net", ".", "mathoverflow.net"}, + {"", ".", "@"}, + {"@", ".", "@"}, + } + for i, test := range tests_wild { + actual := TrimDomainName(test.e1, test.e2) + if test.expected != actual { + t.Errorf("%d TrimDomainName(%#v, %#v): expected (%v) got (%v)\n", i, test.e1, test.e2, test.expected, actual) + } + } + +} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go new file mode 100644 index 0000000..e38753d --- /dev/null +++ b/vendor/github.com/miekg/dns/doc.go @@ -0,0 +1,251 @@ +/* +Package dns implements a full featured interface to the Domain Name System. +Server- and client-side programming is supported. +The package allows complete control over what is send out to the DNS. The package +API follows the less-is-more principle, by presenting a small, clean interface. + +The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers, +TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. +Note that domain names MUST be fully qualified, before sending them, unqualified +names in a message will result in a packing failure. + +Resource records are native types. They are not stored in wire format. +Basic usage pattern for creating a new resource record: + + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, + Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." + +Or directly from a string: + + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + +Or when the default TTL (3600) and class (IN) suit you: + + mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.") + +Or even: + + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + +In the DNS messages are exchanged, these messages contain resource +records (sets). 
Use pattern for creating a message: + + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + +Or when not certain if the domain name is fully qualified: + + m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) + +The message m is now a message with the question section set to ask +the MX records for the miek.nl. zone. + +The following is slightly more verbose, but more flexible: + + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + +After creating a message it can be send. +Basic use pattern for synchronous querying the DNS at a +server configured on 127.0.0.1 and port 53: + + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + +Suppressing multiple outstanding queries (with the same question, type and +class) is as easy as setting: + + c.SingleInflight = true + +If these "advanced" features are not needed, a simple UDP query can be send, +with: + + in, err := dns.Exchange(m1, "127.0.0.1:53") + +When this functions returns you will get dns message. A dns message consists +out of four sections. +The question section: in.Question, the answer section: in.Answer, +the authority section: in.Ns and the additional section: in.Extra. + +Each of these sections (except the Question section) contain a []RR. Basic +use pattern for accessing the rdata of a TXT RR as the first RR in +the Answer section: + + if t, ok := in.Answer[0].(*dns.TXT); ok { + // do something with t.Txt + } + +Domain Name and TXT Character String Representations + +Both domain names and TXT character strings are converted to presentation +form both when unpacked and when converted to strings. + +For TXT character strings, tabs, carriage returns and line feeds will be +converted to \t, \r and \n respectively. Back slashes and quotations marks +will be escaped. Bytes below 32 and above 127 will be converted to \DDD +form. + +For domain names, in addition to the above rules brackets, periods, +spaces, semicolons and the at symbol are escaped. + +DNSSEC + +DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It +uses public key cryptography to sign resource records. The +public keys are stored in DNSKEY records and the signatures in RRSIG records. + +Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit +to a request. + + m := new(dns.Msg) + m.SetEdns0(4096, true) + +Signature generation, signature verification and key generation are all supported. + +DYNAMIC UPDATES + +Dynamic updates reuses the DNS message format, but renames three of +the sections. Question is Zone, Answer is Prerequisite, Authority is +Update, only the Additional is not renamed. See RFC 2136 for the gory details. + +You can set a rather complex set of rules for the existence of absence of +certain resource records or names in a zone to specify if resource records +should be added or removed. The table from RFC 2136 supplemented with the Go +DNS function shows which functions exist to specify the prerequisites. 
+ + 3.2.4 - Table Of Metavalues Used In Prerequisite Section + + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used + +The prerequisite section can also be left empty. +If you have decided on the prerequisites you can tell what RRs should +be added or deleted. The next table shows the options you have and +what functions to call. + + 3.4.2.6 - Table Of Metavalues Used In Update Section + + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert + +TRANSACTION SIGNATURE + +An TSIG or transaction signature adds a HMAC TSIG record to each message sent. +The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. + +Basic use pattern when querying with a TSIG name "axfr." (note that these key names +must be fully qualified - as they are domain names) and the base64 secret +"so6ZGir4GPAqINNh9U5c3A==": + + c := new(dns.Client) + c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + ... + // When sending the TSIG RR is calculated and filled in before sending + +When requesting an zone transfer (almost all TSIG usage is when requesting zone transfers), with +TSIG, this is the basic use pattern. In this example we request an AXFR for +miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A==" +and using the server 176.58.119.54: + + t := new(dns.Transfer) + m := new(dns.Msg) + t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + m.SetAxfr("miek.nl.") + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + c, err := t.In(m, "176.58.119.54:53") + for r := range c { ... } + +You can now read the records from the transfer as they come in. Each envelope is checked with TSIG. +If something is not correct an error is returned. + +Basic use pattern validating and replying to a message that has TSIG set. + + server := &dns.Server{Addr: ":53", Net: "udp"} + server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + go server.ListenAndServe() + dns.HandleFunc(".", handleRequest) + + func handleRequest(w dns.ResponseWriter, r *dns.Msg) { + m := new(dns.Msg) + m.SetReply(r) + if r.IsTsig() != nil { + if w.TsigStatus() == nil { + // *Msg r has an TSIG record and it was validated + m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) + } else { + // *Msg r has an TSIG records and it was not valided + } + } + w.WriteMsg(m) + } + +PRIVATE RRS + +RFC 6895 sets aside a range of type codes for private use. This range +is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these +can be used, before requesting an official type code from IANA. + +see http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more +information. + +EDNS0 + +EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated +by RFC 6891. It defines an new RR type, the OPT RR, which is then completely +abused. 
+Basic use pattern for creating an (empty) OPT RR: + + o := new(dns.OPT) + o.Hdr.Name = "." // MUST be the root zone, per definition. + o.Hdr.Rrtype = dns.TypeOPT + +The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) +interfaces. Currently only a few have been standardized: EDNS0_NSID +(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note +that these options may be combined in an OPT RR. +Basic use pattern for a server to check if (and which) options are set: + + // o is a dns.OPT + for _, s := range o.Option { + switch e := s.(type) { + case *dns.EDNS0_NSID: + // do stuff with e.Nsid + case *dns.EDNS0_SUBNET: + // access e.Family, e.Address, etc. + } + } + +SIG(0) + +From RFC 2931: + + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. + +It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared +secret approach in TSIG. +Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and +RSASHA512. + +Signing subsequent messages in multi-message sessions is not implemented. +*/ +package dns diff --git a/vendor/github.com/miekg/dns/dyn_test.go b/vendor/github.com/miekg/dns/dyn_test.go new file mode 100644 index 0000000..09986a5 --- /dev/null +++ b/vendor/github.com/miekg/dns/dyn_test.go @@ -0,0 +1,3 @@ +package dns + +// Find better solution diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go new file mode 100644 index 0000000..dbff371 --- /dev/null +++ b/vendor/github.com/miekg/dns/edns.go @@ -0,0 +1,597 @@ +package dns + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "strconv" +) + +// EDNS0 Option codes. +const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (RFC5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (RFC6891) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828) + EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891) + _DO = 1 << 15 // dnssec ok +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. 
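+//
+// A minimal usage sketch (assuming a *Msg named m; SetEdns0 and IsEdns0 are
+// the usual entry points rather than constructing an OPT by hand):
+//
+//	m := new(Msg)
+//	m.SetQuestion("miek.nl.", TypeMX)
+//	m.SetEdns0(4096, true) // appends an OPT RR: UDP buffer 4096, DO bit set
+//	if opt := m.IsEdns0(); opt != nil {
+//		_ = opt.UDPSize() // 4096
+//	}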
+type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + if o.(*EDNS0_SUBNET).DraftOption { + s += " (draft)" + } + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15 +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have! + return + } + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. +func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// EDNS0_NSID option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. 
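+// (Note: when querying a server for its NSID, leave Nsid set to "" and read
+// the hex string back from the reply's OPT RR; the "AA" in the pattern below
+// is just an arbitrary hex payload for illustration.)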
+// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic +// for which netmask applies to the address. This code will parse all the +// available bits when unpacking (up to optlen). When packing it will apply +// SourceNetmask. If you need more advanced logic, patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP + DraftOption bool // Set to true if using the old (0x50fa) option code +} + +func (e *EDNS0_SUBNET) Option() uint16 { + if e.DraftOption { + return EDNS0SUBNETDRAFT + } + return EDNS0SUBNET +} + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) 
+ default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "<nil>" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. +type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." 
+// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) 
+ } + } + return s +} + +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep +// the TCP connection alive. See RFC 7828. +type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. 
+} + +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} diff --git a/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/miekg/dns/edns_test.go new file mode 100644 index 0000000..c290b0c --- /dev/null +++ b/vendor/github.com/miekg/dns/edns_test.go @@ -0,0 +1,68 @@ +package dns + +import "testing" + +func TestOPTTtl(t *testing.T) { + e := &OPT{} + e.Hdr.Name = "." + e.Hdr.Rrtype = TypeOPT + + // verify the default setting of DO=0 + if e.Do() { + t.Errorf("DO bit should be zero") + } + + // There are 6 possible invocations of SetDo(): + // + // 1. Starting with DO=0, using SetDo() + // 2. Starting with DO=0, using SetDo(true) + // 3. Starting with DO=0, using SetDo(false) + // 4. Starting with DO=1, using SetDo() + // 5. Starting with DO=1, using SetDo(true) + // 6. Starting with DO=1, using SetDo(false) + + // verify that invoking SetDo() sets DO=1 (TEST #1) + e.SetDo() + if !e.Do() { + t.Errorf("DO bit should be non-zero") + } + // verify that using SetDo(true) works when DO=1 (TEST #5) + e.SetDo(true) + if !e.Do() { + t.Errorf("DO bit should still be non-zero") + } + // verify that we can use SetDo(false) to set DO=0 (TEST #6) + e.SetDo(false) + if e.Do() { + t.Errorf("DO bit should be zero") + } + // verify that if we call SetDo(false) when DO=0 that it is unchanged (TEST #3) + e.SetDo(false) + if e.Do() { + t.Errorf("DO bit should still be zero") + } + // verify that using SetDo(true) works for DO=0 (TEST #2) + e.SetDo(true) + if !e.Do() { + t.Errorf("DO bit should be non-zero") + } + // verify that using SetDo() works for DO=1 (TEST #4) + e.SetDo() + if !e.Do() { + t.Errorf("DO bit should be non-zero") + } + + if e.Version() != 0 { + t.Errorf("version should be non-zero") + } + + e.SetVersion(42) + if e.Version() != 42 { + t.Errorf("set 42, expected %d, got %d", 42, e.Version()) + } + + e.SetExtendedRcode(42) + if e.ExtendedRcode() != 42 { + t.Errorf("set 42, expected %d, got %d", 42-15, e.ExtendedRcode()) + } +} diff --git a/vendor/github.com/miekg/dns/example_test.go b/vendor/github.com/miekg/dns/example_test.go new file mode 100644 index 0000000..64c1496 --- /dev/null +++ b/vendor/github.com/miekg/dns/example_test.go @@ -0,0 +1,146 @@ +package dns_test + +import ( + "errors" + "fmt" + "log" + "net" + + "github.com/miekg/dns" +) + +// Retrieve the MX records for miek.nl. 
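+// The dial address below is built by plain string concatenation, which is fine
+// for the IPv4/hostname servers normally found in /etc/resolv.conf; for an
+// IPv6 literal, net.JoinHostPort(config.Servers[0], config.Port) would be the
+// safer way to form "host:port".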
+func ExampleMX() { + config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") + c := new(dns.Client) + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) + m.RecursionDesired = true + r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) + if err != nil { + return + } + if r.Rcode != dns.RcodeSuccess { + return + } + for _, a := range r.Answer { + if mx, ok := a.(*dns.MX); ok { + fmt.Printf("%s\n", mx.String()) + } + } +} + +// Retrieve the DNSKEY records of a zone and convert them +// to DS records for SHA1, SHA256 and SHA384. +func ExampleDS() { + config, _ := dns.ClientConfigFromFile("/etc/resolv.conf") + c := new(dns.Client) + m := new(dns.Msg) + zone := "miek.nl" + m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY) + m.SetEdns0(4096, true) + r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port) + if err != nil { + return + } + if r.Rcode != dns.RcodeSuccess { + return + } + for _, k := range r.Answer { + if key, ok := k.(*dns.DNSKEY); ok { + for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} { + fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags) + } + } + } +} + +const TypeAPAIR = 0x0F99 + +type APAIR struct { + addr [2]net.IP +} + +func NewAPAIR() dns.PrivateRdata { return new(APAIR) } + +func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() } +func (rd *APAIR) Parse(txt []string) error { + if len(txt) != 2 { + return errors.New("two addresses required for APAIR") + } + for i, s := range txt { + ip := net.ParseIP(s) + if ip == nil { + return errors.New("invalid IP in APAIR text representation") + } + rd.addr[i] = ip + } + return nil +} + +func (rd *APAIR) Pack(buf []byte) (int, error) { + b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...) + n := copy(buf, b) + if n != len(b) { + return n, dns.ErrBuf + } + return n, nil +} + +func (rd *APAIR) Unpack(buf []byte) (int, error) { + ln := net.IPv4len * 2 + if len(buf) != ln { + return 0, errors.New("invalid length of APAIR rdata") + } + cp := make([]byte, ln) + copy(cp, buf) // clone bytes to use them in IPs + + rd.addr[0] = net.IP(cp[:3]) + rd.addr[1] = net.IP(cp[4:]) + + return len(buf), nil +} + +func (rd *APAIR) Copy(dest dns.PrivateRdata) error { + cp := make([]byte, rd.Len()) + _, err := rd.Pack(cp) + if err != nil { + return err + } + + d := dest.(*APAIR) + d.addr[0] = net.IP(cp[:3]) + d.addr[1] = net.IP(cp[4:]) + return nil +} + +func (rd *APAIR) Len() int { + return net.IPv4len * 2 +} + +func ExamplePrivateHandle() { + dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR) + defer dns.PrivateHandleRemove(TypeAPAIR) + + rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)") + if err != nil { + log.Fatal("could not parse APAIR record: ", err) + } + fmt.Println(rr) + // Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 + + m := new(dns.Msg) + m.Id = 12345 + m.SetQuestion("miek.nl.", TypeAPAIR) + m.Answer = append(m.Answer, rr) + + fmt.Println(m) + // ;; opcode: QUERY, status: NOERROR, id: 12345 + // ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 + // + // ;; QUESTION SECTION: + // ;miek.nl. IN APAIR + // + // ;; ANSWER SECTION: + // miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5 +} diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 0000000..3f5303c --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. 
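+//
+// A short illustration (the values in the comments are what Field returns for
+// each rdata position of an MX record):
+//
+//	mx, _ := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+//	NumField(mx)  // 2: Preference and Mx
+//	Field(mx, 1)  // "10"
+//	Field(mx, 2)  // "mx.miek.nl."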
+func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. +// Accessing non existing fields will cause a panic. +func Field(r RR, i int) string { + if i == 0 { + return "" + } + d := reflect.ValueOf(r).Elem().Field(i) + switch k := d.Kind(); k { + case reflect.String: + return d.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(d.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(d.Uint(), 10) + case reflect.Slice: + switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { + case `dns:"a"`: + // TODO(miek): Hmm store this as 16 bytes + if d.Len() < net.IPv6len { + return net.IPv4(byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint())).String() + } + return net.IPv4(byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint())).String() + case `dns:"aaaa"`: + return net.IP{ + byte(d.Index(0).Uint()), + byte(d.Index(1).Uint()), + byte(d.Index(2).Uint()), + byte(d.Index(3).Uint()), + byte(d.Index(4).Uint()), + byte(d.Index(5).Uint()), + byte(d.Index(6).Uint()), + byte(d.Index(7).Uint()), + byte(d.Index(8).Uint()), + byte(d.Index(9).Uint()), + byte(d.Index(10).Uint()), + byte(d.Index(11).Uint()), + byte(d.Index(12).Uint()), + byte(d.Index(13).Uint()), + byte(d.Index(14).Uint()), + byte(d.Index(15).Uint()), + }.String() + case `dns:"nsec"`: + if d.Len() == 0 { + return "" + } + s := Type(d.Index(0).Uint()).String() + for i := 1; i < d.Len(); i++ { + s += " " + Type(d.Index(i).Uint()).String() + } + return s + default: + // if it does not have a tag its a string slice + fallthrough + case `dns:"txt"`: + if d.Len() == 0 { + return "" + } + s := d.Index(0).String() + for i := 1; i < d.Len(); i++ { + s += " " + d.Index(i).String() + } + return s + } + } + return "" +} diff --git a/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/miekg/dns/fuzz_test.go new file mode 100644 index 0000000..2558697 --- /dev/null +++ b/vendor/github.com/miekg/dns/fuzz_test.go @@ -0,0 +1,25 @@ +package dns + +import "testing" + +func TestFuzzString(t *testing.T) { + testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"", + ";a ", ";a����������", + " NSAP O ", " NSAP N ", + " TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6", + " TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ", + "$GENERATE 0-3 ${441189,5039418474430,o}", + "$INCLUDE 00 TYPE00000000000n ", + "$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0", + } + for i, tc := range testcases { + rr, err := NewRR(tc) + if err == nil { + // rr can be nil because we can (for instance) just parse a comment + if rr == nil { + continue + } + t.Fatalf("parsed mailformed RR %d: %s", i, rr.String()) + } + } +} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go new file mode 100644 index 0000000..e4481a4 --- /dev/null +++ b/vendor/github.com/miekg/dns/generate.go @@ -0,0 +1,159 @@ +package dns + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +// Parse the $GENERATE 
statement as used in BIND9 zones. +// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. +// We are called after '$GENERATE '. After which we expect: +// * the range (12-24/2) +// * lhs (ownername) +// * [[ttl][class]] +// * type +// * rhs (rdata) +// But we are lazy here, only the range is parsed *all* occurrences +// of $ after that are interpreted. +// Any error are returned as a string value, the empty string signals +// "no error". +func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. + s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. 
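+// For example, the modifier "0,3,d" becomes the verb "%03d" with offset 0, so
+// a $GENERATE body containing ${0,3,d} expands to "007" when the counter is 7
+// (a sketch of the intended mapping, not an exhaustive specification):
+//
+//	verb, offset, _ := modToPrintf("0,3,d") // "%03d", 0
+//	_ = fmt.Sprintf(verb, 7+offset)         // "007"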
+func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/idn/code_points.go b/vendor/github.com/miekg/dns/idn/code_points.go new file mode 100644 index 0000000..129c374 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/code_points.go @@ -0,0 +1,2346 @@ +package idn + +const ( + propertyUnknown property = iota // unknown character property + propertyPVALID // allowed to be used in IDNs + propertyCONTEXTJ // invisible or problematic characters (join controls) + propertyCONTEXTO // invisible or problematic characters (others) + propertyDISALLOWED // should not be included in IDNs + propertyUNASSIGNED // code points that are not designated in the Unicode Standard +) + +// property stores the property of a code point, as described in RFC 5892, +// section 1 +type property int + +// codePoints list all code points in Unicode Character Database (UCD) Format +// according to RFC 5892, appendix B.1. Thanks to libidn2 (GNU) - +// http://www.gnu.org/software/libidn/libidn2/ +var codePoints = []struct { + start rune + end rune + state property +}{ + {0x0000, 0x002C, propertyDISALLOWED}, // <control>..COMMA + {0x002D, 0x0, propertyPVALID}, // HYPHEN-MINUS + {0x002E, 0x002F, propertyDISALLOWED}, // FULL STOP..SOLIDUS + {0x0030, 0x0039, propertyPVALID}, // DIGIT ZERO..DIGIT NINE + {0x003A, 0x0060, propertyDISALLOWED}, // COLON..GRAVE ACCENT + {0x0041, 0x005A, propertyPVALID}, // LATIN CAPITAL LETTER A..LATIN CAPITAL LETTER Z + {0x0061, 0x007A, propertyPVALID}, // LATIN SMALL LETTER A..LATIN SMALL LETTER Z + {0x007B, 0x00B6, propertyDISALLOWED}, // LEFT CURLY BRACKET..PILCROW SIGN + {0x00B7, 0x0, propertyCONTEXTO}, // MIDDLE DOT + {0x00B8, 0x00DE, propertyDISALLOWED}, // CEDILLA..LATIN CAPITAL LETTER THORN + {0x00DF, 0x00F6, propertyPVALID}, // LATIN SMALL LETTER SHARP S..LATIN SMALL LETT + {0x00F7, 0x0, propertyDISALLOWED}, // DIVISION SIGN + {0x00F8, 0x00FF, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE..LATIN SMAL + {0x0100, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH MACRON + {0x0101, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH MACRON + {0x0102, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE + {0x0103, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE + {0x0104, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH OGONEK + {0x0105, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH OGONEK + {0x0106, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH ACUTE + {0x0107, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH ACUTE + {0x0108, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CIRCUMFLEX + {0x0109, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CIRCUMFLEX + {0x010A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH DOT ABOVE + {0x010B, 0x0, 
propertyPVALID}, // LATIN SMALL LETTER C WITH DOT ABOVE + {0x010C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CARON + {0x010D, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CARON + {0x010E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CARON + {0x010F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CARON + {0x0110, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH STROKE + {0x0111, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH STROKE + {0x0112, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON + {0x0113, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON + {0x0114, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH BREVE + {0x0115, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH BREVE + {0x0116, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT ABOVE + {0x0117, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT ABOVE + {0x0118, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH OGONEK + {0x0119, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH OGONEK + {0x011A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CARON + {0x011B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CARON + {0x011C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CIRCUMFLEX + {0x011D, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CIRCUMFLEX + {0x011E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH BREVE + {0x011F, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH BREVE + {0x0120, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH DOT ABOVE + {0x0121, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH DOT ABOVE + {0x0122, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CEDILLA + {0x0123, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CEDILLA + {0x0124, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CIRCUMFLEX + {0x0125, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CIRCUMFLEX + {0x0126, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH STROKE + {0x0127, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH STROKE + {0x0128, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE + {0x0129, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE + {0x012A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH MACRON + {0x012B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH MACRON + {0x012C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH BREVE + {0x012D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH BREVE + {0x012E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH OGONEK + {0x012F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH OGONEK + {0x0130, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOT ABOVE + {0x0131, 0x0, propertyPVALID}, // LATIN SMALL LETTER DOTLESS I + {0x0132, 0x0134, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE IJ..LATIN CAPITAL LET + {0x0135, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH CIRCUMFLEX + {0x0136, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CEDILLA + {0x0137, 0x0138, propertyPVALID}, // LATIN SMALL LETTER K WITH CEDILLA..LATIN SMA + {0x0139, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH ACUTE + {0x013A, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH ACUTE + {0x013B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CEDILLA + {0x013C, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CEDILLA + {0x013D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CARON + {0x013E, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH 
CARON + {0x013F, 0x0141, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE DOT..LATI + {0x0142, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH STROKE + {0x0143, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH ACUTE + {0x0144, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH ACUTE + {0x0145, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CEDILLA + {0x0146, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CEDILLA + {0x0147, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CARON + {0x0148, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CARON + {0x0149, 0x014A, propertyDISALLOWED}, // LATIN SMALL LETTER N PRECEDED BY APOSTROPHE. + {0x014B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ENG + {0x014C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON + {0x014D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON + {0x014E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH BREVE + {0x014F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH BREVE + {0x0150, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + {0x0151, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE ACUTE + {0x0152, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LIGATURE OE + {0x0153, 0x0, propertyPVALID}, // LATIN SMALL LIGATURE OE + {0x0154, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH ACUTE + {0x0155, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH ACUTE + {0x0156, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CEDILLA + {0x0157, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CEDILLA + {0x0158, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH CARON + {0x0159, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH CARON + {0x015A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE + {0x015B, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE + {0x015C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CIRCUMFLEX + {0x015D, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CIRCUMFLEX + {0x015E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CEDILLA + {0x015F, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CEDILLA + {0x0160, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON + {0x0161, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON + {0x0162, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CEDILLA + {0x0163, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CEDILLA + {0x0164, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CARON + {0x0165, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CARON + {0x0166, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH STROKE + {0x0167, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH STROKE + {0x0168, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE + {0x0169, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE + {0x016A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON + {0x016B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON + {0x016C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH BREVE + {0x016D, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH BREVE + {0x016E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH RING ABOVE + {0x016F, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH RING ABOVE + {0x0170, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + {0x0171, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE ACUTE + {0x0172, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U 
WITH OGONEK + {0x0173, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH OGONEK + {0x0174, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH CIRCUMFLEX + {0x0175, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH CIRCUMFLEX + {0x0176, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH CIRCUMFLEX + {0x0177, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH CIRCUMFLEX + {0x0178, 0x0179, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DIAERESIS..LATIN + {0x017A, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH ACUTE + {0x017B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT ABOVE + {0x017C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT ABOVE + {0x017D, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CARON + {0x017E, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CARON + {0x017F, 0x0, propertyDISALLOWED}, // LATIN SMALL LETTER LONG S + {0x0180, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH STROKE + {0x0181, 0x0182, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH HOOK..LATIN CAPI + {0x0183, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH TOPBAR + {0x0184, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE SIX + {0x0185, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE SIX + {0x0186, 0x0187, propertyDISALLOWED}, // LATIN CAPITAL LETTER OPEN O..LATIN CAPITAL L + {0x0188, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH HOOK + {0x0189, 0x018B, propertyDISALLOWED}, // LATIN CAPITAL LETTER AFRICAN D..LATIN CAPITA + {0x018C, 0x018D, propertyPVALID}, // LATIN SMALL LETTER D WITH TOPBAR..LATIN SMAL + {0x018E, 0x0191, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED E..LATIN CAPIT + {0x0192, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH HOOK + {0x0193, 0x0194, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH HOOK..LATIN CAPI + {0x0195, 0x0, propertyPVALID}, // LATIN SMALL LETTER HV + {0x0196, 0x0198, propertyDISALLOWED}, // LATIN CAPITAL LETTER IOTA..LATIN CAPITAL LET + {0x0199, 0x019B, propertyPVALID}, // LATIN SMALL LETTER K WITH HOOK..LATIN SMALL + {0x019C, 0x019D, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED M..LATIN CAPITAL + {0x019E, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LONG RIGHT LEG + {0x019F, 0x01A0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MIDDLE TILDE..LA + {0x01A1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN + {0x01A2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OI + {0x01A3, 0x0, propertyPVALID}, // LATIN SMALL LETTER OI + {0x01A4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH HOOK + {0x01A5, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH HOOK + {0x01A6, 0x01A7, propertyDISALLOWED}, // LATIN LETTER YR..LATIN CAPITAL LETTER TONE T + {0x01A8, 0x0, propertyPVALID}, // LATIN SMALL LETTER TONE TWO + {0x01A9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ESH + {0x01AA, 0x01AB, propertyPVALID}, // LATIN LETTER REVERSED ESH LOOP..LATIN SMALL + {0x01AC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH HOOK + {0x01AD, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH HOOK + {0x01AE, 0x01AF, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH RETROFLEX HOOK.. 
+ {0x01B0, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN + {0x01B1, 0x01B3, propertyDISALLOWED}, // LATIN CAPITAL LETTER UPSILON..LATIN CAPITAL + {0x01B4, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK + {0x01B5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH STROKE + {0x01B6, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH STROKE + {0x01B7, 0x01B8, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH..LATIN CAPITAL LETT + {0x01B9, 0x01BB, propertyPVALID}, // LATIN SMALL LETTER EZH REVERSED..LATIN LETTE + {0x01BC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TONE FIVE + {0x01BD, 0x01C3, propertyPVALID}, // LATIN SMALL LETTER TONE FIVE..LATIN LETTER R + {0x01C4, 0x01CD, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ WITH CARON..LATIN CA + {0x01CE, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CARON + {0x01CF, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH CARON + {0x01D0, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH CARON + {0x01D1, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CARON + {0x01D2, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CARON + {0x01D3, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CARON + {0x01D4, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CARON + {0x01D5, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND MA + {0x01D6, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND MACR + {0x01D7, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND AC + {0x01D8, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND ACUT + {0x01D9, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND CA + {0x01DA, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND CARO + {0x01DB, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS AND GR + {0x01DC, 0x01DD, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS AND GRAV + {0x01DE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DIAERESIS AND MA + {0x01DF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DIAERESIS AND MACR + {0x01E0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE AND MA + {0x01E1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT ABOVE AND MACR + {0x01E2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH MACRON + {0x01E3, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH MACRON + {0x01E4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH STROKE + {0x01E5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH STROKE + {0x01E6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH CARON + {0x01E7, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH CARON + {0x01E8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH CARON + {0x01E9, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH CARON + {0x01EA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK + {0x01EB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK + {0x01EC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH OGONEK AND MACRO + {0x01ED, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH OGONEK AND MACRON + {0x01EE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EZH WITH CARON + {0x01EF, 0x01F0, propertyPVALID}, // LATIN SMALL LETTER EZH WITH CARON..LATIN SMA + {0x01F1, 0x01F4, propertyDISALLOWED}, // LATIN CAPITAL LETTER DZ..LATIN CAPITAL LETTE + {0x01F5, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH ACUTE + {0x01F6, 0x01F8, propertyDISALLOWED}, // LATIN CAPITAL LETTER HWAIR..LATIN 
CAPITAL LE + {0x01F9, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH GRAVE + {0x01FA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING ABOVE AND A + {0x01FB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING ABOVE AND ACU + {0x01FC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AE WITH ACUTE + {0x01FD, 0x0, propertyPVALID}, // LATIN SMALL LETTER AE WITH ACUTE + {0x01FE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH STROKE AND ACUTE + {0x01FF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH STROKE AND ACUTE + {0x0200, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOUBLE GRAVE + {0x0201, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOUBLE GRAVE + {0x0202, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH INVERTED BREVE + {0x0203, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH INVERTED BREVE + {0x0204, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOUBLE GRAVE + {0x0205, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOUBLE GRAVE + {0x0206, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH INVERTED BREVE + {0x0207, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH INVERTED BREVE + {0x0208, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DOUBLE GRAVE + {0x0209, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOUBLE GRAVE + {0x020A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH INVERTED BREVE + {0x020B, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH INVERTED BREVE + {0x020C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOUBLE GRAVE + {0x020D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOUBLE GRAVE + {0x020E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH INVERTED BREVE + {0x020F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH INVERTED BREVE + {0x0210, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOUBLE GRAVE + {0x0211, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOUBLE GRAVE + {0x0212, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH INVERTED BREVE + {0x0213, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH INVERTED BREVE + {0x0214, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOUBLE GRAVE + {0x0215, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOUBLE GRAVE + {0x0216, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH INVERTED BREVE + {0x0217, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH INVERTED BREVE + {0x0218, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH COMMA BELOW + {0x0219, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH COMMA BELOW + {0x021A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH COMMA BELOW + {0x021B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH COMMA BELOW + {0x021C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER YOGH + {0x021D, 0x0, propertyPVALID}, // LATIN SMALL LETTER YOGH + {0x021E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CARON + {0x021F, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CARON + {0x0220, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LONG RIGHT LEG + {0x0221, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CURL + {0x0222, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OU + {0x0223, 0x0, propertyPVALID}, // LATIN SMALL LETTER OU + {0x0224, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH HOOK + {0x0225, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH HOOK + {0x0226, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT ABOVE + {0x0227, 0x0, propertyPVALID}, // 
LATIN SMALL LETTER A WITH DOT ABOVE + {0x0228, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA + {0x0229, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA + {0x022A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DIAERESIS AND MA + {0x022B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DIAERESIS AND MACR + {0x022C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND MACRON + {0x022D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND MACRON + {0x022E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE + {0x022F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE + {0x0230, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT ABOVE AND MA + {0x0231, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT ABOVE AND MACR + {0x0232, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH MACRON + {0x0233, 0x0239, propertyPVALID}, // LATIN SMALL LETTER Y WITH MACRON..LATIN SMAL + {0x023A, 0x023B, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH STROKE..LATIN CA + {0x023C, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH STROKE + {0x023D, 0x023E, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH BAR..LATIN CAPIT + {0x023F, 0x0240, propertyPVALID}, // LATIN SMALL LETTER S WITH SWASH TAIL..LATIN + {0x0241, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER GLOTTAL STOP + {0x0242, 0x0, propertyPVALID}, // LATIN SMALL LETTER GLOTTAL STOP + {0x0243, 0x0246, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH STROKE..LATIN CA + {0x0247, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH STROKE + {0x0248, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER J WITH STROKE + {0x0249, 0x0, propertyPVALID}, // LATIN SMALL LETTER J WITH STROKE + {0x024A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SMALL Q WITH HOOK TAIL + {0x024B, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH HOOK TAIL + {0x024C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH STROKE + {0x024D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH STROKE + {0x024E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH STROKE + {0x024F, 0x02AF, propertyPVALID}, // LATIN SMALL LETTER Y WITH STROKE..LATIN SMAL + {0x02B0, 0x02B8, propertyDISALLOWED}, // MODIFIER LETTER SMALL H..MODIFIER LETTER SMA + {0x02B9, 0x02C1, propertyPVALID}, // MODIFIER LETTER PRIME..MODIFIER LETTER REVER + {0x02C2, 0x02C5, propertyDISALLOWED}, // MODIFIER LETTER LEFT ARROWHEAD..MODIFIER LET + {0x02C6, 0x02D1, propertyPVALID}, // MODIFIER LETTER CIRCUMFLEX ACCENT..MODIFIER + {0x02D2, 0x02EB, propertyDISALLOWED}, // MODIFIER LETTER CENTRED RIGHT HALF RING..MOD + {0x02EC, 0x0, propertyPVALID}, // MODIFIER LETTER VOICING + {0x02ED, 0x0, propertyDISALLOWED}, // MODIFIER LETTER UNASPIRATED + {0x02EE, 0x0, propertyPVALID}, // MODIFIER LETTER DOUBLE APOSTROPHE + {0x02EF, 0x02FF, propertyDISALLOWED}, // MODIFIER LETTER LOW DOWN ARROWHEAD..MODIFIER + {0x0300, 0x033F, propertyPVALID}, // COMBINING GRAVE ACCENT..COMBINING DOUBLE OVE + {0x0340, 0x0341, propertyDISALLOWED}, // COMBINING GRAVE TONE MARK..COMBINING ACUTE T + {0x0342, 0x0, propertyPVALID}, // COMBINING GREEK PERISPOMENI + {0x0343, 0x0345, propertyDISALLOWED}, // COMBINING GREEK KORONIS..COMBINING GREEK YPO + {0x0346, 0x034E, propertyPVALID}, // COMBINING BRIDGE ABOVE..COMBINING UPWARDS AR + {0x034F, 0x0, propertyDISALLOWED}, // COMBINING GRAPHEME JOINER + {0x0350, 0x036F, propertyPVALID}, // COMBINING RIGHT ARROWHEAD ABOVE..COMBINING L + {0x0370, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER 
HETA + {0x0371, 0x0, propertyPVALID}, // GREEK SMALL LETTER HETA + {0x0372, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER ARCHAIC SAMPI + {0x0373, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC SAMPI + {0x0374, 0x0, propertyDISALLOWED}, // GREEK NUMERAL SIGN + {0x0375, 0x0, propertyCONTEXTO}, // GREEK LOWER NUMERAL SIGN + {0x0376, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER PAMPHYLIAN DIGAMMA + {0x0377, 0x0, propertyPVALID}, // GREEK SMALL LETTER PAMPHYLIAN DIGAMMA + {0x0378, 0x0379, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x037A, 0x0, propertyDISALLOWED}, // GREEK YPOGEGRAMMENI + {0x037B, 0x037D, propertyPVALID}, // GREEK SMALL REVERSED LUNATE SIGMA SYMBOL..GR + {0x037E, 0x0, propertyDISALLOWED}, // GREEK QUESTION MARK + {0x037F, 0x0383, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0384, 0x038A, propertyDISALLOWED}, // GREEK TONOS..GREEK CAPITAL LETTER IOTA WITH + {0x038B, 0x0, propertyUNASSIGNED}, // <reserved> + {0x038C, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH TONOS + {0x038D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x038E, 0x038F, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH TONOS..GRE + {0x0390, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND T + {0x0391, 0x03A1, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA..GREEK CAPITAL LE + {0x03A2, 0x0, propertyUNASSIGNED}, // <reserved> + {0x03A3, 0x03AB, propertyDISALLOWED}, // GREEK CAPITAL LETTER SIGMA..GREEK CAPITAL LE + {0x03AC, 0x03CE, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH TONOS..GREEK S + {0x03CF, 0x03D6, propertyDISALLOWED}, // GREEK CAPITAL KAI SYMBOL..GREEK PI SYMBOL + {0x03D7, 0x0, propertyPVALID}, // GREEK KAI SYMBOL + {0x03D8, 0x0, propertyDISALLOWED}, // GREEK LETTER ARCHAIC KOPPA + {0x03D9, 0x0, propertyPVALID}, // GREEK SMALL LETTER ARCHAIC KOPPA + {0x03DA, 0x0, propertyDISALLOWED}, // GREEK LETTER STIGMA + {0x03DB, 0x0, propertyPVALID}, // GREEK SMALL LETTER STIGMA + {0x03DC, 0x0, propertyDISALLOWED}, // GREEK LETTER DIGAMMA + {0x03DD, 0x0, propertyPVALID}, // GREEK SMALL LETTER DIGAMMA + {0x03DE, 0x0, propertyDISALLOWED}, // GREEK LETTER KOPPA + {0x03DF, 0x0, propertyPVALID}, // GREEK SMALL LETTER KOPPA + {0x03E0, 0x0, propertyDISALLOWED}, // GREEK LETTER SAMPI + {0x03E1, 0x0, propertyPVALID}, // GREEK SMALL LETTER SAMPI + {0x03E2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHEI + {0x03E3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHEI + {0x03E4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FEI + {0x03E5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FEI + {0x03E6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHEI + {0x03E7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHEI + {0x03E8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HORI + {0x03E9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HORI + {0x03EA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GANGIA + {0x03EB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GANGIA + {0x03EC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SHIMA + {0x03ED, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SHIMA + {0x03EE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DEI + {0x03EF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DEI + {0x03F0, 0x03F2, propertyDISALLOWED}, // GREEK KAPPA SYMBOL..GREEK LUNATE SIGMA SYMBO + {0x03F3, 0x0, propertyPVALID}, // GREEK LETTER YOT + {0x03F4, 0x03F7, propertyDISALLOWED}, // GREEK CAPITAL THETA SYMBOL..GREEK CAPITAL LE + {0x03F8, 0x0, propertyPVALID}, // GREEK SMALL LETTER SHO + {0x03F9, 0x03FA, 
propertyDISALLOWED}, // GREEK CAPITAL LUNATE SIGMA SYMBOL..GREEK CAP + {0x03FB, 0x03FC, propertyPVALID}, // GREEK SMALL LETTER SAN..GREEK RHO WITH STROK + {0x03FD, 0x042F, propertyDISALLOWED}, // GREEK CAPITAL REVERSED LUNATE SIGMA SYMBOL.. + {0x0430, 0x045F, propertyPVALID}, // CYRILLIC SMALL LETTER A..CYRILLIC SMALL LETT + {0x0460, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA + {0x0461, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA + {0x0462, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAT + {0x0463, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAT + {0x0464, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED E + {0x0465, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED E + {0x0466, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LITTLE YUS + {0x0467, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LITTLE YUS + {0x0468, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED LITTLE YUS + {0x0469, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED LITTLE YUS + {0x046A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BIG YUS + {0x046B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BIG YUS + {0x046C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED BIG YUS + {0x046D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED BIG YUS + {0x046E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KSI + {0x046F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KSI + {0x0470, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PSI + {0x0471, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PSI + {0x0472, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER FITA + {0x0473, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER FITA + {0x0474, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA + {0x0475, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA + {0x0476, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE + {0x0477, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GR + {0x0478, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER UK + {0x0479, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER UK + {0x047A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ROUND OMEGA + {0x047B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ROUND OMEGA + {0x047C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OMEGA WITH TITLO + {0x047D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OMEGA WITH TITLO + {0x047E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER OT + {0x047F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER OT + {0x0480, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOPPA + {0x0481, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOPPA + {0x0482, 0x0, propertyDISALLOWED}, // CYRILLIC THOUSANDS SIGN + {0x0483, 0x0487, propertyPVALID}, // COMBINING CYRILLIC TITLO..COMBINING CYRILLIC + {0x0488, 0x048A, propertyDISALLOWED}, // COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..C + {0x048B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHORT I WITH TAIL + {0x048C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SEMISOFT SIGN + {0x048D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SEMISOFT SIGN + {0x048E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ER WITH TICK + {0x048F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ER WITH TICK + {0x0490, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH UPTURN + {0x0491, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH UPTURN + {0x0492, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE 
WITH STROKE + {0x0493, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE + {0x0494, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH MIDDLE HOOK + {0x0495, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH MIDDLE HOOK + {0x0496, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER + {0x0497, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DESCENDER + {0x0498, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DESCENDER + {0x0499, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DESCENDER + {0x049A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH DESCENDER + {0x049B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH DESCENDER + {0x049C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH VERTICAL STR + {0x049D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH VERTICAL STROK + {0x049E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH STROKE + {0x049F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH STROKE + {0x04A0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BASHKIR KA + {0x04A1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BASHKIR KA + {0x04A2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH DESCENDER + {0x04A3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH DESCENDER + {0x04A4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE EN GHE + {0x04A5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE EN GHE + {0x04A6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH MIDDLE HOOK + {0x04A7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH MIDDLE HOOK + {0x04A8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN HA + {0x04A9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN HA + {0x04AA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ES WITH DESCENDER + {0x04AB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ES WITH DESCENDER + {0x04AC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH DESCENDER + {0x04AD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH DESCENDER + {0x04AE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U + {0x04AF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U + {0x04B0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER STRAIGHT U WITH STRO + {0x04B1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE + {0x04B2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH DESCENDER + {0x04B3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH DESCENDER + {0x04B4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE TE TSE + {0x04B5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE TE TSE + {0x04B6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DESCENDER + {0x04B7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DESCENDER + {0x04B8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH VERTICAL ST + {0x04B9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH VERTICAL STRO + {0x04BA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHHA + {0x04BB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHHA + {0x04BC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE + {0x04BD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE + {0x04BE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN CHE WITH D + {0x04BF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN CHE WITH DES + {0x04C0, 0x04C1, propertyDISALLOWED}, // CYRILLIC LETTER 
PALOCHKA..CYRILLIC CAPITAL L + {0x04C2, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH BREVE + {0x04C3, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KA WITH HOOK + {0x04C4, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KA WITH HOOK + {0x04C5, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH TAIL + {0x04C6, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH TAIL + {0x04C7, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH HOOK + {0x04C8, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH HOOK + {0x04C9, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EN WITH TAIL + {0x04CA, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH TAIL + {0x04CB, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KHAKASSIAN CHE + {0x04CC, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KHAKASSIAN CHE + {0x04CD, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EM WITH TAIL + {0x04CE, 0x04CF, propertyPVALID}, // CYRILLIC SMALL LETTER EM WITH TAIL..CYRILLIC + {0x04D0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH BREVE + {0x04D1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH BREVE + {0x04D2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER A WITH DIAERESIS + {0x04D3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER A WITH DIAERESIS + {0x04D4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LIGATURE A IE + {0x04D5, 0x0, propertyPVALID}, // CYRILLIC SMALL LIGATURE A IE + {0x04D6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IE WITH BREVE + {0x04D7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IE WITH BREVE + {0x04D8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA + {0x04D9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA + {0x04DA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS + {0x04DB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS + {0x04DC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS + {0x04DD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHE WITH DIAERESIS + {0x04DE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS + {0x04DF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZE WITH DIAERESIS + {0x04E0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ABKHASIAN DZE + {0x04E1, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ABKHASIAN DZE + {0x04E2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH MACRON + {0x04E3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH MACRON + {0x04E4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER I WITH DIAERESIS + {0x04E5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER I WITH DIAERESIS + {0x04E6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER O WITH DIAERESIS + {0x04E7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER O WITH DIAERESIS + {0x04E8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O + {0x04E9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O + {0x04EA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BARRED O WITH DIAERE + {0x04EB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BARRED O WITH DIAERESI + {0x04EC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER E WITH DIAERESIS + {0x04ED, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER E WITH DIAERESIS + {0x04EE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH MACRON + {0x04EF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH MACRON + {0x04F0, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DIAERESIS + {0x04F1, 0x0, propertyPVALID}, // 
CYRILLIC SMALL LETTER U WITH DIAERESIS + {0x04F2, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE + {0x04F3, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE + {0x04F4, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS + {0x04F5, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CHE WITH DIAERESIS + {0x04F6, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH DESCENDER + {0x04F7, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH DESCENDER + {0x04F8, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS + {0x04F9, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH DIAERESIS + {0x04FA, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER GHE WITH STROKE AND + {0x04FB, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER GHE WITH STROKE AND HO + {0x04FC, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH HOOK + {0x04FD, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH HOOK + {0x04FE, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HA WITH STROKE + {0x04FF, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HA WITH STROKE + {0x0500, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DE + {0x0501, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DE + {0x0502, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DJE + {0x0503, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DJE + {0x0504, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI ZJE + {0x0505, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI ZJE + {0x0506, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI DZJE + {0x0507, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI DZJE + {0x0508, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI LJE + {0x0509, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI LJE + {0x050A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI NJE + {0x050B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI NJE + {0x050C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI SJE + {0x050D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI SJE + {0x050E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER KOMI TJE + {0x050F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER KOMI TJE + {0x0510, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED ZE + {0x0511, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED ZE + {0x0512, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH HOOK + {0x0513, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH HOOK + {0x0514, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER LHA + {0x0515, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER LHA + {0x0516, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER RHA + {0x0517, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER RHA + {0x0518, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YAE + {0x0519, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YAE + {0x051A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER QA + {0x051B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER QA + {0x051C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER WE + {0x051D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER WE + {0x051E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ALEUT KA + {0x051F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ALEUT KA + {0x0520, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER EL WITH MIDDLE HOOK + {0x0521, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EL WITH MIDDLE HOOK + {0x0522, 0x0, propertyDISALLOWED}, // 
CYRILLIC CAPITAL LETTER EN WITH MIDDLE HOOK + {0x0523, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER EN WITH MIDDLE HOOK + {0x0524, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER PE WITH DESCENDER + {0x0525, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER PE WITH DESCENDER + {0x0526, 0x0530, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0531, 0x0556, propertyDISALLOWED}, // ARMENIAN CAPITAL LETTER AYB..ARMENIAN CAPITA + {0x0557, 0x0558, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0559, 0x0, propertyPVALID}, // ARMENIAN MODIFIER LETTER LEFT HALF RING + {0x055A, 0x055F, propertyDISALLOWED}, // ARMENIAN APOSTROPHE..ARMENIAN ABBREVIATION M + {0x0560, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0561, 0x0586, propertyPVALID}, // ARMENIAN SMALL LETTER AYB..ARMENIAN SMALL LE + {0x0587, 0x0, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE ECH YIWN + {0x0588, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0589, 0x058A, propertyDISALLOWED}, // ARMENIAN FULL STOP..ARMENIAN HYPHEN + {0x058B, 0x0590, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0591, 0x05BD, propertyPVALID}, // HEBREW ACCENT ETNAHTA..HEBREW POINT METEG + {0x05BE, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION MAQAF + {0x05BF, 0x0, propertyPVALID}, // HEBREW POINT RAFE + {0x05C0, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION PASEQ + {0x05C1, 0x05C2, propertyPVALID}, // HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT + {0x05C3, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION SOF PASUQ + {0x05C4, 0x05C5, propertyPVALID}, // HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT + {0x05C6, 0x0, propertyDISALLOWED}, // HEBREW PUNCTUATION NUN HAFUKHA + {0x05C7, 0x0, propertyPVALID}, // HEBREW POINT QAMATS QATAN + {0x05C8, 0x05CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x05D0, 0x05EA, propertyPVALID}, // HEBREW LETTER ALEF..HEBREW LETTER TAV + {0x05EB, 0x05EF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x05F0, 0x05F2, propertyPVALID}, // HEBREW LIGATURE YIDDISH DOUBLE VAV..HEBREW L + {0x05F3, 0x05F4, propertyCONTEXTO}, // HEBREW PUNCTUATION GERESH..HEBREW PUNCTUATIO + {0x05F5, 0x05FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0600, 0x0603, propertyDISALLOWED}, // ARABIC NUMBER SIGN..ARABIC SIGN SAFHA + {0x0604, 0x0605, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0606, 0x060F, propertyDISALLOWED}, // ARABIC-INDIC CUBE ROOT..ARABIC SIGN MISRA + {0x0610, 0x061A, propertyPVALID}, // ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..AR + {0x061B, 0x0, propertyDISALLOWED}, // ARABIC SEMICOLON + {0x061C, 0x061D, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x061E, 0x061F, propertyDISALLOWED}, // ARABIC TRIPLE DOT PUNCTUATION MARK..ARABIC Q + {0x0620, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0621, 0x063F, propertyPVALID}, // ARABIC LETTER HAMZA..ARABIC LETTER FARSI YEH + {0x0640, 0x0, propertyDISALLOWED}, // ARABIC TATWEEL + {0x0641, 0x065E, propertyPVALID}, // ARABIC LETTER FEH..ARABIC FATHA WITH TWO DOT + {0x065F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0660, 0x0669, propertyCONTEXTO}, // ARABIC-INDIC DIGIT ZERO..ARABIC-INDIC DIGIT + {0x066A, 0x066D, propertyDISALLOWED}, // ARABIC PERCENT SIGN..ARABIC FIVE POINTED STA + {0x066E, 0x0674, propertyPVALID}, // ARABIC LETTER DOTLESS BEH..ARABIC LETTER HIG + {0x0675, 0x0678, propertyDISALLOWED}, // ARABIC LETTER HIGH HAMZA ALEF..ARABIC LETTER + {0x0679, 0x06D3, propertyPVALID}, // ARABIC LETTER TTEH..ARABIC LETTER YEH BARREE + {0x06D4, 0x0, propertyDISALLOWED}, // ARABIC FULL STOP + {0x06D5, 0x06DC, propertyPVALID}, // ARABIC LETTER AE..ARABIC SMALL 
HIGH SEEN + {0x06DD, 0x06DE, propertyDISALLOWED}, // ARABIC END OF AYAH..ARABIC START OF RUB EL H + {0x06DF, 0x06E8, propertyPVALID}, // ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL + {0x06E9, 0x0, propertyDISALLOWED}, // ARABIC PLACE OF SAJDAH + {0x06EA, 0x06EF, propertyPVALID}, // ARABIC EMPTY CENTRE LOW STOP..ARABIC LETTER + {0x06F0, 0x06F9, propertyCONTEXTO}, // EXTENDED ARABIC-INDIC DIGIT ZERO..EXTENDED A + {0x06FA, 0x06FF, propertyPVALID}, // ARABIC LETTER SHEEN WITH DOT BELOW..ARABIC L + {0x0700, 0x070D, propertyDISALLOWED}, // SYRIAC END OF PARAGRAPH..SYRIAC HARKLEAN AST + {0x070E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x070F, 0x0, propertyDISALLOWED}, // SYRIAC ABBREVIATION MARK + {0x0710, 0x074A, propertyPVALID}, // SYRIAC LETTER ALAPH..SYRIAC BARREKH + {0x074B, 0x074C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x074D, 0x07B1, propertyPVALID}, // SYRIAC LETTER SOGDIAN ZHAIN..THAANA LETTER N + {0x07B2, 0x07BF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x07C0, 0x07F5, propertyPVALID}, // NKO DIGIT ZERO..NKO LOW TONE APOSTROPHE + {0x07F6, 0x07FA, propertyDISALLOWED}, // NKO SYMBOL OO DENNEN..NKO LAJANYALAN + {0x07FB, 0x07FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0800, 0x082D, propertyPVALID}, // SAMARITAN LETTER ALAF..SAMARITAN MARK NEQUDA + {0x082E, 0x082F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0830, 0x083E, propertyDISALLOWED}, // SAMARITAN PUNCTUATION NEQUDAA..SAMARITAN PUN + {0x083F, 0x08FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0900, 0x0939, propertyPVALID}, // DEVANAGARI SIGN INVERTED CANDRABINDU..DEVANA + {0x093A, 0x093B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x093C, 0x094E, propertyPVALID}, // DEVANAGARI SIGN NUKTA..DEVANAGARI VOWEL SIGN + {0x094F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0950, 0x0955, propertyPVALID}, // DEVANAGARI OM..DEVANAGARI VOWEL SIGN CANDRA + {0x0956, 0x0957, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0958, 0x095F, propertyDISALLOWED}, // DEVANAGARI LETTER QA..DEVANAGARI LETTER YYA + {0x0960, 0x0963, propertyPVALID}, // DEVANAGARI LETTER VOCALIC RR..DEVANAGARI VOW + {0x0964, 0x0965, propertyDISALLOWED}, // DEVANAGARI DANDA..DEVANAGARI DOUBLE DANDA + {0x0966, 0x096F, propertyPVALID}, // DEVANAGARI DIGIT ZERO..DEVANAGARI DIGIT NINE + {0x0970, 0x0, propertyDISALLOWED}, // DEVANAGARI ABBREVIATION SIGN + {0x0971, 0x0972, propertyPVALID}, // DEVANAGARI SIGN HIGH SPACING DOT..DEVANAGARI + {0x0973, 0x0978, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0979, 0x097F, propertyPVALID}, // DEVANAGARI LETTER ZHA..DEVANAGARI LETTER BBA + {0x0980, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0981, 0x0983, propertyPVALID}, // BENGALI SIGN CANDRABINDU..BENGALI SIGN VISAR + {0x0984, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0985, 0x098C, propertyPVALID}, // BENGALI LETTER A..BENGALI LETTER VOCALIC L + {0x098D, 0x098E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x098F, 0x0990, propertyPVALID}, // BENGALI LETTER E..BENGALI LETTER AI + {0x0991, 0x0992, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0993, 0x09A8, propertyPVALID}, // BENGALI LETTER O..BENGALI LETTER NA + {0x09A9, 0x0, propertyUNASSIGNED}, // <reserved> + {0x09AA, 0x09B0, propertyPVALID}, // BENGALI LETTER PA..BENGALI LETTER RA + {0x09B1, 0x0, propertyUNASSIGNED}, // <reserved> + {0x09B2, 0x0, propertyPVALID}, // BENGALI LETTER LA + {0x09B3, 0x09B5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09B6, 0x09B9, propertyPVALID}, // BENGALI LETTER SHA..BENGALI LETTER HA + {0x09BA, 0x09BB, 
propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09BC, 0x09C4, propertyPVALID}, // BENGALI SIGN NUKTA..BENGALI VOWEL SIGN VOCAL + {0x09C5, 0x09C6, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09C7, 0x09C8, propertyPVALID}, // BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI + {0x09C9, 0x09CA, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09CB, 0x09CE, propertyPVALID}, // BENGALI VOWEL SIGN O..BENGALI LETTER KHANDA + {0x09CF, 0x09D6, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09D7, 0x0, propertyPVALID}, // BENGALI AU LENGTH MARK + {0x09D8, 0x09DB, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09DC, 0x09DD, propertyDISALLOWED}, // BENGALI LETTER RRA..BENGALI LETTER RHA + {0x09DE, 0x0, propertyUNASSIGNED}, // <reserved> + {0x09DF, 0x0, propertyDISALLOWED}, // BENGALI LETTER YYA + {0x09E0, 0x09E3, propertyPVALID}, // BENGALI LETTER VOCALIC RR..BENGALI VOWEL SIG + {0x09E4, 0x09E5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x09E6, 0x09F1, propertyPVALID}, // BENGALI DIGIT ZERO..BENGALI LETTER RA WITH L + {0x09F2, 0x09FB, propertyDISALLOWED}, // BENGALI RUPEE MARK..BENGALI GANDA MARK + {0x09FC, 0x0A00, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A01, 0x0A03, propertyPVALID}, // GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN VISA + {0x0A04, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A05, 0x0A0A, propertyPVALID}, // GURMUKHI LETTER A..GURMUKHI LETTER UU + {0x0A0B, 0x0A0E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A0F, 0x0A10, propertyPVALID}, // GURMUKHI LETTER EE..GURMUKHI LETTER AI + {0x0A11, 0x0A12, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A13, 0x0A28, propertyPVALID}, // GURMUKHI LETTER OO..GURMUKHI LETTER NA + {0x0A29, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A2A, 0x0A30, propertyPVALID}, // GURMUKHI LETTER PA..GURMUKHI LETTER RA + {0x0A31, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A32, 0x0, propertyPVALID}, // GURMUKHI LETTER LA + {0x0A33, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER LLA + {0x0A34, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A35, 0x0, propertyPVALID}, // GURMUKHI LETTER VA + {0x0A36, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER SHA + {0x0A37, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A38, 0x0A39, propertyPVALID}, // GURMUKHI LETTER SA..GURMUKHI LETTER HA + {0x0A3A, 0x0A3B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A3C, 0x0, propertyPVALID}, // GURMUKHI SIGN NUKTA + {0x0A3D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A3E, 0x0A42, propertyPVALID}, // GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN + {0x0A43, 0x0A46, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A47, 0x0A48, propertyPVALID}, // GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN + {0x0A49, 0x0A4A, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A4B, 0x0A4D, propertyPVALID}, // GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA + {0x0A4E, 0x0A50, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A51, 0x0, propertyPVALID}, // GURMUKHI SIGN UDAAT + {0x0A52, 0x0A58, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A59, 0x0A5B, propertyDISALLOWED}, // GURMUKHI LETTER KHHA..GURMUKHI LETTER ZA + {0x0A5C, 0x0, propertyPVALID}, // GURMUKHI LETTER RRA + {0x0A5D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A5E, 0x0, propertyDISALLOWED}, // GURMUKHI LETTER FA + {0x0A5F, 0x0A65, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A66, 0x0A75, propertyPVALID}, // GURMUKHI DIGIT ZERO..GURMUKHI SIGN YAKASH + {0x0A76, 0x0A80, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0A81, 0x0A83, propertyPVALID}, // GUJARATI SIGN 
CANDRABINDU..GUJARATI SIGN VIS + {0x0A84, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A85, 0x0A8D, propertyPVALID}, // GUJARATI LETTER A..GUJARATI VOWEL CANDRA E + {0x0A8E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A8F, 0x0A91, propertyPVALID}, // GUJARATI LETTER E..GUJARATI VOWEL CANDRA O + {0x0A92, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0A93, 0x0AA8, propertyPVALID}, // GUJARATI LETTER O..GUJARATI LETTER NA + {0x0AA9, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0AAA, 0x0AB0, propertyPVALID}, // GUJARATI LETTER PA..GUJARATI LETTER RA + {0x0AB1, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0AB2, 0x0AB3, propertyPVALID}, // GUJARATI LETTER LA..GUJARATI LETTER LLA + {0x0AB4, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0AB5, 0x0AB9, propertyPVALID}, // GUJARATI LETTER VA..GUJARATI LETTER HA + {0x0ABA, 0x0ABB, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0ABC, 0x0AC5, propertyPVALID}, // GUJARATI SIGN NUKTA..GUJARATI VOWEL SIGN CAN + {0x0AC6, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0AC7, 0x0AC9, propertyPVALID}, // GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN C + {0x0ACA, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0ACB, 0x0ACD, propertyPVALID}, // GUJARATI VOWEL SIGN O..GUJARATI SIGN VIRAMA + {0x0ACE, 0x0ACF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0AD0, 0x0, propertyPVALID}, // GUJARATI OM + {0x0AD1, 0x0ADF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0AE0, 0x0AE3, propertyPVALID}, // GUJARATI LETTER VOCALIC RR..GUJARATI VOWEL S + {0x0AE4, 0x0AE5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0AE6, 0x0AEF, propertyPVALID}, // GUJARATI DIGIT ZERO..GUJARATI DIGIT NINE + {0x0AF0, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0AF1, 0x0, propertyDISALLOWED}, // GUJARATI RUPEE SIGN + {0x0AF2, 0x0B00, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B01, 0x0B03, propertyPVALID}, // ORIYA SIGN CANDRABINDU..ORIYA SIGN VISARGA + {0x0B04, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B05, 0x0B0C, propertyPVALID}, // ORIYA LETTER A..ORIYA LETTER VOCALIC L + {0x0B0D, 0x0B0E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B0F, 0x0B10, propertyPVALID}, // ORIYA LETTER E..ORIYA LETTER AI + {0x0B11, 0x0B12, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B13, 0x0B28, propertyPVALID}, // ORIYA LETTER O..ORIYA LETTER NA + {0x0B29, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B2A, 0x0B30, propertyPVALID}, // ORIYA LETTER PA..ORIYA LETTER RA + {0x0B31, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B32, 0x0B33, propertyPVALID}, // ORIYA LETTER LA..ORIYA LETTER LLA + {0x0B34, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B35, 0x0B39, propertyPVALID}, // ORIYA LETTER VA..ORIYA LETTER HA + {0x0B3A, 0x0B3B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B3C, 0x0B44, propertyPVALID}, // ORIYA SIGN NUKTA..ORIYA VOWEL SIGN VOCALIC R + {0x0B45, 0x0B46, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B47, 0x0B48, propertyPVALID}, // ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI + {0x0B49, 0x0B4A, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B4B, 0x0B4D, propertyPVALID}, // ORIYA VOWEL SIGN O..ORIYA SIGN VIRAMA + {0x0B4E, 0x0B55, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B56, 0x0B57, propertyPVALID}, // ORIYA AI LENGTH MARK..ORIYA AU LENGTH MARK + {0x0B58, 0x0B5B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B5C, 0x0B5D, propertyDISALLOWED}, // ORIYA LETTER RRA..ORIYA LETTER RHA + {0x0B5E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B5F, 0x0B63, propertyPVALID}, // ORIYA LETTER YYA..ORIYA VOWEL SIGN VOCALIC L + {0x0B64, 
0x0B65, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B66, 0x0B6F, propertyPVALID}, // ORIYA DIGIT ZERO..ORIYA DIGIT NINE + {0x0B70, 0x0, propertyDISALLOWED}, // ORIYA ISSHAR + {0x0B71, 0x0, propertyPVALID}, // ORIYA LETTER WA + {0x0B72, 0x0B81, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B82, 0x0B83, propertyPVALID}, // TAMIL SIGN ANUSVARA..TAMIL SIGN VISARGA + {0x0B84, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B85, 0x0B8A, propertyPVALID}, // TAMIL LETTER A..TAMIL LETTER UU + {0x0B8B, 0x0B8D, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B8E, 0x0B90, propertyPVALID}, // TAMIL LETTER E..TAMIL LETTER AI + {0x0B91, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B92, 0x0B95, propertyPVALID}, // TAMIL LETTER O..TAMIL LETTER KA + {0x0B96, 0x0B98, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0B99, 0x0B9A, propertyPVALID}, // TAMIL LETTER NGA..TAMIL LETTER CA + {0x0B9B, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B9C, 0x0, propertyPVALID}, // TAMIL LETTER JA + {0x0B9D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0B9E, 0x0B9F, propertyPVALID}, // TAMIL LETTER NYA..TAMIL LETTER TTA + {0x0BA0, 0x0BA2, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BA3, 0x0BA4, propertyPVALID}, // TAMIL LETTER NNA..TAMIL LETTER TA + {0x0BA5, 0x0BA7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BA8, 0x0BAA, propertyPVALID}, // TAMIL LETTER NA..TAMIL LETTER PA + {0x0BAB, 0x0BAD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BAE, 0x0BB9, propertyPVALID}, // TAMIL LETTER MA..TAMIL LETTER HA + {0x0BBA, 0x0BBD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BBE, 0x0BC2, propertyPVALID}, // TAMIL VOWEL SIGN AA..TAMIL VOWEL SIGN UU + {0x0BC3, 0x0BC5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BC6, 0x0BC8, propertyPVALID}, // TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI + {0x0BC9, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0BCA, 0x0BCD, propertyPVALID}, // TAMIL VOWEL SIGN O..TAMIL SIGN VIRAMA + {0x0BCE, 0x0BCF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BD0, 0x0, propertyPVALID}, // TAMIL OM + {0x0BD1, 0x0BD6, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BD7, 0x0, propertyPVALID}, // TAMIL AU LENGTH MARK + {0x0BD8, 0x0BE5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0BE6, 0x0BEF, propertyPVALID}, // TAMIL DIGIT ZERO..TAMIL DIGIT NINE + {0x0BF0, 0x0BFA, propertyDISALLOWED}, // TAMIL NUMBER TEN..TAMIL NUMBER SIGN + {0x0BFB, 0x0C00, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C01, 0x0C03, propertyPVALID}, // TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA + {0x0C04, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C05, 0x0C0C, propertyPVALID}, // TELUGU LETTER A..TELUGU LETTER VOCALIC L + {0x0C0D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C0E, 0x0C10, propertyPVALID}, // TELUGU LETTER E..TELUGU LETTER AI + {0x0C11, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C12, 0x0C28, propertyPVALID}, // TELUGU LETTER O..TELUGU LETTER NA + {0x0C29, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C2A, 0x0C33, propertyPVALID}, // TELUGU LETTER PA..TELUGU LETTER LLA + {0x0C34, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C35, 0x0C39, propertyPVALID}, // TELUGU LETTER VA..TELUGU LETTER HA + {0x0C3A, 0x0C3C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C3D, 0x0C44, propertyPVALID}, // TELUGU SIGN AVAGRAHA..TELUGU VOWEL SIGN VOCA + {0x0C45, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C46, 0x0C48, propertyPVALID}, // TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI + {0x0C49, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C4A, 0x0C4D, 
propertyPVALID}, // TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA + {0x0C4E, 0x0C54, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C55, 0x0C56, propertyPVALID}, // TELUGU LENGTH MARK..TELUGU AI LENGTH MARK + {0x0C57, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C58, 0x0C59, propertyPVALID}, // TELUGU LETTER TSA..TELUGU LETTER DZA + {0x0C5A, 0x0C5F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C60, 0x0C63, propertyPVALID}, // TELUGU LETTER VOCALIC RR..TELUGU VOWEL SIGN + {0x0C64, 0x0C65, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C66, 0x0C6F, propertyPVALID}, // TELUGU DIGIT ZERO..TELUGU DIGIT NINE + {0x0C70, 0x0C77, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C78, 0x0C7F, propertyDISALLOWED}, // TELUGU FRACTION DIGIT ZERO FOR ODD POWERS OF + {0x0C80, 0x0C81, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0C82, 0x0C83, propertyPVALID}, // KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA + {0x0C84, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C85, 0x0C8C, propertyPVALID}, // KANNADA LETTER A..KANNADA LETTER VOCALIC L + {0x0C8D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C8E, 0x0C90, propertyPVALID}, // KANNADA LETTER E..KANNADA LETTER AI + {0x0C91, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0C92, 0x0CA8, propertyPVALID}, // KANNADA LETTER O..KANNADA LETTER NA + {0x0CA9, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CAA, 0x0CB3, propertyPVALID}, // KANNADA LETTER PA..KANNADA LETTER LLA + {0x0CB4, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CB5, 0x0CB9, propertyPVALID}, // KANNADA LETTER VA..KANNADA LETTER HA + {0x0CBA, 0x0CBB, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0CBC, 0x0CC4, propertyPVALID}, // KANNADA SIGN NUKTA..KANNADA VOWEL SIGN VOCAL + {0x0CC5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CC6, 0x0CC8, propertyPVALID}, // KANNADA VOWEL SIGN E..KANNADA VOWEL SIGN AI + {0x0CC9, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CCA, 0x0CCD, propertyPVALID}, // KANNADA VOWEL SIGN O..KANNADA SIGN VIRAMA + {0x0CCE, 0x0CD4, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0CD5, 0x0CD6, propertyPVALID}, // KANNADA LENGTH MARK..KANNADA AI LENGTH MARK + {0x0CD7, 0x0CDD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0CDE, 0x0, propertyPVALID}, // KANNADA LETTER FA + {0x0CDF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CE0, 0x0CE3, propertyPVALID}, // KANNADA LETTER VOCALIC RR..KANNADA VOWEL SIG + {0x0CE4, 0x0CE5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0CE6, 0x0CEF, propertyPVALID}, // KANNADA DIGIT ZERO..KANNADA DIGIT NINE + {0x0CF0, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0CF1, 0x0CF2, propertyDISALLOWED}, // KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADH + {0x0CF3, 0x0D01, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D02, 0x0D03, propertyPVALID}, // MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISA + {0x0D04, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D05, 0x0D0C, propertyPVALID}, // MALAYALAM LETTER A..MALAYALAM LETTER VOCALIC + {0x0D0D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D0E, 0x0D10, propertyPVALID}, // MALAYALAM LETTER E..MALAYALAM LETTER AI + {0x0D11, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D12, 0x0D28, propertyPVALID}, // MALAYALAM LETTER O..MALAYALAM LETTER NA + {0x0D29, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D2A, 0x0D39, propertyPVALID}, // MALAYALAM LETTER PA..MALAYALAM LETTER HA + {0x0D3A, 0x0D3C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D3D, 0x0D44, propertyPVALID}, // MALAYALAM SIGN AVAGRAHA..MALAYALAM VOWEL SIG + {0x0D45, 0x0, propertyUNASSIGNED}, // <reserved> + 
{0x0D46, 0x0D48, propertyPVALID}, // MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN + {0x0D49, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D4A, 0x0D4D, propertyPVALID}, // MALAYALAM VOWEL SIGN O..MALAYALAM SIGN VIRAM + {0x0D4E, 0x0D56, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D57, 0x0, propertyPVALID}, // MALAYALAM AU LENGTH MARK + {0x0D58, 0x0D5F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D60, 0x0D63, propertyPVALID}, // MALAYALAM LETTER VOCALIC RR..MALAYALAM VOWEL + {0x0D64, 0x0D65, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D66, 0x0D6F, propertyPVALID}, // MALAYALAM DIGIT ZERO..MALAYALAM DIGIT NINE + {0x0D70, 0x0D75, propertyDISALLOWED}, // MALAYALAM NUMBER TEN..MALAYALAM FRACTION THR + {0x0D76, 0x0D78, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D79, 0x0, propertyDISALLOWED}, // MALAYALAM DATE MARK + {0x0D7A, 0x0D7F, propertyPVALID}, // MALAYALAM LETTER CHILLU NN..MALAYALAM LETTER + {0x0D80, 0x0D81, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D82, 0x0D83, propertyPVALID}, // SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARG + {0x0D84, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0D85, 0x0D96, propertyPVALID}, // SINHALA LETTER AYANNA..SINHALA LETTER AUYANN + {0x0D97, 0x0D99, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0D9A, 0x0DB1, propertyPVALID}, // SINHALA LETTER ALPAPRAANA KAYANNA..SINHALA L + {0x0DB2, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0DB3, 0x0DBB, propertyPVALID}, // SINHALA LETTER SANYAKA DAYANNA..SINHALA LETT + {0x0DBC, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0DBD, 0x0, propertyPVALID}, // SINHALA LETTER DANTAJA LAYANNA + {0x0DBE, 0x0DBF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0DC0, 0x0DC6, propertyPVALID}, // SINHALA LETTER VAYANNA..SINHALA LETTER FAYAN + {0x0DC7, 0x0DC9, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0DCA, 0x0, propertyPVALID}, // SINHALA SIGN AL-LAKUNA + {0x0DCB, 0x0DCE, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0DCF, 0x0DD4, propertyPVALID}, // SINHALA VOWEL SIGN AELA-PILLA..SINHALA VOWEL + {0x0DD5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0DD6, 0x0, propertyPVALID}, // SINHALA VOWEL SIGN DIGA PAA-PILLA + {0x0DD7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0DD8, 0x0DDF, propertyPVALID}, // SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOW + {0x0DE0, 0x0DF1, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0DF2, 0x0DF3, propertyPVALID}, // SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHAL + {0x0DF4, 0x0, propertyDISALLOWED}, // SINHALA PUNCTUATION KUNDDALIYA + {0x0DF5, 0x0E00, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0E01, 0x0E32, propertyPVALID}, // THAI CHARACTER KO KAI..THAI CHARACTER SARA A + {0x0E33, 0x0, propertyDISALLOWED}, // THAI CHARACTER SARA AM + {0x0E34, 0x0E3A, propertyPVALID}, // THAI CHARACTER SARA I..THAI CHARACTER PHINTH + {0x0E3B, 0x0E3E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0E3F, 0x0, propertyDISALLOWED}, // THAI CURRENCY SYMBOL BAHT + {0x0E40, 0x0E4E, propertyPVALID}, // THAI CHARACTER SARA E..THAI CHARACTER YAMAKK + {0x0E4F, 0x0, propertyDISALLOWED}, // THAI CHARACTER FONGMAN + {0x0E50, 0x0E59, propertyPVALID}, // THAI DIGIT ZERO..THAI DIGIT NINE + {0x0E5A, 0x0E5B, propertyDISALLOWED}, // THAI CHARACTER ANGKHANKHU..THAI CHARACTER KH + {0x0E5C, 0x0E80, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0E81, 0x0E82, propertyPVALID}, // LAO LETTER KO..LAO LETTER KHO SUNG + {0x0E83, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0E84, 0x0, propertyPVALID}, // LAO LETTER KHO TAM + {0x0E85, 0x0E86, propertyUNASSIGNED}, // 
<reserved>..<reserved> + {0x0E87, 0x0E88, propertyPVALID}, // LAO LETTER NGO..LAO LETTER CO + {0x0E89, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0E8A, 0x0, propertyPVALID}, // LAO LETTER SO TAM + {0x0E8B, 0x0E8C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0E8D, 0x0, propertyPVALID}, // LAO LETTER NYO + {0x0E8E, 0x0E93, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0E94, 0x0E97, propertyPVALID}, // LAO LETTER DO..LAO LETTER THO TAM + {0x0E98, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0E99, 0x0E9F, propertyPVALID}, // LAO LETTER NO..LAO LETTER FO SUNG + {0x0EA0, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EA1, 0x0EA3, propertyPVALID}, // LAO LETTER MO..LAO LETTER LO LING + {0x0EA4, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EA5, 0x0, propertyPVALID}, // LAO LETTER LO LOOT + {0x0EA6, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EA7, 0x0, propertyPVALID}, // LAO LETTER WO + {0x0EA8, 0x0EA9, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0EAA, 0x0EAB, propertyPVALID}, // LAO LETTER SO SUNG..LAO LETTER HO SUNG + {0x0EAC, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EAD, 0x0EB2, propertyPVALID}, // LAO LETTER O..LAO VOWEL SIGN AA + {0x0EB3, 0x0, propertyDISALLOWED}, // LAO VOWEL SIGN AM + {0x0EB4, 0x0EB9, propertyPVALID}, // LAO VOWEL SIGN I..LAO VOWEL SIGN UU + {0x0EBA, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EBB, 0x0EBD, propertyPVALID}, // LAO VOWEL SIGN MAI KON..LAO SEMIVOWEL SIGN N + {0x0EBE, 0x0EBF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0EC0, 0x0EC4, propertyPVALID}, // LAO VOWEL SIGN E..LAO VOWEL SIGN AI + {0x0EC5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EC6, 0x0, propertyPVALID}, // LAO KO LA + {0x0EC7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0EC8, 0x0ECD, propertyPVALID}, // LAO TONE MAI EK..LAO NIGGAHITA + {0x0ECE, 0x0ECF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0ED0, 0x0ED9, propertyPVALID}, // LAO DIGIT ZERO..LAO DIGIT NINE + {0x0EDA, 0x0EDB, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0EDC, 0x0EDD, propertyDISALLOWED}, // LAO HO NO..LAO HO MO + {0x0EDE, 0x0EFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0F00, 0x0, propertyPVALID}, // TIBETAN SYLLABLE OM + {0x0F01, 0x0F0A, propertyDISALLOWED}, // TIBETAN MARK GTER YIG MGO TRUNCATED A..TIBET + {0x0F0B, 0x0, propertyPVALID}, // TIBETAN MARK INTERSYLLABIC TSHEG + {0x0F0C, 0x0F17, propertyDISALLOWED}, // TIBETAN MARK DELIMITER TSHEG BSTAR..TIBETAN + {0x0F18, 0x0F19, propertyPVALID}, // TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN + {0x0F1A, 0x0F1F, propertyDISALLOWED}, // TIBETAN SIGN RDEL DKAR GCIG..TIBETAN SIGN RD + {0x0F20, 0x0F29, propertyPVALID}, // TIBETAN DIGIT ZERO..TIBETAN DIGIT NINE + {0x0F2A, 0x0F34, propertyDISALLOWED}, // TIBETAN DIGIT HALF ONE..TIBETAN MARK BSDUS R + {0x0F35, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG NYI ZLA + {0x0F36, 0x0, propertyDISALLOWED}, // TIBETAN MARK CARET -DZUD RTAGS BZHI MIG CAN + {0x0F37, 0x0, propertyPVALID}, // TIBETAN MARK NGAS BZUNG SGOR RTAGS + {0x0F38, 0x0, propertyDISALLOWED}, // TIBETAN MARK CHE MGO + {0x0F39, 0x0, propertyPVALID}, // TIBETAN MARK TSA -PHRU + {0x0F3A, 0x0F3D, propertyDISALLOWED}, // TIBETAN MARK GUG RTAGS GYON..TIBETAN MARK AN + {0x0F3E, 0x0F42, propertyPVALID}, // TIBETAN SIGN YAR TSHES..TIBETAN LETTER GA + {0x0F43, 0x0, propertyDISALLOWED}, // TIBETAN LETTER GHA + {0x0F44, 0x0F47, propertyPVALID}, // TIBETAN LETTER NGA..TIBETAN LETTER JA + {0x0F48, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0F49, 0x0F4C, propertyPVALID}, // TIBETAN LETTER NYA..TIBETAN LETTER DDA + 
{0x0F4D, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DDHA + {0x0F4E, 0x0F51, propertyPVALID}, // TIBETAN LETTER NNA..TIBETAN LETTER DA + {0x0F52, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DHA + {0x0F53, 0x0F56, propertyPVALID}, // TIBETAN LETTER NA..TIBETAN LETTER BA + {0x0F57, 0x0, propertyDISALLOWED}, // TIBETAN LETTER BHA + {0x0F58, 0x0F5B, propertyPVALID}, // TIBETAN LETTER MA..TIBETAN LETTER DZA + {0x0F5C, 0x0, propertyDISALLOWED}, // TIBETAN LETTER DZHA + {0x0F5D, 0x0F68, propertyPVALID}, // TIBETAN LETTER WA..TIBETAN LETTER A + {0x0F69, 0x0, propertyDISALLOWED}, // TIBETAN LETTER KSSA + {0x0F6A, 0x0F6C, propertyPVALID}, // TIBETAN LETTER FIXED-FORM RA..TIBETAN LETTER + {0x0F6D, 0x0F70, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0F71, 0x0F72, propertyPVALID}, // TIBETAN VOWEL SIGN AA..TIBETAN VOWEL SIGN I + {0x0F73, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN II + {0x0F74, 0x0, propertyPVALID}, // TIBETAN VOWEL SIGN U + {0x0F75, 0x0F79, propertyDISALLOWED}, // TIBETAN VOWEL SIGN UU..TIBETAN VOWEL SIGN VO + {0x0F7A, 0x0F80, propertyPVALID}, // TIBETAN VOWEL SIGN E..TIBETAN VOWEL SIGN REV + {0x0F81, 0x0, propertyDISALLOWED}, // TIBETAN VOWEL SIGN REVERSED II + {0x0F82, 0x0F84, propertyPVALID}, // TIBETAN SIGN NYI ZLA NAA DA..TIBETAN MARK HA + {0x0F85, 0x0, propertyDISALLOWED}, // TIBETAN MARK PALUTA + {0x0F86, 0x0F8B, propertyPVALID}, // TIBETAN SIGN LCI RTAGS..TIBETAN SIGN GRU MED + {0x0F8C, 0x0F8F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x0F90, 0x0F92, propertyPVALID}, // TIBETAN SUBJOINED LETTER KA..TIBETAN SUBJOIN + {0x0F93, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER GHA + {0x0F94, 0x0F97, propertyPVALID}, // TIBETAN SUBJOINED LETTER NGA..TIBETAN SUBJOI + {0x0F98, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0F99, 0x0F9C, propertyPVALID}, // TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOI + {0x0F9D, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DDHA + {0x0F9E, 0x0FA1, propertyPVALID}, // TIBETAN SUBJOINED LETTER NNA..TIBETAN SUBJOI + {0x0FA2, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DHA + {0x0FA3, 0x0FA6, propertyPVALID}, // TIBETAN SUBJOINED LETTER NA..TIBETAN SUBJOIN + {0x0FA7, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER BHA + {0x0FA8, 0x0FAB, propertyPVALID}, // TIBETAN SUBJOINED LETTER MA..TIBETAN SUBJOIN + {0x0FAC, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER DZHA + {0x0FAD, 0x0FB8, propertyPVALID}, // TIBETAN SUBJOINED LETTER WA..TIBETAN SUBJOIN + {0x0FB9, 0x0, propertyDISALLOWED}, // TIBETAN SUBJOINED LETTER KSSA + {0x0FBA, 0x0FBC, propertyPVALID}, // TIBETAN SUBJOINED LETTER FIXED-FORM WA..TIBE + {0x0FBD, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0FBE, 0x0FC5, propertyDISALLOWED}, // TIBETAN KU RU KHA..TIBETAN SYMBOL RDO RJE + {0x0FC6, 0x0, propertyPVALID}, // TIBETAN SYMBOL PADMA GDAN + {0x0FC7, 0x0FCC, propertyDISALLOWED}, // TIBETAN SYMBOL RDO RJE RGYA GRAM..TIBETAN SY + {0x0FCD, 0x0, propertyUNASSIGNED}, // <reserved> + {0x0FCE, 0x0FD8, propertyDISALLOWED}, // TIBETAN SIGN RDEL NAG RDEL DKAR..LEFT-FACING + {0x0FD9, 0x0FFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1000, 0x1049, propertyPVALID}, // MYANMAR LETTER KA..MYANMAR DIGIT NINE + {0x104A, 0x104F, propertyDISALLOWED}, // MYANMAR SIGN LITTLE SECTION..MYANMAR SYMBOL + {0x1050, 0x109D, propertyPVALID}, // MYANMAR LETTER SHA..MYANMAR VOWEL SIGN AITON + {0x109E, 0x10C5, propertyDISALLOWED}, // MYANMAR SYMBOL SHAN ONE..GEORGIAN CAPITAL LE + {0x10C6, 0x10CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10D0, 0x10FA, 
propertyPVALID}, // GEORGIAN LETTER AN..GEORGIAN LETTER AIN + {0x10FB, 0x10FC, propertyDISALLOWED}, // GEORGIAN PARAGRAPH SEPARATOR..MODIFIER LETTE + {0x10FD, 0x10FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1100, 0x11FF, propertyDISALLOWED}, // HANGUL CHOSEONG KIYEOK..HANGUL JONGSEONG SSA + {0x1200, 0x1248, propertyPVALID}, // ETHIOPIC SYLLABLE HA..ETHIOPIC SYLLABLE QWA + {0x1249, 0x0, propertyUNASSIGNED}, // <reserved> + {0x124A, 0x124D, propertyPVALID}, // ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE + {0x124E, 0x124F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1250, 0x1256, propertyPVALID}, // ETHIOPIC SYLLABLE QHA..ETHIOPIC SYLLABLE QHO + {0x1257, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1258, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE QHWA + {0x1259, 0x0, propertyUNASSIGNED}, // <reserved> + {0x125A, 0x125D, propertyPVALID}, // ETHIOPIC SYLLABLE QHWI..ETHIOPIC SYLLABLE QH + {0x125E, 0x125F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1260, 0x1288, propertyPVALID}, // ETHIOPIC SYLLABLE BA..ETHIOPIC SYLLABLE XWA + {0x1289, 0x0, propertyUNASSIGNED}, // <reserved> + {0x128A, 0x128D, propertyPVALID}, // ETHIOPIC SYLLABLE XWI..ETHIOPIC SYLLABLE XWE + {0x128E, 0x128F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1290, 0x12B0, propertyPVALID}, // ETHIOPIC SYLLABLE NA..ETHIOPIC SYLLABLE KWA + {0x12B1, 0x0, propertyUNASSIGNED}, // <reserved> + {0x12B2, 0x12B5, propertyPVALID}, // ETHIOPIC SYLLABLE KWI..ETHIOPIC SYLLABLE KWE + {0x12B6, 0x12B7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x12B8, 0x12BE, propertyPVALID}, // ETHIOPIC SYLLABLE KXA..ETHIOPIC SYLLABLE KXO + {0x12BF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x12C0, 0x0, propertyPVALID}, // ETHIOPIC SYLLABLE KXWA + {0x12C1, 0x0, propertyUNASSIGNED}, // <reserved> + {0x12C2, 0x12C5, propertyPVALID}, // ETHIOPIC SYLLABLE KXWI..ETHIOPIC SYLLABLE KX + {0x12C6, 0x12C7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x12C8, 0x12D6, propertyPVALID}, // ETHIOPIC SYLLABLE WA..ETHIOPIC SYLLABLE PHAR + {0x12D7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x12D8, 0x1310, propertyPVALID}, // ETHIOPIC SYLLABLE ZA..ETHIOPIC SYLLABLE GWA + {0x1311, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1312, 0x1315, propertyPVALID}, // ETHIOPIC SYLLABLE GWI..ETHIOPIC SYLLABLE GWE + {0x1316, 0x1317, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1318, 0x135A, propertyPVALID}, // ETHIOPIC SYLLABLE GGA..ETHIOPIC SYLLABLE FYA + {0x135B, 0x135E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x135F, 0x0, propertyPVALID}, // ETHIOPIC COMBINING GEMINATION MARK + {0x1360, 0x137C, propertyDISALLOWED}, // ETHIOPIC SECTION MARK..ETHIOPIC NUMBER TEN T + {0x137D, 0x137F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1380, 0x138F, propertyPVALID}, // ETHIOPIC SYLLABLE SEBATBEIT MWA..ETHIOPIC SY + {0x1390, 0x1399, propertyDISALLOWED}, // ETHIOPIC TONAL MARK YIZET..ETHIOPIC TONAL MA + {0x139A, 0x139F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x13A0, 0x13F4, propertyPVALID}, // CHEROKEE LETTER A..CHEROKEE LETTER YV + {0x13F5, 0x13FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1400, 0x0, propertyDISALLOWED}, // CANADIAN SYLLABICS HYPHEN + {0x1401, 0x166C, propertyPVALID}, // CANADIAN SYLLABICS E..CANADIAN SYLLABICS CAR + {0x166D, 0x166E, propertyDISALLOWED}, // CANADIAN SYLLABICS CHI SIGN..CANADIAN SYLLAB + {0x166F, 0x167F, propertyPVALID}, // CANADIAN SYLLABICS QAI..CANADIAN SYLLABICS B + {0x1680, 0x0, propertyDISALLOWED}, // OGHAM SPACE MARK + {0x1681, 0x169A, propertyPVALID}, // OGHAM LETTER 
BEITH..OGHAM LETTER PEITH + {0x169B, 0x169C, propertyDISALLOWED}, // OGHAM FEATHER MARK..OGHAM REVERSED FEATHER M + {0x169D, 0x169F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x16A0, 0x16EA, propertyPVALID}, // RUNIC LETTER FEHU FEOH FE F..RUNIC LETTER X + {0x16EB, 0x16F0, propertyDISALLOWED}, // RUNIC SINGLE PUNCTUATION..RUNIC BELGTHOR SYM + {0x16F1, 0x16FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1700, 0x170C, propertyPVALID}, // TAGALOG LETTER A..TAGALOG LETTER YA + {0x170D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x170E, 0x1714, propertyPVALID}, // TAGALOG LETTER LA..TAGALOG SIGN VIRAMA + {0x1715, 0x171F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1720, 0x1734, propertyPVALID}, // HANUNOO LETTER A..HANUNOO SIGN PAMUDPOD + {0x1735, 0x1736, propertyDISALLOWED}, // PHILIPPINE SINGLE PUNCTUATION..PHILIPPINE DO + {0x1737, 0x173F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1740, 0x1753, propertyPVALID}, // BUHID LETTER A..BUHID VOWEL SIGN U + {0x1754, 0x175F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1760, 0x176C, propertyPVALID}, // TAGBANWA LETTER A..TAGBANWA LETTER YA + {0x176D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x176E, 0x1770, propertyPVALID}, // TAGBANWA LETTER LA..TAGBANWA LETTER SA + {0x1771, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1772, 0x1773, propertyPVALID}, // TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U + {0x1774, 0x177F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1780, 0x17B3, propertyPVALID}, // KHMER LETTER KA..KHMER INDEPENDENT VOWEL QAU + {0x17B4, 0x17B5, propertyDISALLOWED}, // KHMER VOWEL INHERENT AQ..KHMER VOWEL INHEREN + {0x17B6, 0x17D3, propertyPVALID}, // KHMER VOWEL SIGN AA..KHMER SIGN BATHAMASAT + {0x17D4, 0x17D6, propertyDISALLOWED}, // KHMER SIGN KHAN..KHMER SIGN CAMNUC PII KUUH + {0x17D7, 0x0, propertyPVALID}, // KHMER SIGN LEK TOO + {0x17D8, 0x17DB, propertyDISALLOWED}, // KHMER SIGN BEYYAL..KHMER CURRENCY SYMBOL RIE + {0x17DC, 0x17DD, propertyPVALID}, // KHMER SIGN AVAKRAHASANYA..KHMER SIGN ATTHACA + {0x17DE, 0x17DF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x17E0, 0x17E9, propertyPVALID}, // KHMER DIGIT ZERO..KHMER DIGIT NINE + {0x17EA, 0x17EF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x17F0, 0x17F9, propertyDISALLOWED}, // KHMER SYMBOL LEK ATTAK SON..KHMER SYMBOL LEK + {0x17FA, 0x17FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1800, 0x180E, propertyDISALLOWED}, // MONGOLIAN BIRGA..MONGOLIAN VOWEL SEPARATOR + {0x180F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1810, 0x1819, propertyPVALID}, // MONGOLIAN DIGIT ZERO..MONGOLIAN DIGIT NINE + {0x181A, 0x181F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1820, 0x1877, propertyPVALID}, // MONGOLIAN LETTER A..MONGOLIAN LETTER MANCHU + {0x1878, 0x187F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1880, 0x18AA, propertyPVALID}, // MONGOLIAN LETTER ALI GALI ANUSVARA ONE..MONG + {0x18AB, 0x18AF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x18B0, 0x18F5, propertyPVALID}, // CANADIAN SYLLABICS OY..CANADIAN SYLLABICS CA + {0x18F6, 0x18FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1900, 0x191C, propertyPVALID}, // LIMBU VOWEL-CARRIER LETTER..LIMBU LETTER HA + {0x191D, 0x191F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1920, 0x192B, propertyPVALID}, // LIMBU VOWEL SIGN A..LIMBU SUBJOINED LETTER W + {0x192C, 0x192F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1930, 0x193B, propertyPVALID}, // LIMBU SMALL LETTER KA..LIMBU SIGN SA-I + {0x193C, 0x193F, propertyUNASSIGNED}, // 
<reserved>..<reserved> + {0x1940, 0x0, propertyDISALLOWED}, // LIMBU SIGN LOO + {0x1941, 0x1943, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1944, 0x1945, propertyDISALLOWED}, // LIMBU EXCLAMATION MARK..LIMBU QUESTION MARK + {0x1946, 0x196D, propertyPVALID}, // LIMBU DIGIT ZERO..TAI LE LETTER AI + {0x196E, 0x196F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1970, 0x1974, propertyPVALID}, // TAI LE LETTER TONE-2..TAI LE LETTER TONE-6 + {0x1975, 0x197F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1980, 0x19AB, propertyPVALID}, // NEW TAI LUE LETTER HIGH QA..NEW TAI LUE LETT + {0x19AC, 0x19AF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x19B0, 0x19C9, propertyPVALID}, // NEW TAI LUE VOWEL SIGN VOWEL SHORTENER..NEW + {0x19CA, 0x19CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x19D0, 0x19DA, propertyPVALID}, // NEW TAI LUE DIGIT ZERO..NEW TAI LUE THAM DIG + {0x19DB, 0x19DD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x19DE, 0x19FF, propertyDISALLOWED}, // NEW TAI LUE SIGN LAE..KHMER SYMBOL DAP-PRAM + {0x1A00, 0x1A1B, propertyPVALID}, // BUGINESE LETTER KA..BUGINESE VOWEL SIGN AE + {0x1A1C, 0x1A1D, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1A1E, 0x1A1F, propertyDISALLOWED}, // BUGINESE PALLAWA..BUGINESE END OF SECTION + {0x1A20, 0x1A5E, propertyPVALID}, // TAI THAM LETTER HIGH KA..TAI THAM CONSONANT + {0x1A5F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1A60, 0x1A7C, propertyPVALID}, // TAI THAM SIGN SAKOT..TAI THAM SIGN KHUEN-LUE + {0x1A7D, 0x1A7E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1A7F, 0x1A89, propertyPVALID}, // TAI THAM COMBINING CRYPTOGRAMMIC DOT..TAI TH + {0x1A8A, 0x1A8F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1A90, 0x1A99, propertyPVALID}, // TAI THAM THAM DIGIT ZERO..TAI THAM THAM DIGI + {0x1A9A, 0x1A9F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1AA0, 0x1AA6, propertyDISALLOWED}, // TAI THAM SIGN WIANG..TAI THAM SIGN REVERSED + {0x1AA7, 0x0, propertyPVALID}, // TAI THAM SIGN MAI YAMOK + {0x1AA8, 0x1AAD, propertyDISALLOWED}, // TAI THAM SIGN KAAN..TAI THAM SIGN CAANG + {0x1AAE, 0x1AFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1B00, 0x1B4B, propertyPVALID}, // BALINESE SIGN ULU RICEM..BALINESE LETTER ASY + {0x1B4C, 0x1B4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1B50, 0x1B59, propertyPVALID}, // BALINESE DIGIT ZERO..BALINESE DIGIT NINE + {0x1B5A, 0x1B6A, propertyDISALLOWED}, // BALINESE PANTI..BALINESE MUSICAL SYMBOL DANG + {0x1B6B, 0x1B73, propertyPVALID}, // BALINESE MUSICAL SYMBOL COMBINING TEGEH..BAL + {0x1B74, 0x1B7C, propertyDISALLOWED}, // BALINESE MUSICAL SYMBOL RIGHT-HAND OPEN DUG. 
+ {0x1B7D, 0x1B7F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1B80, 0x1BAA, propertyPVALID}, // SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PAMA + {0x1BAB, 0x1BAD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1BAE, 0x1BB9, propertyPVALID}, // SUNDANESE LETTER KHA..SUNDANESE DIGIT NINE + {0x1BBA, 0x1BFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1C00, 0x1C37, propertyPVALID}, // LEPCHA LETTER KA..LEPCHA SIGN NUKTA + {0x1C38, 0x1C3A, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1C3B, 0x1C3F, propertyDISALLOWED}, // LEPCHA PUNCTUATION TA-ROL..LEPCHA PUNCTUATIO + {0x1C40, 0x1C49, propertyPVALID}, // LEPCHA DIGIT ZERO..LEPCHA DIGIT NINE + {0x1C4A, 0x1C4C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1C4D, 0x1C7D, propertyPVALID}, // LEPCHA LETTER TTA..OL CHIKI AHAD + {0x1C7E, 0x1C7F, propertyDISALLOWED}, // OL CHIKI PUNCTUATION MUCAAD..OL CHIKI PUNCTU + {0x1C80, 0x1CCF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1CD0, 0x1CD2, propertyPVALID}, // VEDIC TONE KARSHANA..VEDIC TONE PRENKHA + {0x1CD3, 0x0, propertyDISALLOWED}, // VEDIC SIGN NIHSHVASA + {0x1CD4, 0x1CF2, propertyPVALID}, // VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC + {0x1CF3, 0x1CFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D00, 0x1D2B, propertyPVALID}, // LATIN LETTER SMALL CAPITAL A..CYRILLIC LETTE + {0x1D2C, 0x1D2E, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL A..MODIFIER LETTER C + {0x1D2F, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL BARRED B + {0x1D30, 0x1D3A, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL D..MODIFIER LETTER C + {0x1D3B, 0x0, propertyPVALID}, // MODIFIER LETTER CAPITAL REVERSED N + {0x1D3C, 0x1D4D, propertyDISALLOWED}, // MODIFIER LETTER CAPITAL O..MODIFIER LETTER S + {0x1D4E, 0x0, propertyPVALID}, // MODIFIER LETTER SMALL TURNED I + {0x1D4F, 0x1D6A, propertyDISALLOWED}, // MODIFIER LETTER SMALL K..GREEK SUBSCRIPT SMA + {0x1D6B, 0x1D77, propertyPVALID}, // LATIN SMALL LETTER UE..LATIN SMALL LETTER TU + {0x1D78, 0x0, propertyDISALLOWED}, // MODIFIER LETTER CYRILLIC EN + {0x1D79, 0x1D9A, propertyPVALID}, // LATIN SMALL LETTER INSULAR G..LATIN SMALL LE + {0x1D9B, 0x1DBF, propertyDISALLOWED}, // MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER + {0x1DC0, 0x1DE6, propertyPVALID}, // COMBINING DOTTED GRAVE ACCENT..COMBINING LAT + {0x1DE7, 0x1DFC, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1DFD, 0x1DFF, propertyPVALID}, // COMBINING ALMOST EQUAL TO BELOW..COMBINING R + {0x1E00, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH RING BELOW + {0x1E01, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH RING BELOW + {0x1E02, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT ABOVE + {0x1E03, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT ABOVE + {0x1E04, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH DOT BELOW + {0x1E05, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH DOT BELOW + {0x1E06, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER B WITH LINE BELOW + {0x1E07, 0x0, propertyPVALID}, // LATIN SMALL LETTER B WITH LINE BELOW + {0x1E08, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER C WITH CEDILLA AND ACUT + {0x1E09, 0x0, propertyPVALID}, // LATIN SMALL LETTER C WITH CEDILLA AND ACUTE + {0x1E0A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT ABOVE + {0x1E0B, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT ABOVE + {0x1E0C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH DOT BELOW + {0x1E0D, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH DOT BELOW + {0x1E0E, 0x0, 
propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH LINE BELOW + {0x1E0F, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH LINE BELOW + {0x1E10, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CEDILLA + {0x1E11, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CEDILLA + {0x1E12, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW + {0x1E13, 0x0, propertyPVALID}, // LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW + {0x1E14, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND GRAVE + {0x1E15, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND GRAVE + {0x1E16, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH MACRON AND ACUTE + {0x1E17, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH MACRON AND ACUTE + {0x1E18, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW + {0x1E19, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW + {0x1E1A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE BELOW + {0x1E1B, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE BELOW + {0x1E1C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CEDILLA AND BREV + {0x1E1D, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CEDILLA AND BREVE + {0x1E1E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER F WITH DOT ABOVE + {0x1E1F, 0x0, propertyPVALID}, // LATIN SMALL LETTER F WITH DOT ABOVE + {0x1E20, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER G WITH MACRON + {0x1E21, 0x0, propertyPVALID}, // LATIN SMALL LETTER G WITH MACRON + {0x1E22, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT ABOVE + {0x1E23, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT ABOVE + {0x1E24, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DOT BELOW + {0x1E25, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DOT BELOW + {0x1E26, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH DIAERESIS + {0x1E27, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DIAERESIS + {0x1E28, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH CEDILLA + {0x1E29, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH CEDILLA + {0x1E2A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H WITH BREVE BELOW + {0x1E2B, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH BREVE BELOW + {0x1E2C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH TILDE BELOW + {0x1E2D, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH TILDE BELOW + {0x1E2E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH DIAERESIS AND AC + {0x1E2F, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DIAERESIS AND ACUT + {0x1E30, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH ACUTE + {0x1E31, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH ACUTE + {0x1E32, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DOT BELOW + {0x1E33, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DOT BELOW + {0x1E34, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH LINE BELOW + {0x1E35, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH LINE BELOW + {0x1E36, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW + {0x1E37, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW + {0x1E38, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOT BELOW AND MA + {0x1E39, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOT BELOW AND MACR + {0x1E3A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH LINE BELOW + {0x1E3B, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH LINE BELOW + {0x1E3C, 0x0, 
propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW + {0x1E3D, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW + {0x1E3E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH ACUTE + {0x1E3F, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH ACUTE + {0x1E40, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT ABOVE + {0x1E41, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT ABOVE + {0x1E42, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER M WITH DOT BELOW + {0x1E43, 0x0, propertyPVALID}, // LATIN SMALL LETTER M WITH DOT BELOW + {0x1E44, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT ABOVE + {0x1E45, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT ABOVE + {0x1E46, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH DOT BELOW + {0x1E47, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH DOT BELOW + {0x1E48, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH LINE BELOW + {0x1E49, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH LINE BELOW + {0x1E4A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW + {0x1E4B, 0x0, propertyPVALID}, // LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW + {0x1E4C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND ACUTE + {0x1E4D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND ACUTE + {0x1E4E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH TILDE AND DIAERE + {0x1E4F, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH TILDE AND DIAERESI + {0x1E50, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND GRAVE + {0x1E51, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND GRAVE + {0x1E52, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH MACRON AND ACUTE + {0x1E53, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH MACRON AND ACUTE + {0x1E54, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH ACUTE + {0x1E55, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH ACUTE + {0x1E56, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH DOT ABOVE + {0x1E57, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH DOT ABOVE + {0x1E58, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT ABOVE + {0x1E59, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT ABOVE + {0x1E5A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW + {0x1E5B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW + {0x1E5C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH DOT BELOW AND MA + {0x1E5D, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH DOT BELOW AND MACR + {0x1E5E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R WITH LINE BELOW + {0x1E5F, 0x0, propertyPVALID}, // LATIN SMALL LETTER R WITH LINE BELOW + {0x1E60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT ABOVE + {0x1E61, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT ABOVE + {0x1E62, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW + {0x1E63, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT BELOW + {0x1E64, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH ACUTE AND DOT AB + {0x1E65, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH ACUTE AND DOT ABOV + {0x1E66, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH CARON AND DOT AB + {0x1E67, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH CARON AND DOT ABOV + {0x1E68, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER S WITH DOT BELOW AND DO + {0x1E69, 0x0, propertyPVALID}, // LATIN SMALL LETTER S WITH DOT 
BELOW AND DOT + {0x1E6A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT ABOVE + {0x1E6B, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT ABOVE + {0x1E6C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH DOT BELOW + {0x1E6D, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH DOT BELOW + {0x1E6E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH LINE BELOW + {0x1E6F, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH LINE BELOW + {0x1E70, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW + {0x1E71, 0x0, propertyPVALID}, // LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW + {0x1E72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DIAERESIS BELOW + {0x1E73, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DIAERESIS BELOW + {0x1E74, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE BELOW + {0x1E75, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE BELOW + {0x1E76, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW + {0x1E77, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW + {0x1E78, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH TILDE AND ACUTE + {0x1E79, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH TILDE AND ACUTE + {0x1E7A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH MACRON AND DIAER + {0x1E7B, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH MACRON AND DIAERES + {0x1E7C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH TILDE + {0x1E7D, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH TILDE + {0x1E7E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DOT BELOW + {0x1E7F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DOT BELOW + {0x1E80, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH GRAVE + {0x1E81, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH GRAVE + {0x1E82, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH ACUTE + {0x1E83, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH ACUTE + {0x1E84, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DIAERESIS + {0x1E85, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DIAERESIS + {0x1E86, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT ABOVE + {0x1E87, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT ABOVE + {0x1E88, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH DOT BELOW + {0x1E89, 0x0, propertyPVALID}, // LATIN SMALL LETTER W WITH DOT BELOW + {0x1E8A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DOT ABOVE + {0x1E8B, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DOT ABOVE + {0x1E8C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER X WITH DIAERESIS + {0x1E8D, 0x0, propertyPVALID}, // LATIN SMALL LETTER X WITH DIAERESIS + {0x1E8E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT ABOVE + {0x1E8F, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT ABOVE + {0x1E90, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH CIRCUMFLEX + {0x1E91, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH CIRCUMFLEX + {0x1E92, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DOT BELOW + {0x1E93, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DOT BELOW + {0x1E94, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH LINE BELOW + {0x1E95, 0x1E99, propertyPVALID}, // LATIN SMALL LETTER Z WITH LINE BELOW..LATIN + {0x1E9A, 0x1E9B, propertyDISALLOWED}, // LATIN SMALL LETTER A WITH RIGHT HALF RING..L + {0x1E9C, 0x1E9D, propertyPVALID}, // LATIN SMALL LETTER LONG S WITH DIAGONAL STRO + 
{0x1E9E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER SHARP S + {0x1E9F, 0x0, propertyPVALID}, // LATIN SMALL LETTER DELTA + {0x1EA0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH DOT BELOW + {0x1EA1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH DOT BELOW + {0x1EA2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH HOOK ABOVE + {0x1EA3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH HOOK ABOVE + {0x1EA4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND A + {0x1EA5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACU + {0x1EA6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND G + {0x1EA7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRA + {0x1EA8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND H + {0x1EA9, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOO + {0x1EAA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND T + {0x1EAB, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND TIL + {0x1EAC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND D + {0x1EAD, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT + {0x1EAE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND ACUTE + {0x1EAF, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND ACUTE + {0x1EB0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND GRAVE + {0x1EB1, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND GRAVE + {0x1EB2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND HOOK A + {0x1EB3, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND HOOK ABO + {0x1EB4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND TILDE + {0x1EB5, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND TILDE + {0x1EB6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER A WITH BREVE AND DOT BE + {0x1EB7, 0x0, propertyPVALID}, // LATIN SMALL LETTER A WITH BREVE AND DOT BELO + {0x1EB8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH DOT BELOW + {0x1EB9, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH DOT BELOW + {0x1EBA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH HOOK ABOVE + {0x1EBB, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH HOOK ABOVE + {0x1EBC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH TILDE + {0x1EBD, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH TILDE + {0x1EBE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND A + {0x1EBF, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACU + {0x1EC0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND G + {0x1EC1, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRA + {0x1EC2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND H + {0x1EC3, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOO + {0x1EC4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND T + {0x1EC5, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND TIL + {0x1EC6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND D + {0x1EC7, 0x0, propertyPVALID}, // LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT + {0x1EC8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER I WITH HOOK ABOVE + {0x1EC9, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH HOOK ABOVE + {0x1ECA, 0x0, propertyDISALLOWED}, // LATIN 
CAPITAL LETTER I WITH DOT BELOW + {0x1ECB, 0x0, propertyPVALID}, // LATIN SMALL LETTER I WITH DOT BELOW + {0x1ECC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH DOT BELOW + {0x1ECD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH DOT BELOW + {0x1ECE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HOOK ABOVE + {0x1ECF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HOOK ABOVE + {0x1ED0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND A + {0x1ED1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACU + {0x1ED2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND G + {0x1ED3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRA + {0x1ED4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND H + {0x1ED5, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOO + {0x1ED6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND T + {0x1ED7, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND TIL + {0x1ED8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND D + {0x1ED9, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT + {0x1EDA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND ACUTE + {0x1EDB, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND ACUTE + {0x1EDC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND GRAVE + {0x1EDD, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND GRAVE + {0x1EDE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND HOOK AB + {0x1EDF, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND HOOK ABOV + {0x1EE0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND TILDE + {0x1EE1, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND TILDE + {0x1EE2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH HORN AND DOT BEL + {0x1EE3, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH HORN AND DOT BELOW + {0x1EE4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH DOT BELOW + {0x1EE5, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH DOT BELOW + {0x1EE6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HOOK ABOVE + {0x1EE7, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HOOK ABOVE + {0x1EE8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND ACUTE + {0x1EE9, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND ACUTE + {0x1EEA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND GRAVE + {0x1EEB, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND GRAVE + {0x1EEC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND HOOK AB + {0x1EED, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND HOOK ABOV + {0x1EEE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND TILDE + {0x1EEF, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND TILDE + {0x1EF0, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER U WITH HORN AND DOT BEL + {0x1EF1, 0x0, propertyPVALID}, // LATIN SMALL LETTER U WITH HORN AND DOT BELOW + {0x1EF2, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH GRAVE + {0x1EF3, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH GRAVE + {0x1EF4, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH DOT BELOW + {0x1EF5, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH DOT BELOW + {0x1EF6, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH HOOK ABOVE + {0x1EF7, 0x0, 
propertyPVALID}, // LATIN SMALL LETTER Y WITH HOOK ABOVE + {0x1EF8, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH TILDE + {0x1EF9, 0x0, propertyPVALID}, // LATIN SMALL LETTER Y WITH TILDE + {0x1EFA, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH LL + {0x1EFB, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH LL + {0x1EFC, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER MIDDLE-WELSH V + {0x1EFD, 0x0, propertyPVALID}, // LATIN SMALL LETTER MIDDLE-WELSH V + {0x1EFE, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Y WITH LOOP + {0x1EFF, 0x1F07, propertyPVALID}, // LATIN SMALL LETTER Y WITH LOOP..GREEK SMALL + {0x1F08, 0x1F0F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ALPHA WITH PSILI..GREEK + {0x1F10, 0x1F15, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH PSILI..GREEK + {0x1F16, 0x1F17, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F18, 0x1F1D, propertyDISALLOWED}, // GREEK CAPITAL LETTER EPSILON WITH PSILI..GRE + {0x1F1E, 0x1F1F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F20, 0x1F27, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PSILI..GREEK SMA + {0x1F28, 0x1F2F, propertyDISALLOWED}, // GREEK CAPITAL LETTER ETA WITH PSILI..GREEK C + {0x1F30, 0x1F37, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PSILI..GREEK SM + {0x1F38, 0x1F3F, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH PSILI..GREEK + {0x1F40, 0x1F45, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH PSILI..GREEK + {0x1F46, 0x1F47, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F48, 0x1F4D, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMICRON WITH PSILI..GRE + {0x1F4E, 0x1F4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F50, 0x1F57, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH PSILI..GREEK + {0x1F58, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F59, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA + {0x1F5A, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F5B, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND + {0x1F5C, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F5D, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND + {0x1F5E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F5F, 0x0, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH DASIA AND + {0x1F60, 0x1F67, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PSILI..GREEK S + {0x1F68, 0x1F6F, propertyDISALLOWED}, // GREEK CAPITAL LETTER OMEGA WITH PSILI..GREEK + {0x1F70, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VARIA + {0x1F71, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH OXIA + {0x1F72, 0x0, propertyPVALID}, // GREEK SMALL LETTER EPSILON WITH VARIA + {0x1F73, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER EPSILON WITH OXIA + {0x1F74, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH VARIA + {0x1F75, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH OXIA + {0x1F76, 0x0, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VARIA + {0x1F77, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH OXIA + {0x1F78, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMICRON WITH VARIA + {0x1F79, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMICRON WITH OXIA + {0x1F7A, 0x0, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VARIA + {0x1F7B, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH OXIA + {0x1F7C, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH VARIA + {0x1F7D, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH OXIA + {0x1F7E, 0x1F7F, propertyUNASSIGNED}, 
// <reserved>..<reserved> + {0x1F80, 0x1FAF, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PSILI AND YPOG + {0x1FB0, 0x1FB1, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH VRACHY..GREEK + {0x1FB2, 0x1FB4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH VARIA AND YPOG + {0x1FB5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1FB6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI + {0x1FB7, 0x1FC4, propertyDISALLOWED}, // GREEK SMALL LETTER ALPHA WITH PERISPOMENI AN + {0x1FC5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1FC6, 0x0, propertyPVALID}, // GREEK SMALL LETTER ETA WITH PERISPOMENI + {0x1FC7, 0x1FCF, propertyDISALLOWED}, // GREEK SMALL LETTER ETA WITH PERISPOMENI AND + {0x1FD0, 0x1FD2, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH VRACHY..GREEK S + {0x1FD3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER IOTA WITH DIALYTIKA AND O + {0x1FD4, 0x1FD5, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1FD6, 0x1FD7, propertyPVALID}, // GREEK SMALL LETTER IOTA WITH PERISPOMENI..GR + {0x1FD8, 0x1FDB, propertyDISALLOWED}, // GREEK CAPITAL LETTER IOTA WITH VRACHY..GREEK + {0x1FDC, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1FDD, 0x1FDF, propertyDISALLOWED}, // GREEK DASIA AND VARIA..GREEK DASIA AND PERIS + {0x1FE0, 0x1FE2, propertyPVALID}, // GREEK SMALL LETTER UPSILON WITH VRACHY..GREE + {0x1FE3, 0x0, propertyDISALLOWED}, // GREEK SMALL LETTER UPSILON WITH DIALYTIKA AN + {0x1FE4, 0x1FE7, propertyPVALID}, // GREEK SMALL LETTER RHO WITH PSILI..GREEK SMA + {0x1FE8, 0x1FEF, propertyDISALLOWED}, // GREEK CAPITAL LETTER UPSILON WITH VRACHY..GR + {0x1FF0, 0x1FF1, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1FF2, 0x1FF4, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH VARIA AND YPOG + {0x1FF5, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1FF6, 0x0, propertyPVALID}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI + {0x1FF7, 0x1FFE, propertyDISALLOWED}, // GREEK SMALL LETTER OMEGA WITH PERISPOMENI AN + {0x1FFF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2000, 0x200B, propertyDISALLOWED}, // EN QUAD..ZERO WIDTH SPACE + {0x200C, 0x200D, propertyCONTEXTJ}, // ZERO WIDTH NON-JOINER..ZERO WIDTH JOINER + {0x200E, 0x2064, propertyDISALLOWED}, // LEFT-TO-RIGHT MARK..INVISIBLE PLUS + {0x2065, 0x2069, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x206A, 0x2071, propertyDISALLOWED}, // INHIBIT SYMMETRIC SWAPPING..SUPERSCRIPT LATI + {0x2072, 0x2073, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2074, 0x208E, propertyDISALLOWED}, // SUPERSCRIPT FOUR..SUBSCRIPT RIGHT PARENTHESI + {0x208F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2090, 0x2094, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER A..LATIN SUBSCR + {0x2095, 0x209F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x20A0, 0x20B8, propertyDISALLOWED}, // EURO-CURRENCY SIGN..TENGE SIGN + {0x20B9, 0x20CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x20D0, 0x20F0, propertyDISALLOWED}, // COMBINING LEFT HARPOON ABOVE..COMBINING ASTE + {0x20F1, 0x20FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2100, 0x214D, propertyDISALLOWED}, // ACCOUNT OF..AKTIESELSKAB + {0x214E, 0x0, propertyPVALID}, // TURNED SMALL F + {0x214F, 0x2183, propertyDISALLOWED}, // SYMBOL FOR SAMARITAN SOURCE..ROMAN NUMERAL R + {0x2184, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C + {0x2185, 0x2189, propertyDISALLOWED}, // ROMAN NUMERAL SIX LATE FORM..VULGAR FRACTION + {0x218A, 0x218F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2190, 0x23E8, propertyDISALLOWED}, // LEFTWARDS 
ARROW..DECIMAL EXPONENT SYMBOL + {0x23E9, 0x23FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2400, 0x2426, propertyDISALLOWED}, // SYMBOL FOR NULL..SYMBOL FOR SUBSTITUTE FORM + {0x2427, 0x243F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2440, 0x244A, propertyDISALLOWED}, // OCR HOOK..OCR DOUBLE BACKSLASH + {0x244B, 0x245F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2460, 0x26CD, propertyDISALLOWED}, // CIRCLED DIGIT ONE..DISABLED CAR + {0x26CE, 0x0, propertyUNASSIGNED}, // <reserved> + {0x26CF, 0x26E1, propertyDISALLOWED}, // PICK..RESTRICTED LEFT ENTRY-2 + {0x26E2, 0x0, propertyUNASSIGNED}, // <reserved> + {0x26E3, 0x0, propertyDISALLOWED}, // HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE + {0x26E4, 0x26E7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x26E8, 0x26FF, propertyDISALLOWED}, // BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZ + {0x2700, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2701, 0x2704, propertyDISALLOWED}, // UPPER BLADE SCISSORS..WHITE SCISSORS + {0x2705, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2706, 0x2709, propertyDISALLOWED}, // TELEPHONE LOCATION SIGN..ENVELOPE + {0x270A, 0x270B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x270C, 0x2727, propertyDISALLOWED}, // VICTORY HAND..WHITE FOUR POINTED STAR + {0x2728, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2729, 0x274B, propertyDISALLOWED}, // STRESS OUTLINED WHITE STAR..HEAVY EIGHT TEAR + {0x274C, 0x0, propertyUNASSIGNED}, // <reserved> + {0x274D, 0x0, propertyDISALLOWED}, // SHADOWED WHITE CIRCLE + {0x274E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x274F, 0x2752, propertyDISALLOWED}, // LOWER RIGHT DROP-SHADOWED WHITE SQUARE..UPPE + {0x2753, 0x2755, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2756, 0x275E, propertyDISALLOWED}, // BLACK DIAMOND MINUS WHITE X..HEAVY DOUBLE CO + {0x275F, 0x2760, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2761, 0x2794, propertyDISALLOWED}, // CURVED STEM PARAGRAPH SIGN ORNAMENT..HEAVY W + {0x2795, 0x2797, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2798, 0x27AF, propertyDISALLOWED}, // HEAVY SOUTH EAST ARROW..NOTCHED LOWER RIGHT- + {0x27B0, 0x0, propertyUNASSIGNED}, // <reserved> + {0x27B1, 0x27BE, propertyDISALLOWED}, // NOTCHED UPPER RIGHT-SHADOWED WHITE RIGHTWARD + {0x27BF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x27C0, 0x27CA, propertyDISALLOWED}, // THREE DIMENSIONAL ANGLE..VERTICAL BAR WITH H + {0x27CB, 0x0, propertyUNASSIGNED}, // <reserved> + {0x27CC, 0x0, propertyDISALLOWED}, // LONG DIVISION + {0x27CD, 0x27CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x27D0, 0x2B4C, propertyDISALLOWED}, // WHITE DIAMOND WITH CENTRED DOT..RIGHTWARDS A + {0x2B4D, 0x2B4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2B50, 0x2B59, propertyDISALLOWED}, // WHITE MEDIUM STAR..HEAVY CIRCLED SALTIRE + {0x2B5A, 0x2BFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2C00, 0x2C2E, propertyDISALLOWED}, // GLAGOLITIC CAPITAL LETTER AZU..GLAGOLITIC CA + {0x2C2F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2C30, 0x2C5E, propertyPVALID}, // GLAGOLITIC SMALL LETTER AZU..GLAGOLITIC SMAL + {0x2C5F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2C60, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH DOUBLE BAR + {0x2C61, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH DOUBLE BAR + {0x2C62, 0x2C64, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH MIDDLE TILDE..LA + {0x2C65, 0x2C66, propertyPVALID}, // LATIN SMALL LETTER A WITH STROKE..LATIN SMAL + {0x2C67, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER H 
WITH DESCENDER + {0x2C68, 0x0, propertyPVALID}, // LATIN SMALL LETTER H WITH DESCENDER + {0x2C69, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DESCENDER + {0x2C6A, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DESCENDER + {0x2C6B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Z WITH DESCENDER + {0x2C6C, 0x0, propertyPVALID}, // LATIN SMALL LETTER Z WITH DESCENDER + {0x2C6D, 0x2C70, propertyDISALLOWED}, // LATIN CAPITAL LETTER ALPHA..LATIN CAPITAL LE + {0x2C71, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH RIGHT HOOK + {0x2C72, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER W WITH HOOK + {0x2C73, 0x2C74, propertyPVALID}, // LATIN SMALL LETTER W WITH HOOK..LATIN SMALL + {0x2C75, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HALF H + {0x2C76, 0x2C7B, propertyPVALID}, // LATIN SMALL LETTER HALF H..LATIN LETTER SMAL + {0x2C7C, 0x2C80, propertyDISALLOWED}, // LATIN SUBSCRIPT SMALL LETTER J..COPTIC CAPIT + {0x2C81, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ALFA + {0x2C82, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER VIDA + {0x2C83, 0x0, propertyPVALID}, // COPTIC SMALL LETTER VIDA + {0x2C84, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER GAMMA + {0x2C85, 0x0, propertyPVALID}, // COPTIC SMALL LETTER GAMMA + {0x2C86, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DALDA + {0x2C87, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DALDA + {0x2C88, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER EIE + {0x2C89, 0x0, propertyPVALID}, // COPTIC SMALL LETTER EIE + {0x2C8A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SOU + {0x2C8B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SOU + {0x2C8C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER ZATA + {0x2C8D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER ZATA + {0x2C8E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER HATE + {0x2C8F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER HATE + {0x2C90, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER THETHE + {0x2C91, 0x0, propertyPVALID}, // COPTIC SMALL LETTER THETHE + {0x2C92, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER IAUDA + {0x2C93, 0x0, propertyPVALID}, // COPTIC SMALL LETTER IAUDA + {0x2C94, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KAPA + {0x2C95, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KAPA + {0x2C96, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER LAULA + {0x2C97, 0x0, propertyPVALID}, // COPTIC SMALL LETTER LAULA + {0x2C98, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER MI + {0x2C99, 0x0, propertyPVALID}, // COPTIC SMALL LETTER MI + {0x2C9A, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER NI + {0x2C9B, 0x0, propertyPVALID}, // COPTIC SMALL LETTER NI + {0x2C9C, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KSI + {0x2C9D, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KSI + {0x2C9E, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER O + {0x2C9F, 0x0, propertyPVALID}, // COPTIC SMALL LETTER O + {0x2CA0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PI + {0x2CA1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PI + {0x2CA2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER RO + {0x2CA3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER RO + {0x2CA4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SIMA + {0x2CA5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SIMA + {0x2CA6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER TAU + {0x2CA7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER TAU + {0x2CA8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER UA + {0x2CA9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER UA + {0x2CAA, 
0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER FI + {0x2CAB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER FI + {0x2CAC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER KHI + {0x2CAD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER KHI + {0x2CAE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER PSI + {0x2CAF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER PSI + {0x2CB0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OOU + {0x2CB1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OOU + {0x2CB2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P ALEF + {0x2CB3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P ALEF + {0x2CB4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC AIN + {0x2CB5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC AIN + {0x2CB6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC EIE + {0x2CB7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC EIE + {0x2CB8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P KAPA + {0x2CB9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P KAPA + {0x2CBA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P NI + {0x2CBB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P NI + {0x2CBC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC NI + {0x2CBD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC NI + {0x2CBE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC OOU + {0x2CBF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC OOU + {0x2CC0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER SAMPI + {0x2CC1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER SAMPI + {0x2CC2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CROSSED SHEI + {0x2CC3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CROSSED SHEI + {0x2CC4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHEI + {0x2CC5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC SHEI + {0x2CC6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC ESH + {0x2CC7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC ESH + {0x2CC8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER AKHMIMIC KHEI + {0x2CC9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER AKHMIMIC KHEI + {0x2CCA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER DIALECT-P HORI + {0x2CCB, 0x0, propertyPVALID}, // COPTIC SMALL LETTER DIALECT-P HORI + {0x2CCC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HORI + {0x2CCD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HORI + {0x2CCE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HA + {0x2CCF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HA + {0x2CD0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER L-SHAPED HA + {0x2CD1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER L-SHAPED HA + {0x2CD2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HEI + {0x2CD3, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HEI + {0x2CD4, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC HAT + {0x2CD5, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC HAT + {0x2CD6, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC GANGIA + {0x2CD7, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC GANGIA + {0x2CD8, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC DJA + {0x2CD9, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD COPTIC DJA + {0x2CDA, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD COPTIC SHIMA + {0x2CDB, 0x0, propertyPVALID}, // 
COPTIC SMALL LETTER OLD COPTIC SHIMA + {0x2CDC, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN SHIMA + {0x2CDD, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN SHIMA + {0x2CDE, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NGI + {0x2CDF, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NGI + {0x2CE0, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN NYI + {0x2CE1, 0x0, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN NYI + {0x2CE2, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER OLD NUBIAN WAU + {0x2CE3, 0x2CE4, propertyPVALID}, // COPTIC SMALL LETTER OLD NUBIAN WAU..COPTIC S + {0x2CE5, 0x2CEB, propertyDISALLOWED}, // COPTIC SYMBOL MI RO..COPTIC CAPITAL LETTER C + {0x2CEC, 0x0, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC SHEI + {0x2CED, 0x0, propertyDISALLOWED}, // COPTIC CAPITAL LETTER CRYPTOGRAMMIC GANGIA + {0x2CEE, 0x2CF1, propertyPVALID}, // COPTIC SMALL LETTER CRYPTOGRAMMIC GANGIA..CO + {0x2CF2, 0x2CF8, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2CF9, 0x2CFF, propertyDISALLOWED}, // COPTIC OLD NUBIAN FULL STOP..COPTIC MORPHOLO + {0x2D00, 0x2D25, propertyPVALID}, // GEORGIAN SMALL LETTER AN..GEORGIAN SMALL LET + {0x2D26, 0x2D2F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2D30, 0x2D65, propertyPVALID}, // TIFINAGH LETTER YA..TIFINAGH LETTER YAZZ + {0x2D66, 0x2D6E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2D6F, 0x0, propertyDISALLOWED}, // TIFINAGH MODIFIER LETTER LABIALIZATION MARK + {0x2D70, 0x2D7F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2D80, 0x2D96, propertyPVALID}, // ETHIOPIC SYLLABLE LOA..ETHIOPIC SYLLABLE GGW + {0x2D97, 0x2D9F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2DA0, 0x2DA6, propertyPVALID}, // ETHIOPIC SYLLABLE SSA..ETHIOPIC SYLLABLE SSO + {0x2DA7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DA8, 0x2DAE, propertyPVALID}, // ETHIOPIC SYLLABLE CCA..ETHIOPIC SYLLABLE CCO + {0x2DAF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DB0, 0x2DB6, propertyPVALID}, // ETHIOPIC SYLLABLE ZZA..ETHIOPIC SYLLABLE ZZO + {0x2DB7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DB8, 0x2DBE, propertyPVALID}, // ETHIOPIC SYLLABLE CCHA..ETHIOPIC SYLLABLE CC + {0x2DBF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DC0, 0x2DC6, propertyPVALID}, // ETHIOPIC SYLLABLE QYA..ETHIOPIC SYLLABLE QYO + {0x2DC7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DC8, 0x2DCE, propertyPVALID}, // ETHIOPIC SYLLABLE KYA..ETHIOPIC SYLLABLE KYO + {0x2DCF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DD0, 0x2DD6, propertyPVALID}, // ETHIOPIC SYLLABLE XYA..ETHIOPIC SYLLABLE XYO + {0x2DD7, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DD8, 0x2DDE, propertyPVALID}, // ETHIOPIC SYLLABLE GYA..ETHIOPIC SYLLABLE GYO + {0x2DDF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2DE0, 0x2DFF, propertyPVALID}, // COMBINING CYRILLIC LETTER BE..COMBINING CYRI + {0x2E00, 0x2E2E, propertyDISALLOWED}, // RIGHT ANGLE SUBSTITUTION MARKER..REVERSED QU + {0x2E2F, 0x0, propertyPVALID}, // VERTICAL TILDE + {0x2E30, 0x2E31, propertyDISALLOWED}, // RING POINT..WORD SEPARATOR MIDDLE DOT + {0x2E32, 0x2E7F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2E80, 0x2E99, propertyDISALLOWED}, // CJK RADICAL REPEAT..CJK RADICAL RAP + {0x2E9A, 0x0, propertyUNASSIGNED}, // <reserved> + {0x2E9B, 0x2EF3, propertyDISALLOWED}, // CJK RADICAL CHOKE..CJK RADICAL C-SIMPLIFIED + {0x2EF4, 0x2EFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2F00, 0x2FD5, propertyDISALLOWED}, // KANGXI RADICAL ONE..KANGXI RADICAL FLUTE + {0x2FD6, 
0x2FEF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2FF0, 0x2FFB, propertyDISALLOWED}, // IDEOGRAPHIC DESCRIPTION CHARACTER LEFT TO RI + {0x2FFC, 0x2FFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x3000, 0x3004, propertyDISALLOWED}, // IDEOGRAPHIC SPACE..JAPANESE INDUSTRIAL STAND + {0x3005, 0x3007, propertyPVALID}, // IDEOGRAPHIC ITERATION MARK..IDEOGRAPHIC NUMB + {0x3008, 0x3029, propertyDISALLOWED}, // LEFT ANGLE BRACKET..HANGZHOU NUMERAL NINE + {0x302A, 0x302D, propertyPVALID}, // IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENT + {0x302E, 0x303B, propertyDISALLOWED}, // HANGUL SINGLE DOT TONE MARK..VERTICAL IDEOGR + {0x303C, 0x0, propertyPVALID}, // MASU MARK + {0x303D, 0x303F, propertyDISALLOWED}, // PART ALTERNATION MARK..IDEOGRAPHIC HALF FILL + {0x3040, 0x0, propertyUNASSIGNED}, // <reserved> + {0x3041, 0x3096, propertyPVALID}, // HIRAGANA LETTER SMALL A..HIRAGANA LETTER SMA + {0x3097, 0x3098, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x3099, 0x309A, propertyPVALID}, // COMBINING KATAKANA-HIRAGANA VOICED SOUND MAR + {0x309B, 0x309C, propertyDISALLOWED}, // KATAKANA-HIRAGANA VOICED SOUND MARK..KATAKAN + {0x309D, 0x309E, propertyPVALID}, // HIRAGANA ITERATION MARK..HIRAGANA VOICED ITE + {0x309F, 0x30A0, propertyDISALLOWED}, // HIRAGANA DIGRAPH YORI..KATAKANA-HIRAGANA DOU + {0x30A1, 0x30FA, propertyPVALID}, // KATAKANA LETTER SMALL A..KATAKANA LETTER VO + {0x30FB, 0x0, propertyCONTEXTO}, // KATAKANA MIDDLE DOT + {0x30FC, 0x30FE, propertyPVALID}, // KATAKANA-HIRAGANA PROLONGED SOUND MARK..KATA + {0x30FF, 0x0, propertyDISALLOWED}, // KATAKANA DIGRAPH KOTO + {0x3100, 0x3104, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x3105, 0x312D, propertyPVALID}, // BOPOMOFO LETTER B..BOPOMOFO LETTER IH + {0x312E, 0x3130, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x3131, 0x318E, propertyDISALLOWED}, // HANGUL LETTER KIYEOK..HANGUL LETTER ARAEAE + {0x318F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x3190, 0x319F, propertyDISALLOWED}, // IDEOGRAPHIC ANNOTATION LINKING MARK..IDEOGRA + {0x31A0, 0x31B7, propertyPVALID}, // BOPOMOFO LETTER BU..BOPOMOFO FINAL LETTER H + {0x31B8, 0x31BF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x31C0, 0x31E3, propertyDISALLOWED}, // CJK STROKE T..CJK STROKE Q + {0x31E4, 0x31EF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x31F0, 0x31FF, propertyPVALID}, // KATAKANA LETTER SMALL KU..KATAKANA LETTER SM + {0x3200, 0x321E, propertyDISALLOWED}, // PARENTHESIZED HANGUL KIYEOK..PARENTHESIZED K + {0x321F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x3220, 0x32FE, propertyDISALLOWED}, // PARENTHESIZED IDEOGRAPH ONE..CIRCLED KATAKAN + {0x32FF, 0x0, propertyUNASSIGNED}, // <reserved> + {0x3300, 0x33FF, propertyDISALLOWED}, // SQUARE APAATO..SQUARE GAL + {0x3400, 0x4DB5, propertyPVALID}, // <CJK Ideograph Extension A>..<CJK Ideograph + {0x4DB6, 0x4DBF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x4DC0, 0x4DFF, propertyDISALLOWED}, // HEXAGRAM FOR THE CREATIVE HEAVEN..HEXAGRAM F + {0x4E00, 0x9FCB, propertyPVALID}, // <CJK Ideograph>..<CJK Ideograph> + {0x9FCC, 0x9FFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA000, 0xA48C, propertyPVALID}, // YI SYLLABLE IT..YI SYLLABLE YYR + {0xA48D, 0xA48F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA490, 0xA4C6, propertyDISALLOWED}, // YI RADICAL QOT..YI RADICAL KE + {0xA4C7, 0xA4CF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA4D0, 0xA4FD, propertyPVALID}, // LISU LETTER BA..LISU LETTER TONE MYA JEU + {0xA4FE, 0xA4FF, propertyDISALLOWED}, // LISU PUNCTUATION 
COMMA..LISU PUNCTUATION FUL + {0xA500, 0xA60C, propertyPVALID}, // VAI SYLLABLE EE..VAI SYLLABLE LENGTHENER + {0xA60D, 0xA60F, propertyDISALLOWED}, // VAI COMMA..VAI QUESTION MARK + {0xA610, 0xA62B, propertyPVALID}, // VAI SYLLABLE NDOLE FA..VAI SYLLABLE NDOLE DO + {0xA62C, 0xA63F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA640, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZEMLYA + {0xA641, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZEMLYA + {0xA642, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZELO + {0xA643, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZELO + {0xA644, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED DZE + {0xA645, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED DZE + {0xA646, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTA + {0xA647, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTA + {0xA648, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DJERV + {0xA649, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DJERV + {0xA64A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOGRAPH UK + {0xA64B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOGRAPH UK + {0xA64C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BROAD OMEGA + {0xA64D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BROAD OMEGA + {0xA64E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER NEUTRAL YER + {0xA64F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER NEUTRAL YER + {0xA650, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YERU WITH BACK YER + {0xA651, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YERU WITH BACK YER + {0xA652, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED YAT + {0xA653, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED YAT + {0xA654, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER REVERSED YU + {0xA655, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER REVERSED YU + {0xA656, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED A + {0xA657, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED A + {0xA658, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CLOSED LITTLE YUS + {0xA659, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CLOSED LITTLE YUS + {0xA65A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BLENDED YUS + {0xA65B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BLENDED YUS + {0xA65C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER IOTIFIED CLOSED LITT + {0xA65D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER IOTIFIED CLOSED LITTLE + {0xA65E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER YN + {0xA65F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER YN + {0xA660, 0xA661, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA662, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT DE + {0xA663, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT DE + {0xA664, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EL + {0xA665, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EL + {0xA666, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SOFT EM + {0xA667, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SOFT EM + {0xA668, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER MONOCULAR O + {0xA669, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER MONOCULAR O + {0xA66A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER BINOCULAR O + {0xA66B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER BINOCULAR O + {0xA66C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DOUBLE MONOCULAR O + {0xA66D, 0xA66F, propertyPVALID}, // CYRILLIC 
SMALL LETTER DOUBLE MONOCULAR O..CO + {0xA670, 0xA673, propertyDISALLOWED}, // COMBINING CYRILLIC TEN MILLIONS SIGN..SLAVON + {0xA674, 0xA67B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA67C, 0xA67D, propertyPVALID}, // COMBINING CYRILLIC KAVYKA..COMBINING CYRILLI + {0xA67E, 0x0, propertyDISALLOWED}, // CYRILLIC KAVYKA + {0xA67F, 0x0, propertyPVALID}, // CYRILLIC PAYEROK + {0xA680, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DWE + {0xA681, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DWE + {0xA682, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZWE + {0xA683, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZWE + {0xA684, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER ZHWE + {0xA685, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER ZHWE + {0xA686, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER CCHE + {0xA687, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER CCHE + {0xA688, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER DZZE + {0xA689, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER DZZE + {0xA68A, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TE WITH MIDDLE HOOK + {0xA68B, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TE WITH MIDDLE HOOK + {0xA68C, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TWE + {0xA68D, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TWE + {0xA68E, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSWE + {0xA68F, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSWE + {0xA690, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TSSE + {0xA691, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TSSE + {0xA692, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER TCHE + {0xA693, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER TCHE + {0xA694, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER HWE + {0xA695, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER HWE + {0xA696, 0x0, propertyDISALLOWED}, // CYRILLIC CAPITAL LETTER SHWE + {0xA697, 0x0, propertyPVALID}, // CYRILLIC SMALL LETTER SHWE + {0xA698, 0xA69F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA6A0, 0xA6E5, propertyPVALID}, // BAMUM LETTER A..BAMUM LETTER KI + {0xA6E6, 0xA6EF, propertyDISALLOWED}, // BAMUM LETTER MO..BAMUM LETTER KOGHOM + {0xA6F0, 0xA6F1, propertyPVALID}, // BAMUM COMBINING MARK KOQNDON..BAMUM COMBININ + {0xA6F2, 0xA6F7, propertyDISALLOWED}, // BAMUM NJAEMLI..BAMUM QUESTION MARK + {0xA6F8, 0xA6FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA700, 0xA716, propertyDISALLOWED}, // MODIFIER LETTER CHINESE TONE YIN PING..MODIF + {0xA717, 0xA71F, propertyPVALID}, // MODIFIER LETTER DOT VERTICAL BAR..MODIFIER L + {0xA720, 0xA722, propertyDISALLOWED}, // MODIFIER LETTER STRESS AND HIGH TONE..LATIN + {0xA723, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL ALEF + {0xA724, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER EGYPTOLOGICAL AIN + {0xA725, 0x0, propertyPVALID}, // LATIN SMALL LETTER EGYPTOLOGICAL AIN + {0xA726, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER HENG + {0xA727, 0x0, propertyPVALID}, // LATIN SMALL LETTER HENG + {0xA728, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TZ + {0xA729, 0x0, propertyPVALID}, // LATIN SMALL LETTER TZ + {0xA72A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TRESILLO + {0xA72B, 0x0, propertyPVALID}, // LATIN SMALL LETTER TRESILLO + {0xA72C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO + {0xA72D, 0x0, propertyPVALID}, // LATIN SMALL LETTER CUATRILLO + {0xA72E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CUATRILLO WITH COMMA + {0xA72F, 0xA731, 
propertyPVALID}, // LATIN SMALL LETTER CUATRILLO WITH COMMA..LAT + {0xA732, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AA + {0xA733, 0x0, propertyPVALID}, // LATIN SMALL LETTER AA + {0xA734, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AO + {0xA735, 0x0, propertyPVALID}, // LATIN SMALL LETTER AO + {0xA736, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AU + {0xA737, 0x0, propertyPVALID}, // LATIN SMALL LETTER AU + {0xA738, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV + {0xA739, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV + {0xA73A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AV WITH HORIZONTAL BAR + {0xA73B, 0x0, propertyPVALID}, // LATIN SMALL LETTER AV WITH HORIZONTAL BAR + {0xA73C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER AY + {0xA73D, 0x0, propertyPVALID}, // LATIN SMALL LETTER AY + {0xA73E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER REVERSED C WITH DOT + {0xA73F, 0x0, propertyPVALID}, // LATIN SMALL LETTER REVERSED C WITH DOT + {0xA740, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE + {0xA741, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE + {0xA742, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH DIAGONAL STROKE + {0xA743, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH DIAGONAL STROKE + {0xA744, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER K WITH STROKE AND DIAGO + {0xA745, 0x0, propertyPVALID}, // LATIN SMALL LETTER K WITH STROKE AND DIAGONA + {0xA746, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER BROKEN L + {0xA747, 0x0, propertyPVALID}, // LATIN SMALL LETTER BROKEN L + {0xA748, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER L WITH HIGH STROKE + {0xA749, 0x0, propertyPVALID}, // LATIN SMALL LETTER L WITH HIGH STROKE + {0xA74A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LONG STROKE OVER + {0xA74B, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LONG STROKE OVERLA + {0xA74C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER O WITH LOOP + {0xA74D, 0x0, propertyPVALID}, // LATIN SMALL LETTER O WITH LOOP + {0xA74E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER OO + {0xA74F, 0x0, propertyPVALID}, // LATIN SMALL LETTER OO + {0xA750, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH STROKE THROUGH D + {0xA751, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH STROKE THROUGH DES + {0xA752, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH FLOURISH + {0xA753, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH FLOURISH + {0xA754, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER P WITH SQUIRREL TAIL + {0xA755, 0x0, propertyPVALID}, // LATIN SMALL LETTER P WITH SQUIRREL TAIL + {0xA756, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH STROKE THROUGH D + {0xA757, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH STROKE THROUGH DES + {0xA758, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER Q WITH DIAGONAL STROKE + {0xA759, 0x0, propertyPVALID}, // LATIN SMALL LETTER Q WITH DIAGONAL STROKE + {0xA75A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER R ROTUNDA + {0xA75B, 0x0, propertyPVALID}, // LATIN SMALL LETTER R ROTUNDA + {0xA75C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER RUM ROTUNDA + {0xA75D, 0x0, propertyPVALID}, // LATIN SMALL LETTER RUM ROTUNDA + {0xA75E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER V WITH DIAGONAL STROKE + {0xA75F, 0x0, propertyPVALID}, // LATIN SMALL LETTER V WITH DIAGONAL STROKE + {0xA760, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VY + {0xA761, 0x0, propertyPVALID}, // LATIN SMALL LETTER VY + {0xA762, 
0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VISIGOTHIC Z + {0xA763, 0x0, propertyPVALID}, // LATIN SMALL LETTER VISIGOTHIC Z + {0xA764, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE + {0xA765, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE + {0xA766, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER THORN WITH STROKE THROU + {0xA767, 0x0, propertyPVALID}, // LATIN SMALL LETTER THORN WITH STROKE THROUGH + {0xA768, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER VEND + {0xA769, 0x0, propertyPVALID}, // LATIN SMALL LETTER VEND + {0xA76A, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER ET + {0xA76B, 0x0, propertyPVALID}, // LATIN SMALL LETTER ET + {0xA76C, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER IS + {0xA76D, 0x0, propertyPVALID}, // LATIN SMALL LETTER IS + {0xA76E, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER CON + {0xA76F, 0x0, propertyPVALID}, // LATIN SMALL LETTER CON + {0xA770, 0x0, propertyDISALLOWED}, // MODIFIER LETTER US + {0xA771, 0xA778, propertyPVALID}, // LATIN SMALL LETTER DUM..LATIN SMALL LETTER U + {0xA779, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR D + {0xA77A, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR D + {0xA77B, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR F + {0xA77C, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR F + {0xA77D, 0xA77E, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR G..LATIN CAPITA + {0xA77F, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED INSULAR G + {0xA780, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER TURNED L + {0xA781, 0x0, propertyPVALID}, // LATIN SMALL LETTER TURNED L + {0xA782, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR R + {0xA783, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR R + {0xA784, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR S + {0xA785, 0x0, propertyPVALID}, // LATIN SMALL LETTER INSULAR S + {0xA786, 0x0, propertyDISALLOWED}, // LATIN CAPITAL LETTER INSULAR T + {0xA787, 0xA788, propertyPVALID}, // LATIN SMALL LETTER INSULAR T..MODIFIER LETTE + {0xA789, 0xA78B, propertyDISALLOWED}, // MODIFIER LETTER COLON..LATIN CAPITAL LETTER + {0xA78C, 0x0, propertyPVALID}, // LATIN SMALL LETTER SALTILLO + {0xA78D, 0xA7FA, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA7FB, 0xA827, propertyPVALID}, // LATIN EPIGRAPHIC LETTER REVERSED F..SYLOTI N + {0xA828, 0xA82B, propertyDISALLOWED}, // SYLOTI NAGRI POETRY MARK-1..SYLOTI NAGRI POE + {0xA82C, 0xA82F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA830, 0xA839, propertyDISALLOWED}, // NORTH INDIC FRACTION ONE QUARTER..NORTH INDI + {0xA83A, 0xA83F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA840, 0xA873, propertyPVALID}, // PHAGS-PA LETTER KA..PHAGS-PA LETTER CANDRABI + {0xA874, 0xA877, propertyDISALLOWED}, // PHAGS-PA SINGLE HEAD MARK..PHAGS-PA MARK DOU + {0xA878, 0xA87F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA880, 0xA8C4, propertyPVALID}, // SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VI + {0xA8C5, 0xA8CD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA8CE, 0xA8CF, propertyDISALLOWED}, // SAURASHTRA DANDA..SAURASHTRA DOUBLE DANDA + {0xA8D0, 0xA8D9, propertyPVALID}, // SAURASHTRA DIGIT ZERO..SAURASHTRA DIGIT NINE + {0xA8DA, 0xA8DF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA8E0, 0xA8F7, propertyPVALID}, // COMBINING DEVANAGARI DIGIT ZERO..DEVANAGARI + {0xA8F8, 0xA8FA, propertyDISALLOWED}, // DEVANAGARI SIGN PUSHPIKA..DEVANAGARI CARET + {0xA8FB, 0x0, propertyPVALID}, // DEVANAGARI HEADSTROKE + {0xA8FC, 
0xA8FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA900, 0xA92D, propertyPVALID}, // KAYAH LI DIGIT ZERO..KAYAH LI TONE CALYA PLO + {0xA92E, 0xA92F, propertyDISALLOWED}, // KAYAH LI SIGN CWI..KAYAH LI SIGN SHYA + {0xA930, 0xA953, propertyPVALID}, // REJANG LETTER KA..REJANG VIRAMA + {0xA954, 0xA95E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA95F, 0xA97C, propertyDISALLOWED}, // REJANG SECTION MARK..HANGUL CHOSEONG SSANGYE + {0xA97D, 0xA97F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA980, 0xA9C0, propertyPVALID}, // JAVANESE SIGN PANYANGGA..JAVANESE PANGKON + {0xA9C1, 0xA9CD, propertyDISALLOWED}, // JAVANESE LEFT RERENGGAN..JAVANESE TURNED PAD + {0xA9CE, 0x0, propertyUNASSIGNED}, // <reserved> + {0xA9CF, 0xA9D9, propertyPVALID}, // JAVANESE PANGRANGKEP..JAVANESE DIGIT NINE + {0xA9DA, 0xA9DD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xA9DE, 0xA9DF, propertyDISALLOWED}, // JAVANESE PADA TIRTA TUMETES..JAVANESE PADA I + {0xA9E0, 0xA9FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAA00, 0xAA36, propertyPVALID}, // CHAM LETTER A..CHAM CONSONANT SIGN WA + {0xAA37, 0xAA3F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAA40, 0xAA4D, propertyPVALID}, // CHAM LETTER FINAL K..CHAM CONSONANT SIGN FIN + {0xAA4E, 0xAA4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAA50, 0xAA59, propertyPVALID}, // CHAM DIGIT ZERO..CHAM DIGIT NINE + {0xAA5A, 0xAA5B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAA5C, 0xAA5F, propertyDISALLOWED}, // CHAM PUNCTUATION SPIRAL..CHAM PUNCTUATION TR + {0xAA60, 0xAA76, propertyPVALID}, // MYANMAR LETTER KHAMTI GA..MYANMAR LOGOGRAM K + {0xAA77, 0xAA79, propertyDISALLOWED}, // MYANMAR SYMBOL AITON EXCLAMATION..MYANMAR SY + {0xAA7A, 0xAA7B, propertyPVALID}, // MYANMAR LETTER AITON RA..MYANMAR SIGN PAO KA + {0xAA7C, 0xAA7F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAA80, 0xAAC2, propertyPVALID}, // TAI VIET LETTER LOW KO..TAI VIET TONE MAI SO + {0xAAC3, 0xAADA, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAADB, 0xAADD, propertyPVALID}, // TAI VIET SYMBOL KON..TAI VIET SYMBOL SAM + {0xAADE, 0xAADF, propertyDISALLOWED}, // TAI VIET SYMBOL HO HOI..TAI VIET SYMBOL KOI + {0xAAE0, 0xABBF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xABC0, 0xABEA, propertyPVALID}, // MEETEI MAYEK LETTER KOK..MEETEI MAYEK VOWEL + {0xABEB, 0x0, propertyDISALLOWED}, // MEETEI MAYEK CHEIKHEI + {0xABEC, 0xABED, propertyPVALID}, // MEETEI MAYEK LUM IYEK..MEETEI MAYEK APUN IYE + {0xABEE, 0xABEF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xABF0, 0xABF9, propertyPVALID}, // MEETEI MAYEK DIGIT ZERO..MEETEI MAYEK DIGIT + {0xABFA, 0xABFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAC00, 0xD7A3, propertyPVALID}, // <Hangul Syllable>..<Hangul Syllable> + {0xD7A4, 0xD7AF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xD7B0, 0xD7C6, propertyDISALLOWED}, // HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARA + {0xD7C7, 0xD7CA, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xD7CB, 0xD7FB, propertyDISALLOWED}, // HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEO + {0xD7FC, 0xD7FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xD800, 0xFA0D, propertyDISALLOWED}, // <Non Private Use High Surrogate>..CJK COMPAT + {0xFA0E, 0xFA0F, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA0E..CJK COMPAT + {0xFA10, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA10 + {0xFA11, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA11 + {0xFA12, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA12 + {0xFA13, 
0xFA14, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA13..CJK COMPAT + {0xFA15, 0xFA1E, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA15..CJK COMPAT + {0xFA1F, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA1F + {0xFA20, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA20 + {0xFA21, 0x0, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA21 + {0xFA22, 0x0, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA22 + {0xFA23, 0xFA24, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA23..CJK COMPAT + {0xFA25, 0xFA26, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA25..CJK COMPAT + {0xFA27, 0xFA29, propertyPVALID}, // CJK COMPATIBILITY IDEOGRAPH-FA27..CJK COMPAT + {0xFA2A, 0xFA2D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA2A..CJK COMPAT + {0xFA2E, 0xFA2F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFA30, 0xFA6D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA30..CJK COMPAT + {0xFA6E, 0xFA6F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFA70, 0xFAD9, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-FA70..CJK COMPAT + {0xFADA, 0xFAFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFB00, 0xFB06, propertyDISALLOWED}, // LATIN SMALL LIGATURE FF..LATIN SMALL LIGATUR + {0xFB07, 0xFB12, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFB13, 0xFB17, propertyDISALLOWED}, // ARMENIAN SMALL LIGATURE MEN NOW..ARMENIAN SM + {0xFB18, 0xFB1C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFB1D, 0x0, propertyDISALLOWED}, // HEBREW LETTER YOD WITH HIRIQ + {0xFB1E, 0x0, propertyPVALID}, // HEBREW POINT JUDEO-SPANISH VARIKA + {0xFB1F, 0xFB36, propertyDISALLOWED}, // HEBREW LIGATURE YIDDISH YOD YOD PATAH..HEBRE + {0xFB37, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFB38, 0xFB3C, propertyDISALLOWED}, // HEBREW LETTER TET WITH DAGESH..HEBREW LETTER + {0xFB3D, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFB3E, 0x0, propertyDISALLOWED}, // HEBREW LETTER MEM WITH DAGESH + {0xFB3F, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFB40, 0xFB41, propertyDISALLOWED}, // HEBREW LETTER NUN WITH DAGESH..HEBREW LETTER + {0xFB42, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFB43, 0xFB44, propertyDISALLOWED}, // HEBREW LETTER FINAL PE WITH DAGESH..HEBREW L + {0xFB45, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFB46, 0xFBB1, propertyDISALLOWED}, // HEBREW LETTER TSADI WITH DAGESH..ARABIC LETT + {0xFBB2, 0xFBD2, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFBD3, 0xFD3F, propertyDISALLOWED}, // ARABIC LETTER NG ISOLATED FORM..ORNATE RIGHT + {0xFD40, 0xFD4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFD50, 0xFD8F, propertyDISALLOWED}, // ARABIC LIGATURE TEH WITH JEEM WITH MEEM INIT + {0xFD90, 0xFD91, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFD92, 0xFDC7, propertyDISALLOWED}, // ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INI + {0xFDC8, 0xFDCF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFDD0, 0xFDFD, propertyDISALLOWED}, // <noncharacter>..ARABIC LIGATURE BISMILLAH AR + {0xFDFE, 0xFDFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFE00, 0xFE19, propertyDISALLOWED}, // VARIATION SELECTOR-1..PRESENTATION FORM FOR + {0xFE1A, 0xFE1F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFE20, 0xFE26, propertyPVALID}, // COMBINING LIGATURE LEFT HALF..COMBINING CONJ + {0xFE27, 0xFE2F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFE30, 0xFE52, propertyDISALLOWED}, // PRESENTATION FORM FOR VERTICAL TWO DOT LEADE + {0xFE53, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFE54, 0xFE66, propertyDISALLOWED}, // SMALL 
SEMICOLON..SMALL EQUALS SIGN + {0xFE67, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFE68, 0xFE6B, propertyDISALLOWED}, // SMALL REVERSE SOLIDUS..SMALL COMMERCIAL AT + {0xFE6C, 0xFE6F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFE70, 0xFE72, propertyDISALLOWED}, // ARABIC FATHATAN ISOLATED FORM..ARABIC DAMMAT + {0xFE73, 0x0, propertyPVALID}, // ARABIC TAIL FRAGMENT + {0xFE74, 0x0, propertyDISALLOWED}, // ARABIC KASRATAN ISOLATED FORM + {0xFE75, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFE76, 0xFEFC, propertyDISALLOWED}, // ARABIC FATHA ISOLATED FORM..ARABIC LIGATURE + {0xFEFD, 0xFEFE, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFEFF, 0x0, propertyDISALLOWED}, // ZERO WIDTH NO-BREAK SPACE + {0xFF00, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFF01, 0xFFBE, propertyDISALLOWED}, // FULLWIDTH EXCLAMATION MARK..HALFWIDTH HANGUL + {0xFFBF, 0xFFC1, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFC2, 0xFFC7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER A..HALFWIDTH HANGUL + {0xFFC8, 0xFFC9, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFCA, 0xFFCF, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YEO..HALFWIDTH HANGU + {0xFFD0, 0xFFD1, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFD2, 0xFFD7, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER YO..HALFWIDTH HANGUL + {0xFFD8, 0xFFD9, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFDA, 0xFFDC, propertyDISALLOWED}, // HALFWIDTH HANGUL LETTER EU..HALFWIDTH HANGUL + {0xFFDD, 0xFFDF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFE0, 0xFFE6, propertyDISALLOWED}, // FULLWIDTH CENT SIGN..FULLWIDTH WON SIGN + {0xFFE7, 0x0, propertyUNASSIGNED}, // <reserved> + {0xFFE8, 0xFFEE, propertyDISALLOWED}, // HALFWIDTH FORMS LIGHT VERTICAL..HALFWIDTH WH + {0xFFEF, 0xFFF8, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xFFF9, 0xFFFF, propertyDISALLOWED}, // INTERLINEAR ANNOTATION ANCHOR..<noncharacter + {0x10000, 0x1000B, propertyPVALID}, // LINEAR B SYLLABLE B008 A..LINEAR B SYLLABLE + {0x1000C, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1000D, 0x10026, propertyPVALID}, // LINEAR B SYLLABLE B036 JO..LINEAR B SYLLABLE + {0x10027, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10028, 0x1003A, propertyPVALID}, // LINEAR B SYLLABLE B060 RA..LINEAR B SYLLABLE + {0x1003B, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1003C, 0x1003D, propertyPVALID}, // LINEAR B SYLLABLE B017 ZA..LINEAR B SYLLABLE + {0x1003E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1003F, 0x1004D, propertyPVALID}, // LINEAR B SYLLABLE B020 ZO..LINEAR B SYLLABLE + {0x1004E, 0x1004F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10050, 0x1005D, propertyPVALID}, // LINEAR B SYMBOL B018..LINEAR B SYMBOL B089 + {0x1005E, 0x1007F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10080, 0x100FA, propertyPVALID}, // LINEAR B IDEOGRAM B100 MAN..LINEAR B IDEOGRA + {0x100FB, 0x100FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10100, 0x10102, propertyDISALLOWED}, // AEGEAN WORD SEPARATOR LINE..AEGEAN CHECK MAR + {0x10103, 0x10106, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10107, 0x10133, propertyDISALLOWED}, // AEGEAN NUMBER ONE..AEGEAN NUMBER NINETY THOU + {0x10134, 0x10136, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10137, 0x1018A, propertyDISALLOWED}, // AEGEAN WEIGHT BASE UNIT..GREEK ZERO SIGN + {0x1018B, 0x1018F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10190, 0x1019B, propertyDISALLOWED}, // ROMAN SEXTANS SIGN..ROMAN CENTURIAL SIGN + {0x1019C, 0x101CF, propertyUNASSIGNED}, // <reserved>..<reserved> + 
{0x101D0, 0x101FC, propertyDISALLOWED}, // PHAISTOS DISC SIGN PEDESTRIAN..PHAISTOS DISC + {0x101FD, 0x0, propertyPVALID}, // PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE + {0x101FE, 0x1027F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10280, 0x1029C, propertyPVALID}, // LYCIAN LETTER A..LYCIAN LETTER X + {0x1029D, 0x1029F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x102A0, 0x102D0, propertyPVALID}, // CARIAN LETTER A..CARIAN LETTER UUU3 + {0x102D1, 0x102FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10300, 0x1031E, propertyPVALID}, // OLD ITALIC LETTER A..OLD ITALIC LETTER UU + {0x1031F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10320, 0x10323, propertyDISALLOWED}, // OLD ITALIC NUMERAL ONE..OLD ITALIC NUMERAL F + {0x10324, 0x1032F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10330, 0x10340, propertyPVALID}, // GOTHIC LETTER AHSA..GOTHIC LETTER PAIRTHRA + {0x10341, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINETY + {0x10342, 0x10349, propertyPVALID}, // GOTHIC LETTER RAIDA..GOTHIC LETTER OTHAL + {0x1034A, 0x0, propertyDISALLOWED}, // GOTHIC LETTER NINE HUNDRED + {0x1034B, 0x1037F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10380, 0x1039D, propertyPVALID}, // UGARITIC LETTER ALPA..UGARITIC LETTER SSU + {0x1039E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1039F, 0x0, propertyDISALLOWED}, // UGARITIC WORD DIVIDER + {0x103A0, 0x103C3, propertyPVALID}, // OLD PERSIAN SIGN A..OLD PERSIAN SIGN HA + {0x103C4, 0x103C7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x103C8, 0x103CF, propertyPVALID}, // OLD PERSIAN SIGN AURAMAZDAA..OLD PERSIAN SIG + {0x103D0, 0x103D5, propertyDISALLOWED}, // OLD PERSIAN WORD DIVIDER..OLD PERSIAN NUMBER + {0x103D6, 0x103FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10400, 0x10427, propertyDISALLOWED}, // DESERET CAPITAL LETTER LONG I..DESERET CAPIT + {0x10428, 0x1049D, propertyPVALID}, // DESERET SMALL LETTER LONG I..OSMANYA LETTER + {0x1049E, 0x1049F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x104A0, 0x104A9, propertyPVALID}, // OSMANYA DIGIT ZERO..OSMANYA DIGIT NINE + {0x104AA, 0x107FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10800, 0x10805, propertyPVALID}, // CYPRIOT SYLLABLE A..CYPRIOT SYLLABLE JA + {0x10806, 0x10807, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10808, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE JO + {0x10809, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1080A, 0x10835, propertyPVALID}, // CYPRIOT SYLLABLE KA..CYPRIOT SYLLABLE WO + {0x10836, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10837, 0x10838, propertyPVALID}, // CYPRIOT SYLLABLE XA..CYPRIOT SYLLABLE XE + {0x10839, 0x1083B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1083C, 0x0, propertyPVALID}, // CYPRIOT SYLLABLE ZA + {0x1083D, 0x1083E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1083F, 0x10855, propertyPVALID}, // CYPRIOT SYLLABLE ZO..IMPERIAL ARAMAIC LETTER + {0x10856, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10857, 0x1085F, propertyDISALLOWED}, // IMPERIAL ARAMAIC SECTION SIGN..IMPERIAL ARAM + {0x10860, 0x108FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10900, 0x10915, propertyPVALID}, // PHOENICIAN LETTER ALF..PHOENICIAN LETTER TAU + {0x10916, 0x1091B, propertyDISALLOWED}, // PHOENICIAN NUMBER ONE..PHOENICIAN NUMBER THR + {0x1091C, 0x1091E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1091F, 0x0, propertyDISALLOWED}, // PHOENICIAN WORD SEPARATOR + {0x10920, 0x10939, propertyPVALID}, // LYDIAN LETTER A..LYDIAN LETTER C + {0x1093A, 0x1093E, propertyUNASSIGNED}, 
// <reserved>..<reserved> + {0x1093F, 0x0, propertyDISALLOWED}, // LYDIAN TRIANGULAR MARK + {0x10940, 0x109FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A00, 0x10A03, propertyPVALID}, // KHAROSHTHI LETTER A..KHAROSHTHI VOWEL SIGN V + {0x10A04, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10A05, 0x10A06, propertyPVALID}, // KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SI + {0x10A07, 0x10A0B, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A0C, 0x10A13, propertyPVALID}, // KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI LET + {0x10A14, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10A15, 0x10A17, propertyPVALID}, // KHAROSHTHI LETTER CA..KHAROSHTHI LETTER JA + {0x10A18, 0x0, propertyUNASSIGNED}, // <reserved> + {0x10A19, 0x10A33, propertyPVALID}, // KHAROSHTHI LETTER NYA..KHAROSHTHI LETTER TTT + {0x10A34, 0x10A37, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A38, 0x10A3A, propertyPVALID}, // KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN D + {0x10A3B, 0x10A3E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A3F, 0x0, propertyPVALID}, // KHAROSHTHI VIRAMA + {0x10A40, 0x10A47, propertyDISALLOWED}, // KHAROSHTHI DIGIT ONE..KHAROSHTHI NUMBER ONE + {0x10A48, 0x10A4F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A50, 0x10A58, propertyDISALLOWED}, // KHAROSHTHI PUNCTUATION DOT..KHAROSHTHI PUNCT + {0x10A59, 0x10A5F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10A60, 0x10A7C, propertyPVALID}, // OLD SOUTH ARABIAN LETTER HE..OLD SOUTH ARABI + {0x10A7D, 0x10A7F, propertyDISALLOWED}, // OLD SOUTH ARABIAN NUMBER ONE..OLD SOUTH ARAB + {0x10A80, 0x10AFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10B00, 0x10B35, propertyPVALID}, // AVESTAN LETTER A..AVESTAN LETTER HE + {0x10B36, 0x10B38, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10B39, 0x10B3F, propertyDISALLOWED}, // AVESTAN ABBREVIATION MARK..LARGE ONE RING OV + {0x10B40, 0x10B55, propertyPVALID}, // INSCRIPTIONAL PARTHIAN LETTER ALEPH..INSCRIP + {0x10B56, 0x10B57, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10B58, 0x10B5F, propertyDISALLOWED}, // INSCRIPTIONAL PARTHIAN NUMBER ONE..INSCRIPTI + {0x10B60, 0x10B72, propertyPVALID}, // INSCRIPTIONAL PAHLAVI LETTER ALEPH..INSCRIPT + {0x10B73, 0x10B77, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10B78, 0x10B7F, propertyDISALLOWED}, // INSCRIPTIONAL PAHLAVI NUMBER ONE..INSCRIPTIO + {0x10B80, 0x10BFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10C00, 0x10C48, propertyPVALID}, // OLD TURKIC LETTER ORKHON A..OLD TURKIC LETTE + {0x10C49, 0x10E5F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x10E60, 0x10E7E, propertyDISALLOWED}, // RUMI DIGIT ONE..RUMI FRACTION TWO THIRDS + {0x10E7F, 0x1107F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x11080, 0x110BA, propertyPVALID}, // KAITHI SIGN CANDRABINDU..KAITHI SIGN NUKTA + {0x110BB, 0x110C1, propertyDISALLOWED}, // KAITHI ABBREVIATION SIGN..KAITHI DOUBLE DAND + {0x110C2, 0x11FFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x12000, 0x1236E, propertyPVALID}, // CUNEIFORM SIGN A..CUNEIFORM SIGN ZUM + {0x1236F, 0x123FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x12400, 0x12462, propertyDISALLOWED}, // CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NU + {0x12463, 0x1246F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x12470, 0x12473, propertyDISALLOWED}, // CUNEIFORM PUNCTUATION SIGN OLD ASSYRIAN WORD + {0x12474, 0x12FFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x13000, 0x1342E, propertyPVALID}, // EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYP + {0x1342F, 
0x1CFFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D000, 0x1D0F5, propertyDISALLOWED}, // BYZANTINE MUSICAL SYMBOL PSILI..BYZANTINE MU + {0x1D0F6, 0x1D0FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D100, 0x1D126, propertyDISALLOWED}, // MUSICAL SYMBOL SINGLE BARLINE..MUSICAL SYMBO + {0x1D127, 0x1D128, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D129, 0x1D1DD, propertyDISALLOWED}, // MUSICAL SYMBOL MULTIPLE MEASURE REST..MUSICA + {0x1D1DE, 0x1D1FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D200, 0x1D245, propertyDISALLOWED}, // GREEK VOCAL NOTATION SYMBOL-1..GREEK MUSICAL + {0x1D246, 0x1D2FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D300, 0x1D356, propertyDISALLOWED}, // MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING + {0x1D357, 0x1D35F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D360, 0x1D371, propertyDISALLOWED}, // COUNTING ROD UNIT DIGIT ONE..COUNTING ROD TE + {0x1D372, 0x1D3FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D400, 0x1D454, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL A..MATHEMATICAL IT + {0x1D455, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D456, 0x1D49C, propertyDISALLOWED}, // MATHEMATICAL ITALIC SMALL I..MATHEMATICAL SC + {0x1D49D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D49E, 0x1D49F, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL C..MATHEMATICAL + {0x1D4A0, 0x1D4A1, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D4A2, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL G + {0x1D4A3, 0x1D4A4, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D4A5, 0x1D4A6, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL J..MATHEMATICAL + {0x1D4A7, 0x1D4A8, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D4A9, 0x1D4AC, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL N..MATHEMATICAL + {0x1D4AD, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D4AE, 0x1D4B9, propertyDISALLOWED}, // MATHEMATICAL SCRIPT CAPITAL S..MATHEMATICAL + {0x1D4BA, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D4BB, 0x0, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL F + {0x1D4BC, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D4BD, 0x1D4C3, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL H..MATHEMATICAL SC + {0x1D4C4, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D4C5, 0x1D505, propertyDISALLOWED}, // MATHEMATICAL SCRIPT SMALL P..MATHEMATICAL FR + {0x1D506, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D507, 0x1D50A, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL D..MATHEMATICAL + {0x1D50B, 0x1D50C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D50D, 0x1D514, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL J..MATHEMATICAL + {0x1D515, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D516, 0x1D51C, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR CAPITAL S..MATHEMATICAL + {0x1D51D, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D51E, 0x1D539, propertyDISALLOWED}, // MATHEMATICAL FRAKTUR SMALL A..MATHEMATICAL D + {0x1D53A, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D53B, 0x1D53E, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL D..MATHEM + {0x1D53F, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D540, 0x1D544, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL I..MATHEM + {0x1D545, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1D546, 0x0, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL O + {0x1D547, 0x1D549, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D54A, 0x1D550, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK CAPITAL S..MATHEM + {0x1D551, 0x0, 
propertyUNASSIGNED}, // <reserved> + {0x1D552, 0x1D6A5, propertyDISALLOWED}, // MATHEMATICAL DOUBLE-STRUCK SMALL A..MATHEMAT + {0x1D6A6, 0x1D6A7, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D6A8, 0x1D7CB, propertyDISALLOWED}, // MATHEMATICAL BOLD CAPITAL ALPHA..MATHEMATICA + {0x1D7CC, 0x1D7CD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1D7CE, 0x1D7FF, propertyDISALLOWED}, // MATHEMATICAL BOLD DIGIT ZERO..MATHEMATICAL M + {0x1D800, 0x1EFFF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F000, 0x1F02B, propertyDISALLOWED}, // MAHJONG TILE EAST WIND..MAHJONG TILE BACK + {0x1F02C, 0x1F02F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F030, 0x1F093, propertyDISALLOWED}, // DOMINO TILE HORIZONTAL BACK..DOMINO TILE VER + {0x1F094, 0x1F0FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F100, 0x1F10A, propertyDISALLOWED}, // DIGIT ZERO FULL STOP..DIGIT NINE COMMA + {0x1F10B, 0x1F10F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F110, 0x1F12E, propertyDISALLOWED}, // PARENTHESIZED LATIN CAPITAL LETTER A..CIRCLE + {0x1F12F, 0x1F130, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F131, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER B + {0x1F132, 0x1F13C, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F13D, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER N + {0x1F13E, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F13F, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER P + {0x1F140, 0x1F141, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F142, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER S + {0x1F143, 0x1F145, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F146, 0x0, propertyDISALLOWED}, // SQUARED LATIN CAPITAL LETTER W + {0x1F147, 0x1F149, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F14A, 0x1F14E, propertyDISALLOWED}, // SQUARED HV..SQUARED PPV + {0x1F14F, 0x1F156, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F157, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER H + {0x1F158, 0x1F15E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F15F, 0x0, propertyDISALLOWED}, // NEGATIVE CIRCLED LATIN CAPITAL LETTER P + {0x1F160, 0x1F178, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F179, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER J + {0x1F17A, 0x0, propertyUNASSIGNED}, // <reserved> + {0x1F17B, 0x1F17C, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER L..NEG + {0x1F17D, 0x1F17E, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F17F, 0x0, propertyDISALLOWED}, // NEGATIVE SQUARED LATIN CAPITAL LETTER P + {0x1F180, 0x1F189, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F18A, 0x1F18D, propertyDISALLOWED}, // CROSSED NEGATIVE SQUARED LATIN CAPITAL LETTE + {0x1F18E, 0x1F18F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F190, 0x0, propertyDISALLOWED}, // SQUARE DJ + {0x1F191, 0x1F1FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F200, 0x0, propertyDISALLOWED}, // SQUARE HIRAGANA HOKA + {0x1F201, 0x1F20F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F210, 0x1F231, propertyDISALLOWED}, // SQUARED CJK UNIFIED IDEOGRAPH-624B..SQUARED + {0x1F232, 0x1F23F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1F240, 0x1F248, propertyDISALLOWED}, // TORTOISE SHELL BRACKETED CJK UNIFIED IDEOGRA + {0x1F249, 0x1FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x1FFFE, 0x1FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x20000, 0x2A6D6, propertyPVALID}, // <CJK Ideograph Extension 
B>..<CJK Ideograph + {0x2A6D7, 0x2A6FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2A700, 0x2B734, propertyPVALID}, // <CJK Ideograph Extension C>..<CJK Ideograph + {0x2B735, 0x2F7FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2F800, 0x2FA1D, propertyDISALLOWED}, // CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPA + {0x2FA1E, 0x2FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x2FFFE, 0x2FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x30000, 0x3FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x3FFFE, 0x3FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x40000, 0x4FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x4FFFE, 0x4FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x50000, 0x5FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x5FFFE, 0x5FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x60000, 0x6FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x6FFFE, 0x6FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x70000, 0x7FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x7FFFE, 0x7FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x80000, 0x8FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x8FFFE, 0x8FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0x90000, 0x9FFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0x9FFFE, 0x9FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0xA0000, 0xAFFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xAFFFE, 0xAFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0xB0000, 0xBFFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xBFFFE, 0xBFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0xC0000, 0xCFFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xCFFFE, 0xCFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0xD0000, 0xDFFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xDFFFE, 0xDFFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> + {0xE0000, 0x0, propertyUNASSIGNED}, // <reserved> + {0xE0001, 0x0, propertyDISALLOWED}, // LANGUAGE TAG + {0xE0002, 0xE001F, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xE0020, 0xE007F, propertyDISALLOWED}, // TAG SPACE..CANCEL TAG + {0xE0080, 0xE00FF, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xE0100, 0xE01EF, propertyDISALLOWED}, // VARIATION SELECTOR-17..VARIATION SELECTOR-25 + {0xE01F0, 0xEFFFD, propertyUNASSIGNED}, // <reserved>..<reserved> + {0xEFFFE, 0x10FFFF, propertyDISALLOWED}, // <noncharacter>..<noncharacter> +} diff --git a/vendor/github.com/miekg/dns/idn/example_test.go b/vendor/github.com/miekg/dns/idn/example_test.go new file mode 100644 index 0000000..8833cd9 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/example_test.go @@ -0,0 +1,18 @@ +package idn_test + +import ( + "fmt" + "github.com/miekg/dns/idn" +) + +func ExampleToPunycode() { + name := "インターネット.テスト" + fmt.Printf("%s -> %s", name, idn.ToPunycode(name)) + // Output: インターネット.テスト -> xn--eckucmux0ukc.xn--zckzah +} + +func ExampleFromPunycode() { + name := "xn--mgbaja8a1hpac.xn--mgbachtv" + fmt.Printf("%s -> %s", name, idn.FromPunycode(name)) + // Output: xn--mgbaja8a1hpac.xn--mgbachtv -> الانترنت.اختبار +} diff --git a/vendor/github.com/miekg/dns/idn/punycode.go b/vendor/github.com/miekg/dns/idn/punycode.go new file mode 100644 index 0000000..1d03bf6 --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/punycode.go @@ -0,0 +1,370 @@ +// Package idn implements encoding from and to 
punycode as speficied by RFC 3492. +package idn + +import ( + "bytes" + "strings" + "unicode" + "unicode/utf8" + + "github.com/miekg/dns" +) + +// Implementation idea from RFC itself and from from IDNA::Punycode created by +// Tatsuhiko Miyagawa <miyagawa@bulknews.net> and released under Perl Artistic +// License in 2002. + +const ( + _MIN rune = 1 + _MAX rune = 26 + _SKEW rune = 38 + _BASE rune = 36 + _BIAS rune = 72 + _N rune = 128 + _DAMP rune = 700 + + _DELIMITER = '-' + _PREFIX = "xn--" +) + +// ToPunycode converts unicode domain names to DNS-appropriate punycode names. +// This function will return an empty string result for domain names with +// invalid unicode strings. This function expects domain names in lowercase. +func ToPunycode(s string) string { + // Early check to see if encoding is needed. + // This will prevent making heap allocations when not needed. + if !needToPunycode(s) { + return s + } + + tokens := dns.SplitDomainName(s) + switch { + case s == "": + return "" + case tokens == nil: // s == . + return "." + case s[len(s)-1] == '.': + tokens = append(tokens, "") + } + + for i := range tokens { + t := encode([]byte(tokens[i])) + if t == nil { + return "" + } + tokens[i] = string(t) + } + return strings.Join(tokens, ".") +} + +// FromPunycode returns unicode domain name from provided punycode string. +// This function expects punycode strings in lowercase. +func FromPunycode(s string) string { + // Early check to see if decoding is needed. + // This will prevent making heap allocations when not needed. + if !needFromPunycode(s) { + return s + } + + tokens := dns.SplitDomainName(s) + switch { + case s == "": + return "" + case tokens == nil: // s == . + return "." + case s[len(s)-1] == '.': + tokens = append(tokens, "") + } + for i := range tokens { + tokens[i] = string(decode([]byte(tokens[i]))) + } + return strings.Join(tokens, ".") +} + +// digitval converts single byte into meaningful value that's used to calculate decoded unicode character. +const errdigit = 0xffff + +func digitval(code rune) rune { + switch { + case code >= 'A' && code <= 'Z': + return code - 'A' + case code >= 'a' && code <= 'z': + return code - 'a' + case code >= '0' && code <= '9': + return code - '0' + 26 + } + return errdigit +} + +// lettercode finds BASE36 byte (a-z0-9) based on calculated number. +func lettercode(digit rune) rune { + switch { + case digit >= 0 && digit <= 25: + return digit + 'a' + case digit >= 26 && digit <= 36: + return digit - 26 + '0' + } + panic("dns: not reached") +} + +// adapt calculates next bias to be used for next iteration delta. +func adapt(delta rune, numpoints int, firsttime bool) rune { + if firsttime { + delta /= _DAMP + } else { + delta /= 2 + } + + var k rune + for delta = delta + delta/rune(numpoints); delta > (_BASE-_MIN)*_MAX/2; k += _BASE { + delta /= _BASE - _MIN + } + + return k + ((_BASE-_MIN+1)*delta)/(delta+_SKEW) +} + +// next finds minimal rune (one with lowest codepoint value) that should be equal or above boundary. +func next(b []rune, boundary rune) rune { + if len(b) == 0 { + panic("dns: invalid set of runes to determine next one") + } + m := b[0] + for _, x := range b[1:] { + if x >= boundary && (m < boundary || x < m) { + m = x + } + } + return m +} + +// preprune converts unicode rune to lower case. At this time it's not +// supporting all things described in RFCs. +func preprune(r rune) rune { + if unicode.IsUpper(r) { + r = unicode.ToLower(r) + } + return r +} + +// tfunc is a function that helps calculate each character weight. 
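// Illustrative sketch added by the editor; not part of the upstream miekg/dns
// sources, and exampleThresholds is a hypothetical name. It only probes the
// threshold helper tfunc defined next: tfunc returns _MIN when k <= bias,
// _MAX when k >= bias+_MAX, and k-bias otherwise, i.e. it clamps each
// per-digit weight into the 1..26 range used by the encode and decode loops.
func exampleThresholds() [3]rune {
	return [3]rune{
		tfunc(_BIAS, _BIAS),      // k <= bias        -> _MIN (1)
		tfunc(_BIAS+8, _BIAS),    // in between       -> k - bias (8)
		tfunc(_BIAS+_MAX, _BIAS), // k >= bias + _MAX -> _MAX (26)
	}
}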
+func tfunc(k, bias rune) rune { + switch { + case k <= bias: + return _MIN + case k >= bias+_MAX: + return _MAX + } + return k - bias +} + +// needToPunycode returns true for strings that require punycode encoding +// (contain unicode characters). +func needToPunycode(s string) bool { + // This function is very similar to bytes.Runes. We don't use bytes.Runes + // because it makes a heap allocation that's not needed here. + for i := 0; len(s) > 0; i++ { + r, l := utf8.DecodeRuneInString(s) + if r > 0x7f { + return true + } + s = s[l:] + } + return false +} + +// needFromPunycode returns true for strings that require punycode decoding. +func needFromPunycode(s string) bool { + if s == "." { + return false + } + + off := 0 + end := false + pl := len(_PREFIX) + sl := len(s) + + // If s starts with _PREFIX. + if sl > pl && s[off:off+pl] == _PREFIX { + return true + } + + for { + // Find the part after the next ".". + off, end = dns.NextLabel(s, off) + if end { + return false + } + // If this parts starts with _PREFIX. + if sl-off > pl && s[off:off+pl] == _PREFIX { + return true + } + } +} + +// encode transforms Unicode input bytes (that represent DNS label) into +// punycode bytestream. This function would return nil if there's an invalid +// character in the label. +func encode(input []byte) []byte { + n, bias := _N, _BIAS + + b := bytes.Runes(input) + for i := range b { + if !isValidRune(b[i]) { + return nil + } + + b[i] = preprune(b[i]) + } + + basic := make([]byte, 0, len(b)) + for _, ltr := range b { + if ltr <= 0x7f { + basic = append(basic, byte(ltr)) + } + } + basiclen := len(basic) + fulllen := len(b) + if basiclen == fulllen { + return basic + } + + var out bytes.Buffer + + out.WriteString(_PREFIX) + if basiclen > 0 { + out.Write(basic) + out.WriteByte(_DELIMITER) + } + + var ( + ltr, nextltr rune + delta, q rune // delta calculation (see rfc) + t, k, cp rune // weight and codepoint calculation + ) + + for h := basiclen; h < fulllen; n, delta = n+1, delta+1 { + nextltr = next(b, n) + delta, n = delta+(nextltr-n)*rune(h+1), nextltr + + for _, ltr = range b { + if ltr < n { + delta++ + } + if ltr == n { + q = delta + for k = _BASE; ; k += _BASE { + t = tfunc(k, bias) + if q < t { + break + } + cp = t + ((q - t) % (_BASE - t)) + out.WriteRune(lettercode(cp)) + q = (q - t) / (_BASE - t) + } + + out.WriteRune(lettercode(q)) + + bias = adapt(delta, h+1, h == basiclen) + h, delta = h+1, 0 + } + } + } + return out.Bytes() +} + +// decode transforms punycode input bytes (that represent DNS label) into Unicode bytestream. +func decode(b []byte) []byte { + src := b // b would move and we need to keep it + + n, bias := _N, _BIAS + if !bytes.HasPrefix(b, []byte(_PREFIX)) { + return b + } + out := make([]rune, 0, len(b)) + b = b[len(_PREFIX):] + for pos := len(b) - 1; pos >= 0; pos-- { + // only last delimiter is our interest + if b[pos] == _DELIMITER { + out = append(out, bytes.Runes(b[:pos])...) 
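// (Editor's note; not in the upstream source.) At this point everything before
// the last delimiter is the literal ASCII ("basic") part of the label and has
// been copied through unchanged; only the remainder of b is fed to the
// base-36 delta-decoding loop below.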
+ b = b[pos+1:] // trim source string + break + } + } + if len(b) == 0 { + return src + } + var ( + i, oldi, w rune + ch byte + t, digit rune + ln int + ) + + for i = 0; len(b) > 0; i++ { + oldi, w = i, 1 + for k := _BASE; len(b) > 0; k += _BASE { + ch, b = b[0], b[1:] + digit = digitval(rune(ch)) + if digit == errdigit { + return src + } + i += digit * w + if i < 0 { + // safety check for rune overflow + return src + } + + t = tfunc(k, bias) + if digit < t { + break + } + + w *= _BASE - t + } + ln = len(out) + 1 + bias = adapt(i-oldi, ln, oldi == 0) + n += i / rune(ln) + i = i % rune(ln) + // insert + out = append(out, 0) + copy(out[i+1:], out[i:]) + out[i] = n + } + + var ret bytes.Buffer + for _, r := range out { + ret.WriteRune(r) + } + return ret.Bytes() +} + +// isValidRune checks if the character is valid. We will look for the +// character property in the code points list. For now we aren't checking special +// rules in case of contextual property +func isValidRune(r rune) bool { + return findProperty(r) == propertyPVALID +} + +// findProperty will try to check the code point property of the given +// character. It will use a binary search algorithm as we have a slice of +// ordered ranges (average case performance O(log n)) +func findProperty(r rune) property { + imin, imax := 0, len(codePoints) + + for imax >= imin { + imid := (imin + imax) / 2 + + codePoint := codePoints[imid] + if (codePoint.start == r && codePoint.end == 0) || (codePoint.start <= r && codePoint.end >= r) { + return codePoint.state + } + + if (codePoint.end > 0 && codePoint.end < r) || (codePoint.end == 0 && codePoint.start < r) { + imin = imid + 1 + } else { + imax = imid - 1 + } + } + + return propertyUnknown +} diff --git a/vendor/github.com/miekg/dns/idn/punycode_test.go b/vendor/github.com/miekg/dns/idn/punycode_test.go new file mode 100644 index 0000000..9c9a15f --- /dev/null +++ b/vendor/github.com/miekg/dns/idn/punycode_test.go @@ -0,0 +1,116 @@ +package idn + +import ( + "strings" + "testing" +) + +var testcases = [][2]string{ + {"", ""}, + {"a", "a"}, + {"a-b", "a-b"}, + {"a-b-c", "a-b-c"}, + {"abc", "abc"}, + {"я", "xn--41a"}, + {"zя", "xn--z-0ub"}, + {"яZ", "xn--z-zub"}, + {"а-я", "xn----7sb8g"}, + {"إختبار", "xn--kgbechtv"}, + {"آزمایشی", "xn--hgbk6aj7f53bba"}, + {"测试", "xn--0zwm56d"}, + {"測試", "xn--g6w251d"}, + {"испытание", "xn--80akhbyknj4f"}, + {"परीक्षा", "xn--11b5bs3a9aj6g"}, + {"δοκιμή", "xn--jxalpdlp"}, + {"테스트", "xn--9t4b11yi5a"}, + {"טעסט", "xn--deba0ad"}, + {"テスト", "xn--zckzah"}, + {"பரிட்சை", "xn--hlcj6aya9esc7a"}, + {"mamão-com-açúcar", "xn--mamo-com-acar-yeb1e6q"}, + {"σ", "xn--4xa"}, +} + +func TestEncodeDecodePunycode(t *testing.T) { + for _, tst := range testcases { + enc := encode([]byte(tst[0])) + if string(enc) != tst[1] { + t.Errorf("%s encodeded as %s but should be %s", tst[0], enc, tst[1]) + } + dec := decode([]byte(tst[1])) + if string(dec) != strings.ToLower(tst[0]) { + t.Errorf("%s decoded as %s but should be %s", tst[1], dec, strings.ToLower(tst[0])) + } + } +} + +func TestToFromPunycode(t *testing.T) { + for _, tst := range testcases { + // assert unicode.com == punycode.com + full := ToPunycode(tst[0] + ".com") + if full != tst[1]+".com" { + t.Errorf("invalid result from string conversion to punycode, %s and should be %s.com", full, tst[1]) + } + // assert punycode.punycode == unicode.unicode + decoded := FromPunycode(tst[1] + "." 
+ tst[1]) + if decoded != strings.ToLower(tst[0]+"."+tst[0]) { + t.Errorf("invalid result from string conversion to punycode, %s and should be %s.%s", decoded, tst[0], tst[0]) + } + } +} + +func TestEncodeDecodeFinalPeriod(t *testing.T) { + for _, tst := range testcases { + // assert unicode.com. == punycode.com. + full := ToPunycode(tst[0] + ".") + if full != tst[1]+"." { + t.Errorf("invalid result from string conversion to punycode when period added at the end, %#v and should be %#v", full, tst[1]+".") + } + // assert punycode.com. == unicode.com. + decoded := FromPunycode(tst[1] + ".") + if decoded != strings.ToLower(tst[0]+".") { + t.Errorf("invalid result from string conversion to punycode when period added, %#v and should be %#v", decoded, tst[0]+".") + } + full = ToPunycode(tst[0]) + if full != tst[1] { + t.Errorf("invalid result from string conversion to punycode when no period added at the end, %#v and should be %#v", full, tst[1]+".") + } + // assert punycode.com. == unicode.com. + decoded = FromPunycode(tst[1]) + if decoded != strings.ToLower(tst[0]) { + t.Errorf("invalid result from string conversion to punycode when no period added, %#v and should be %#v", decoded, tst[0]+".") + } + } +} + +var invalidACEs = []string{ + "xn--*", + "xn--", + "xn---", + "xn--a000000000", +} + +func TestInvalidPunycode(t *testing.T) { + for _, d := range invalidACEs { + s := FromPunycode(d) + if s != d { + t.Errorf("Changed invalid name %s to %#v", d, s) + } + } +} + +// You can verify the labels that are valid or not comparing to the Verisign +// website: http://mct.verisign-grs.com/ +var invalidUnicodes = []string{ + "Σ", + "ЯZ", + "Испытание", +} + +func TestInvalidUnicodes(t *testing.T) { + for _, d := range invalidUnicodes { + s := ToPunycode(d) + if s != "" { + t.Errorf("Changed invalid name %s to %#v", d, s) + } + } +} diff --git a/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/miekg/dns/issue_test.go new file mode 100644 index 0000000..265ad56 --- /dev/null +++ b/vendor/github.com/miekg/dns/issue_test.go @@ -0,0 +1,68 @@ +package dns + +// Tests that solve that an specific issue. + +import ( + "strings" + "testing" +) + +func TestTCPRtt(t *testing.T) { + m := new(Msg) + m.RecursionDesired = true + m.SetQuestion("example.org.", TypeA) + + c := &Client{} + for _, proto := range []string{"udp", "tcp"} { + c.Net = proto + _, rtt, err := c.Exchange(m, "8.8.4.4:53") + if err != nil { + t.Fatal(err) + } + if rtt == 0 { + t.Fatalf("expecting non zero rtt %s, got zero", c.Net) + } + } +} + +func TestNSEC3MissingSalt(t *testing.T) { + rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 aabbccdd K8UDEMVP1J2F7EG6JEBPS17VP3N8I58H") + if err != nil { + t.Fatalf("failed to parse example rr: %s", err) + } + m := new(Msg) + m.Answer = []RR{rr} + mb, err := m.Pack() + if err != nil { + t.Fatalf("expected to pack message. err: %s", err) + } + if err := m.Unpack(mb); err != nil { + t.Fatalf("expected to unpack message. missing salt? err: %s", err) + } + in := rr.(*NSEC3).Salt + out := m.Answer[0].(*NSEC3).Salt + if in != out { + t.Fatalf("expected salts to match. packed: `%s`. returned: `%s`", in, out) + } +} + +func TestNSEC3MixedNextDomain(t *testing.T) { + rr, err := NewRR("ji6neoaepv8b5o6k4ev33abha8ht9fgc.example. NSEC3 1 1 12 - k8udemvp1j2f7eg6jebps17vp3n8i58h") + if err != nil { + t.Fatalf("failed to parse example rr: %s", err) + } + m := new(Msg) + m.Answer = []RR{rr} + mb, err := m.Pack() + if err != nil { + t.Fatalf("expected to pack message. 
err: %s", err) + } + if err := m.Unpack(mb); err != nil { + t.Fatalf("expected to unpack message. err: %s", err) + } + in := strings.ToUpper(rr.(*NSEC3).NextDomain) + out := m.Answer[0].(*NSEC3).NextDomain + if in != out { + t.Fatalf("expected round trip to produce NextDomain `%s`, instead `%s`", in, out) + } +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 0000000..9538d9c --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,171 @@ +package dns + +import "strings" + +// Holds a bunch of helper functions for dealing with labels. + +// SplitDomainName splits a name string into it's labels. +// www.miek.nl. returns []string{"www", "miek", "nl"} +// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, +// The root label (.) returns nil. Note that using +// strings.Split(s) will work in most cases, but does not handle +// escaped dots (\.) for instance. +// s must be a syntactically valid domain name, see IsDomainName. +func SplitDomainName(s string) (labels []string) { + if len(s) == 0 { + return nil + } + fqdnEnd := 0 // offset of the final '.' or the length of the name + idx := Split(s) + begin := 0 + if s[len(s)-1] == '.' { + fqdnEnd = len(s) - 1 + } else { + fqdnEnd = len(s) + } + + switch len(idx) { + case 0: + return nil + case 1: + // no-op + default: + end := 0 + for i := 1; i < len(idx); i++ { + end = idx[i] + labels = append(labels, s[begin:end-1]) + begin = end + } + } + + labels = append(labels, s[begin:fqdnEnd]) + return labels +} + +// CompareDomainName compares the names s1 and s2 and +// returns how many labels they have in common starting from the *right*. +// The comparison stops at the first inequality. The names are not downcased +// before the comparison. +// +// www.miek.nl. and miek.nl. have two labels in common: miek and nl +// www.miek.nl. and www.bla.nl. have one label in common: nl +// +// s1 and s2 must be syntactically valid domain names. +func CompareDomainName(s1, s2 string) (n int) { + s1, s2 = strings.ToLower(s1), strings.ToLower(s2) + s1 = Fqdn(s1) + s2 = Fqdn(s2) + l1 := Split(s1) + l2 := Split(s2) + + // the first check: root label + if l1 == nil || l2 == nil { + return + } + + j1 := len(l1) - 1 // end + i1 := len(l1) - 2 // start + j2 := len(l2) - 1 + i2 := len(l2) - 2 + // the second check can be done here: last/only label + // before we fall through into the for-loop below + if s1[l1[j1]:] == s2[l2[j2]:] { + n++ + } else { + return + } + for { + if i1 < 0 || i2 < 0 { + break + } + if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] { + n++ + } else { + break + } + j1-- + i1-- + j2-- + i2-- + } + return +} + +// CountLabel counts the the number of labels in the string s. +// s must be a syntactically valid domain name. +func CountLabel(s string) (labels int) { + if s == "." { + return + } + off := 0 + end := false + for { + off, end = NextLabel(s, off) + labels++ + if end { + return + } + } +} + +// Split splits a name s into its label indexes. +// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. +// The root name (.) returns nil. Also see SplitDomainName. +// s must be a syntactically valid domain name. +func Split(s string) []int { + if s == "." { + return nil + } + idx := make([]int, 1, 3) + off := 0 + end := false + + for { + off, end = NextLabel(s, off) + if end { + return idx + } + idx = append(idx, off) + } +} + +// NextLabel returns the index of the start of the next label in the +// string s starting at offset. 
+// The bool end is true when the end of the string has been reached. +// Also see PrevLabel. +func NextLabel(s string, offset int) (i int, end bool) { + quote := false + for i = offset; i < len(s)-1; i++ { + switch s[i] { + case '\\': + quote = !quote + default: + quote = false + case '.': + if quote { + quote = !quote + continue + } + return i + 1, false + } + } + return i + 1, true +} + +// PrevLabel returns the index of the label when starting from the right and +// jumping n labels to the left. +// The bool start is true when the start of the string has been overshot. +// Also see NextLabel. +func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} diff --git a/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/miekg/dns/labels_test.go new file mode 100644 index 0000000..9875d6c --- /dev/null +++ b/vendor/github.com/miekg/dns/labels_test.go @@ -0,0 +1,203 @@ +package dns + +import "testing" + +func TestCompareDomainName(t *testing.T) { + s1 := "www.miek.nl." + s2 := "miek.nl." + s3 := "www.bla.nl." + s4 := "nl.www.bla." + s5 := "nl" + s6 := "miek.nl" + + if CompareDomainName(s1, s2) != 2 { + t.Errorf("%s with %s should be %d", s1, s2, 2) + } + if CompareDomainName(s1, s3) != 1 { + t.Errorf("%s with %s should be %d", s1, s3, 1) + } + if CompareDomainName(s3, s4) != 0 { + t.Errorf("%s with %s should be %d", s3, s4, 0) + } + // Non qualified tests + if CompareDomainName(s1, s5) != 1 { + t.Errorf("%s with %s should be %d", s1, s5, 1) + } + if CompareDomainName(s1, s6) != 2 { + t.Errorf("%s with %s should be %d", s1, s5, 2) + } + + if CompareDomainName(s1, ".") != 0 { + t.Errorf("%s with %s should be %d", s1, s5, 0) + } + if CompareDomainName(".", ".") != 0 { + t.Errorf("%s with %s should be %d", ".", ".", 0) + } + if CompareDomainName("test.com.", "TEST.COM.") != 2 { + t.Errorf("test.com. and TEST.COM. 
should be an exact match") + } +} + +func TestSplit(t *testing.T) { + splitter := map[string]int{ + "www.miek.nl.": 3, + "www.miek.nl": 3, + "www..miek.nl": 4, + `www\.miek.nl.`: 2, + `www\\.miek.nl.`: 3, + ".": 0, + "nl.": 1, + "nl": 1, + "com.": 1, + ".com.": 2, + } + for s, i := range splitter { + if x := len(Split(s)); x != i { + t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s)) + } else { + t.Logf("%s %v", s, Split(s)) + } + } +} + +func TestSplit2(t *testing.T) { + splitter := map[string][]int{ + "www.miek.nl.": {0, 4, 9}, + "www.miek.nl": {0, 4, 9}, + "nl": {0}, + } + for s, i := range splitter { + x := Split(s) + switch len(i) { + case 1: + if x[0] != i[0] { + t.Errorf("labels should be %v, got %v: %s", i, x, s) + } + default: + if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] { + t.Errorf("labels should be %v, got %v: %s", i, x, s) + } + } + } +} + +func TestPrevLabel(t *testing.T) { + type prev struct { + string + int + } + prever := map[prev]int{ + prev{"www.miek.nl.", 0}: 12, + prev{"www.miek.nl.", 1}: 9, + prev{"www.miek.nl.", 2}: 4, + + prev{"www.miek.nl", 0}: 11, + prev{"www.miek.nl", 1}: 9, + prev{"www.miek.nl", 2}: 4, + + prev{"www.miek.nl.", 5}: 0, + prev{"www.miek.nl", 5}: 0, + + prev{"www.miek.nl.", 3}: 0, + prev{"www.miek.nl", 3}: 0, + } + for s, i := range prever { + x, ok := PrevLabel(s.string, s.int) + if i != x { + t.Errorf("label should be %d, got %d, %t: preving %d, %s", i, x, ok, s.int, s.string) + } + } +} + +func TestCountLabel(t *testing.T) { + splitter := map[string]int{ + "www.miek.nl.": 3, + "www.miek.nl": 3, + "nl": 1, + ".": 0, + } + for s, i := range splitter { + x := CountLabel(s) + if x != i { + t.Errorf("CountLabel should have %d, got %d", i, x) + } + } +} + +func TestSplitDomainName(t *testing.T) { + labels := map[string][]string{ + "miek.nl": {"miek", "nl"}, + ".": nil, + "www.miek.nl.": {"www", "miek", "nl"}, + "www.miek.nl": {"www", "miek", "nl"}, + "www..miek.nl": {"www", "", "miek", "nl"}, + `www\.miek.nl`: {`www\.miek`, "nl"}, + `www\\.miek.nl`: {`www\\`, "miek", "nl"}, + ".www.miek.nl.": {"", "www", "miek", "nl"}, + } +domainLoop: + for domain, splits := range labels { + parts := SplitDomainName(domain) + if len(parts) != len(splits) { + t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) + continue domainLoop + } + for i := range parts { + if parts[i] != splits[i] { + t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits) + continue domainLoop + } + } + } +} + +func TestIsDomainName(t *testing.T) { + type ret struct { + ok bool + lab int + } + names := map[string]*ret{ + "..": {false, 1}, + "@.": {true, 1}, + "www.example.com": {true, 3}, + "www.e%ample.com": {true, 3}, + "www.example.com.": {true, 3}, + "mi\\k.nl.": {true, 2}, + "mi\\k.nl": {true, 2}, + } + for d, ok := range names { + l, k := IsDomainName(d) + if ok.ok != k || ok.lab != l { + t.Errorf(" got %v %d for %s ", k, l, d) + t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d) + } + } +} + +func BenchmarkSplitLabels(b *testing.B) { + for i := 0; i < b.N; i++ { + Split("www.example.com") + } +} + +func BenchmarkLenLabels(b *testing.B) { + for i := 0; i < b.N; i++ { + CountLabel("www.example.com") + } +} + +func BenchmarkCompareLabels(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + CompareDomainName("www.example.com", "aa.example.com") + } +} + +func BenchmarkIsSubDomain(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + IsSubDomain("www.example.com", "aa.example.com") + 
IsSubDomain("example.com", "aa.example.com") + IsSubDomain("miek.nl", "aa.example.com") + } +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 0000000..605fe6c --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1159 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go +//go:generate go run compress_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "math/rand" + "strconv" + "sync" +) + +const ( + maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 +) + +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. + ErrTruncated error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired. +) + +// Id by default, returns a 16 bits random number to be used as a +// message id. The random provided should be good enough. This being a +// variable the function can be reassigned to a custom function. +// For instance, to make it return a static value: +// +// dns.Id = func() uint16 { return 3 } +var Id func() uint16 = id + +var ( + idLock sync.Mutex + idRand *rand.Rand +) + +// id returns a 16 bits random number to be used as a +// message id. 
The random provided should be good enough. +func id() uint16 { + idLock.Lock() + + if idRand == nil { + // This (partially) works around + // https://github.com/golang/go/issues/11833 by only + // seeding idRand upon the first call to id. + + var seed int64 + var buf [8]byte + + if _, err := crand.Read(buf[:]); err == nil { + seed = int64(binary.LittleEndian.Uint64(buf[:])) + } else { + seed = rand.Int63() + } + + idRand = rand.New(rand.NewSource(seed)) + } + + // The call to idRand.Uint32 must be within the + // mutex lock because *rand.Rand is not safe for + // concurrent use. + // + // There is no added performance overhead to calling + // idRand.Uint32 inside a mutex lock over just + // calling rand.Uint32 as the global math/rand rng + // is internally protected by a sync.Mutex. + id := uint16(idRand.Uint32()) + + idLock.Unlock() + return id +} + +// MsgHdr is a a manually-unpacked version of (id, bits). +type MsgHdr struct { + Id uint16 + Response bool + Opcode int + Authoritative bool + Truncated bool + RecursionDesired bool + RecursionAvailable bool + Zero bool + AuthenticatedData bool + CheckingDisabled bool + Rcode int +} + +// Msg contains the layout of a DNS message. +type Msg struct { + MsgHdr + Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. + Question []Question // Holds the RR(s) of the question section. + Answer []RR // Holds the RR(s) of the answer section. + Ns []RR // Holds the RR(s) of the authority section. + Extra []RR // Holds the RR(s) of the additional section. +} + +// ClassToString is a maps Classes to strings for each CLASS wire type. +var ClassToString = map[uint16]string{ + ClassINET: "IN", + ClassCSNET: "CS", + ClassCHAOS: "CH", + ClassHESIOD: "HS", + ClassNONE: "NONE", + ClassANY: "ANY", +} + +// OpcodeToString maps Opcodes to strings. +var OpcodeToString = map[int]string{ + OpcodeQuery: "QUERY", + OpcodeIQuery: "IQUERY", + OpcodeStatus: "STATUS", + OpcodeNotify: "NOTIFY", + OpcodeUpdate: "UPDATE", +} + +// RcodeToString maps Rcodes to strings. +var RcodeToString = map[int]string{ + RcodeSuccess: "NOERROR", + RcodeFormatError: "FORMERR", + RcodeServerFailure: "SERVFAIL", + RcodeNameError: "NXDOMAIN", + RcodeNotImplemented: "NOTIMPL", + RcodeRefused: "REFUSED", + RcodeYXDomain: "YXDOMAIN", // See RFC 2136 + RcodeYXRrset: "YXRRSET", + RcodeNXRrset: "NXRRSET", + RcodeNotAuth: "NOTAUTH", + RcodeNotZone: "NOTZONE", + RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 + // RcodeBadVers: "BADVERS", + RcodeBadKey: "BADKEY", + RcodeBadTime: "BADTIME", + RcodeBadMode: "BADMODE", + RcodeBadName: "BADNAME", + RcodeBadAlg: "BADALG", + RcodeBadTrunc: "BADTRUNC", + RcodeBadCookie: "BADCOOKIE", +} + +// Domain names are a sequence of counted strings +// split at the dots. They end with a zero-length string. + +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. 
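Editorial aside: a minimal, self-contained sketch of the counted-string encoding described above, using the exported PackDomainName with no compression map (so no pointers are emitted). The import path is the package's upstream path as used in this vendor tree.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	buf := make([]byte, 64)
	off, err := dns.PackDomainName("www.example.com.", buf, 0, nil, false)
	if err != nil {
		panic(err)
	}
	// Each label is written as a length byte followed by its bytes, and the
	// name ends with a zero-length label:
	// 03 77 77 77 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
	fmt.Printf("% x\n", buf[:off])
}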
+func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. + begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + // We should only compress when compress it true, but we should also still pick + // up names that can be used for *future* compression(s). + if compression != nil && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' 
{ + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. +func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + maxLen := maxDomainNameWireOctets + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + // presentation-format \X escapes add an extra byte + maxLen += 1 + default: + if b < 32 || b >= 127 { // unprintable, use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + // presentation-format \DDD escapes add 3 extra bytes + maxLen += 3 + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. 
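Editorial aside: to make the pointer mechanism described in the comments above concrete, a small sketch that hand-builds a fragment containing one compressed name and unpacks it with the exported UnpackDomainName. The byte values are illustrative only.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Offset 0 holds "foo.example.com." in plain counted-string form; at
	// offset 17 a second name "bar" ends with the pointer bytes 0xC0 0x04,
	// which jump back to the "example" label at offset 4.
	msg := []byte("\x03foo\x07example\x03com\x00\x03bar\xc0\x04")

	name, next, err := dns.UnpackDomainName(msg, 17)
	// Prints: bar.example.com. 23 <nil>
	// next is the offset just past the two pointer bytes, which is where the
	// following record would start.
	fmt.Println(name, next, err)
}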
+ if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + // pointer should guarantee that it advances and points forwards at least + // but the condition on previous three lines guarantees that it's + // at least loop-free + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } else if len(s) >= maxLen { + // error if the name is too long, but don't throw it away + return string(s), lenmsg, ErrLongDomain + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with 
escaped bytes +func isDigit(b byte) bool { return b >= '0' && b <= '9' } + +func dddToByte(s []byte) byte { + return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +} + +// Helper function for packing and unpacking +func intToBytes(i *big.Int, length int) []byte { + buf := i.Bytes() + if len(buf) < length { + b := make([]byte, length) + copy(b[length-len(buf):], buf) + return b + } + return buf +} + +// PackRR packs a resource record rr into msg[off:]. +// See PackDomainName for documentation about the compression. +func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if rr == nil { + return len(msg), &Error{err: "nil rr"} + } + + off1, err = rr.pack(msg, off, compression, compress) + if err != nil { + return len(msg), err + } + // TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well. + if rawSetRdlength(msg, off, off1) { + return off1, nil + } + return off, ErrRdata +} + +// UnpackRR unpacks msg[off:] into an RR. +func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { + h, off, msg, err := unpackHeader(msg, off) + if err != nil { + return nil, len(msg), err + } + end := off + int(h.Rdlength) + + if fn, known := typeToUnpack[h.Rrtype]; !known { + rr, off, err = unpackRFC3597(h, msg, off) + } else { + rr, off, err = fn(h, msg, off) + } + if off != end { + return &h, end, &Error{err: "bad rdlength"} + } + return rr, off, err +} + +// unpackRRslice unpacks msg[off:] into an []RR. +// If we cannot unpack the whole array, then it will return nil +func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { + var r RR + // Optimistically make dst be the length that was sent + dst := make([]RR, 0, l) + for i := 0; i < l; i++ { + off1 := off + r, off, err = UnpackRR(msg, off) + if err != nil { + off = len(msg) + break + } + // If offset does not increase anymore, l is a lie + if off1 == off { + l = i + break + } + dst = append(dst, r) + } + if err != nil && off == len(msg) { + dst = nil + } + return dst, off, err +} + +// Convert a MsgHdr to a string, with dig-like headers: +// +//;; opcode: QUERY, status: NOERROR, id: 48404 +// +//;; flags: qr aa rd ra; +func (h *MsgHdr) String() string { + if h == nil { + return "<nil> MsgHdr" + } + + s := ";; opcode: " + OpcodeToString[h.Opcode] + s += ", status: " + RcodeToString[h.Rcode] + s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" + + s += ";; flags:" + if h.Response { + s += " qr" + } + if h.Authoritative { + s += " aa" + } + if h.Truncated { + s += " tc" + } + if h.RecursionDesired { + s += " rd" + } + if h.RecursionAvailable { + s += " ra" + } + if h.Zero { // Hmm + s += " z" + } + if h.AuthenticatedData { + s += " ad" + } + if h.CheckingDisabled { + s += " cd" + } + + s += ";" + return s +} + +// Pack packs a Msg: it is converted to to wire format. +// If the dns.Compress is true the message will be in compressed wire format. +func (dns *Msg) Pack() (msg []byte, err error) { + return dns.PackBuffer(nil) +} + +// PackBuffer packs a Msg, using the given buffer buf. If buf is too small +// a new buffer is allocated. +func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { + // We use a similar function in tsig.go's stripTsig. 
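Editorial aside: a usage sketch of the Pack/PackBuffer and Unpack paths, a round trip through wire format. SetQuestion is defined elsewhere in the package, and Len gives the size estimate computed by compressedLen later in this file, without actually packing.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("www.example.com.", dns.TypeA) // SetQuestion lives elsewhere in this package
	m.Compress = true

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// Len estimates the wire size without allocating; Pack returns the real bytes.
	fmt.Println(m.Len(), len(wire))

	var back dns.Msg
	if err := back.Unpack(wire); err != nil {
		panic(err)
	}
	fmt.Println(back.Question[0].Name)
}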
+ var ( + dh Header + compression map[string]int + ) + + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + uncompressedLen := compressedLen(dns, false) + if packLen := uncompressedLen + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + + // Pack it in: header and then the pieces. + off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + if off == len(msg) { + return ErrTruncated + } + + // Optimistically use the count given to us in the header + dns.Question = make([]Question, 0, int(dh.Qdcount)) + + for i := 0; i < int(dh.Qdcount); i++ { + off1 := off + var q Question + q, off, err = unpackQuestion(msg, off) + if err != nil { + // Even if Truncated is set, we only will set ErrTruncated if we + // actually got the questions + return err + } + if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! 
+ dh.Qdcount = uint16(i) + break + } + dns.Question = append(dns.Question, q) + } + + dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) + // The header counts might have been wrong so we need to update it + dh.Ancount = uint16(len(dns.Answer)) + if err == nil { + dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Nscount = uint16(len(dns.Ns)) + if err == nil { + dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + } + // The header counts might have been wrong so we need to update it + dh.Arcount = uint16(len(dns.Extra)) + + if off != len(msg) { + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // println("dns: extra bytes in dns packet", off, "<", len(msg)) + } else if dns.Truncated { + // Whether we ran into a an error or not, we want to return that it + // was truncated + err = ErrTruncated + } + return err +} + +// Convert a complete message to a string with dig-like output. +func (dns *Msg) String() string { + if dns == nil { + return "<nil> MsgHdr" + } + s := dns.MsgHdr.String() + " " + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if len(dns.Question) > 0 { + s += "\n;; QUESTION SECTION:\n" + for i := 0; i < len(dns.Question); i++ { + s += dns.Question[i].String() + "\n" + } + } + if len(dns.Answer) > 0 { + s += "\n;; ANSWER SECTION:\n" + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] != nil { + s += dns.Answer[i].String() + "\n" + } + } + } + if len(dns.Ns) > 0 { + s += "\n;; AUTHORITY SECTION:\n" + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] != nil { + s += dns.Ns[i].String() + "\n" + } + } + } + if len(dns.Extra) > 0 { + s += "\n;; ADDITIONAL SECTION:\n" + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] != nil { + s += dns.Extra[i].String() + "\n" + } + } + } + return s +} + +// Len returns the message length when in (un)compressed wire format. +// If dns.Compress is true compression it is taken into account. Len() +// is provided to be a faster way to get the size of the resulting packet, +// than packing it, measuring the size and discarding the buffer. +func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) } + +// compressedLen returns the message length when in compressed wire format +// when compress is true, otherwise the uncompressed length is returned. +func compressedLen(dns *Msg, compress bool) int { + // We always return one more than needed. 
+ l := 12 // Message header is always 12 bytes + compression := map[string]int{} + + for i := 0; i < len(dns.Question); i++ { + l += dns.Question[i].len() + if compress { + compressionLenHelper(compression, dns.Question[i].Name) + } + } + for i := 0; i < len(dns.Answer); i++ { + if dns.Answer[i] == nil { + continue + } + l += dns.Answer[i].len() + if compress { + k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Answer[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Answer[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Answer[i]) + } + } + for i := 0; i < len(dns.Ns); i++ { + if dns.Ns[i] == nil { + continue + } + l += dns.Ns[i].len() + if compress { + k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Ns[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Ns[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Ns[i]) + } + } + for i := 0; i < len(dns.Extra); i++ { + if dns.Extra[i] == nil { + continue + } + l += dns.Extra[i].len() + if compress { + k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name) + if ok { + l += 1 - k + } + compressionLenHelper(compression, dns.Extra[i].Header().Name) + k, ok = compressionLenSearchType(compression, dns.Extra[i]) + if ok { + l += 1 - k + } + compressionLenHelperType(compression, dns.Extra[i]) + } + } + return l +} + +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { + pref := "" + lbs := Split(s) + for j := len(lbs) - 1; j >= 0; j-- { + pref = s[lbs[j]:] + if _, ok := c[pref]; !ok { + c[pref] = len(pref) + } + } +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. +func compressionLenSearch(c map[string]int, s string) (int, bool) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false + } + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true + } + if end { + break + } + off, end = NextLabel(s, off) + } + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
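Editorial aside: a small sketch of the deep-copy semantics documented above. CopyTo copies the Question slice by value and deep-copies every RR, so mutating the copy leaves the original message untouched. SetQuestion again comes from elsewhere in the package.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeMX)

	c := m.Copy()
	c.Question[0].Name = "changed.example.org."

	fmt.Println(m.Question[0].Name) // example.org. (the original is unchanged)
	fmt.Println(c.Question[0].Name) // changed.example.org.
}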
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 0000000..4d9f81d --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,349 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. 
The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) + fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) +if err != nil { + return off, err +} +headerEnd := off +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + 
fallthrough + case st.Tag(i) == `dns:"base32"`: + o("off, err = packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): + // directly write instead of using o() so we get the error check in the correct place + field := st.Field(i).Name() + fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty +if rr.%s != "-" { + off, err = packStringHex(rr.%s, msg, off) + if err != nil { + return off, err + } +} +`, field, field) + continue + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + // We have packed everything, only now we know the rdlength of this RR + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) + fmt.Fprintf(b, "rr := new(%s)\n", name) + fmt.Fprint(b, "rr.Hdr = h\n") + fmt.Fprint(b, `if noRdata(h) { +return rr, off, nil + } +var err error +rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return rr, off, nil + } +`) + } + } + fmt.Fprintf(b, "return rr, off, err }\n\n") + } + // Generate typeToUnpack map + fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") + for _, name := range namedTypes { + if name == "RFC3597" { + continue + } + fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) + } + fmt.Fprintln(b, "}\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
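Editorial aside: the tag grammar handled by the size-* branch above and parsed by structMember/structTag just below is `dns:"size-<encoding>:<LengthField>"`. A self-contained illustration; the helper names here are hypothetical stand-ins that mirror what the real helpers do with the raw tag text, quotes included.

package main

import (
	"fmt"
	"strings"
)

// member and encoding mimic structMember and structTag for a raw struct tag
// such as `dns:"size-base32:SaltLength"`.
func member(tag string) string {
	parts := strings.Split(tag, ":")
	return strings.TrimSuffix(parts[len(parts)-1], `"`)
}

func encoding(tag string) string {
	parts := strings.Split(tag, ":")
	if len(parts) < 2 {
		return ""
	}
	return strings.TrimPrefix(parts[1], `"size-`)
}

func main() {
	tag := `dns:"size-base32:SaltLength"`
	fmt.Println(member(tag))   // SaltLength: the field holding the length
	fmt.Println(encoding(tag)) // base32: the encoding of the data field
}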
+func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. +func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 0000000..615274a --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,633 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. 
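Editorial aside: a short sketch showing how these rdata helpers are reached through the exported API. PackRR (msg.go above) packs the header and then the address via packDataA, which writes only the 4-byte To4() form even though net.ParseIP returns a 16-byte slice; UnpackRR reverses it. The dns.A type and its Hdr/A fields are defined in types.go elsewhere in this commit.

package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	rr := &dns.A{
		Hdr: dns.RR_Header{Name: "www.example.com.", Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300},
		A:   net.ParseIP("192.0.2.1"), // 16-byte form; only the 4-byte To4() form is written
	}

	buf := make([]byte, 64)
	off, err := dns.PackRR(rr, buf, 0, nil, false)
	if err != nil {
		panic(err)
	}

	back, _, err := dns.UnpackRR(buf[:off], 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(off, back.String())
}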
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, nil +} + +// pack packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. +func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if off == len(msg) { + return off, nil + } + + off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rdlength, msg, off) + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +func fromBase32(s []byte) (buf []byte, err error) { + for i, b := range s { + if b >= 'a' && b <= 'z' { + s[i] = b - 32 + } + } + buflen := base32.HexEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32.HexEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
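Editorial aside: the base32 helpers above deliberately use base32.HexEncoding (the RFC 4648 "extended hex" alphabet, 0-9A-V) rather than the standard alphabet, because that is the alphabet NSEC3 hashed owner names use (see HashName in nsecx.go later in this commit). A minimal comparison of the two encodings:

package main

import (
	"encoding/base32"
	"fmt"
)

func main() {
	b := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Println(base32.HexEncoding.EncodeToString(b)) // RQMRTRO=  (alphabet 0-9A-V)
	fmt.Println(base32.StdEncoding.EncodeToString(b)) // 32W353Y=  (alphabet A-Z2-7)
}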
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packTxtString(s, msg, off, txtTmp) + if err 
!= nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
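Editorial aside: one practical consequence of the length-prefixed character-strings handled here is that each string in a TXT record can be at most 255 bytes, which packTxtString in msg.go enforces. A small sketch that is expected to fail for exactly that reason; the dns.TXT type is defined in types.go elsewhere in this commit.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	rr := &dns.TXT{
		Hdr: dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 300},
		Txt: []string{strings.Repeat("a", 300)}, // longer than one length byte can describe
	}

	buf := make([]byte, 1024)
	_, err := dns.PackRR(rr, buf, 0, nil, false)
	fmt.Println(err) // expected: a pack error, since 300 bytes cannot fit one character-string
}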
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET, EDNS0SUBNETDRAFT: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + if code == EDNS0SUBNETDRAFT { + e.DraftOption = true + } + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= 
lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/msg_test.go b/vendor/github.com/miekg/dns/msg_test.go new file mode 100644 index 0000000..2dbef62 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_test.go @@ -0,0 +1,124 @@ +package dns + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "testing" +) + +const ( + maxPrintableLabel = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789x" + tooLongLabel = maxPrintableLabel + "x" +) + +var ( + longDomain = maxPrintableLabel[:53] + strings.TrimSuffix( + strings.Join([]string{".", ".", ".", ".", "."}, 
maxPrintableLabel[:49]), ".") + reChar = regexp.MustCompile(`.`) + i = -1 + maxUnprintableLabel = reChar.ReplaceAllStringFunc(maxPrintableLabel, func(ch string) string { + if i++; i >= 32 { + i = 0 + } + return fmt.Sprintf("\\%03d", i) + }) +) + +func TestUnpackDomainName(t *testing.T) { + var cases = []struct { + label string + input string + expectedOutput string + expectedError string + }{ + {"empty domain", + "\x00", + ".", + ""}, + {"long label", + string(63) + maxPrintableLabel + "\x00", + maxPrintableLabel + ".", + ""}, + {"unprintable label", + string(63) + regexp.MustCompile(`\\[0-9]+`).ReplaceAllStringFunc(maxUnprintableLabel, + func(escape string) string { + n, _ := strconv.ParseInt(escape[1:], 10, 8) + return string(n) + }) + "\x00", + maxUnprintableLabel + ".", + ""}, + {"long domain", + string(53) + strings.Replace(longDomain, ".", string(49), -1) + "\x00", + longDomain + ".", + ""}, + {"compression pointer", + // an unrealistic but functional test referencing an offset _inside_ a label + "\x03foo" + "\x05\x03com\x00" + "\x07example" + "\xC0\x05", + "foo.\\003com\\000.example.com.", + ""}, + + {"too long domain", + string(54) + "x" + strings.Replace(longDomain, ".", string(49), -1) + "\x00", + "x" + longDomain + ".", + ErrLongDomain.Error()}, + {"too long by pointer", + // a matryoshka doll name to get over 255 octets after expansion via internal pointers + string([]byte{ + // 11 length values, first to last + 40, 37, 34, 31, 28, 25, 22, 19, 16, 13, 0, + // 12 filler values + 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, + // 10 pointers, last to first + 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, + }), + "", + ErrLongDomain.Error()}, + {"long by pointer", + // a matryoshka doll name _not_ exceeding 255 octets after expansion + string([]byte{ + // 11 length values, first to last + 37, 34, 31, 28, 25, 22, 19, 16, 13, 10, 0, + // 9 filler values + 120, 120, 120, 120, 120, 120, 120, 120, 120, + // 10 pointers, last to first + 192, 10, 192, 9, 192, 8, 192, 7, 192, 6, 192, 5, 192, 4, 192, 3, 192, 2, 192, 1, + }), + "" + + (`\"\031\028\025\022\019\016\013\010\000xxxxxxxxx` + + `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003\192\002.`) + + (`\031\028\025\022\019\016\013\010\000xxxxxxxxx` + + `\192\010\192\009\192\008\192\007\192\006\192\005\192\004\192\003.`) + + (`\028\025\022\019\016\013\010\000xxxxxxxxx` + + `\192\010\192\009\192\008\192\007\192\006\192\005\192\004.`) + + (`\025\022\019\016\013\010\000xxxxxxxxx` + + `\192\010\192\009\192\008\192\007\192\006\192\005.`) + + `\022\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007\192\006.` + + `\019\016\013\010\000xxxxxxxxx\192\010\192\009\192\008\192\007.` + + `\016\013\010\000xxxxxxxxx\192\010\192\009\192\008.` + + `\013\010\000xxxxxxxxx\192\010\192\009.` + + `\010\000xxxxxxxxx\192\010.` + + `\000xxxxxxxxx.`, + ""}, + {"truncated name", "\x07example\x03", "", "dns: buffer size too small"}, + {"non-absolute name", "\x07example\x03com", "", "dns: buffer size too small"}, + {"compression pointer cycle", + "\x03foo" + "\x03bar" + "\x07example" + "\xC0\x04", + "", + "dns: too many compression pointers"}, + {"reserved compression pointer 0b10", "\x07example\x80", "", "dns: bad rdata"}, + {"reserved compression pointer 0b01", "\x07example\x40", "", "dns: bad rdata"}, + } + for _, test := range cases { + output, idx, err := UnpackDomainName([]byte(test.input), 0) + if test.expectedOutput != "" && output != test.expectedOutput { + t.Errorf("%s: expected %s, got 
%s", test.label, test.expectedOutput, output) + } + if test.expectedError == "" && err != nil { + t.Errorf("%s: expected no error, got %d %v", test.label, idx, err) + } else if test.expectedError != "" && (err == nil || err.Error() != test.expectedError) { + t.Errorf("%s: expected error %s, got %d %v", test.label, test.expectedError, idx, err) + } + } +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 0000000..9b908c4 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,106 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. +func HashName(label string, ha uint8, iter uint16, salt string) string { + saltwire := new(saltWireFmt) + saltwire.Salt = salt + wire := make([]byte, DefaultMsgSize) + n, err := packSaltWire(saltwire, wire) + if err != nil { + return "" + } + wire = wire[:n] + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + var s hash.Hash + switch ha { + case SHA1: + s = sha1.New() + default: + return "" + } + + // k = 0 + s.Write(name) + s.Write(wire) + nsec3 := s.Sum(nil) + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + s.Write(nsec3) + s.Write(wire) + nsec3 = s.Sum(nsec3[:0]) + } + return toBase32(nsec3) +} + +// Cover returns true if a name is covered by the NSEC3 record +func (rr *NSEC3) Cover(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + + nextHash := rr.NextDomain + if ownerHash == nextHash { // empty interval + return false + } + if ownerHash > nextHash { // end of zone + if nameHash > ownerHash { // covered since there is nothing after ownerHash + return true + } + return nameHash < nextHash // if nameHash is before beginning of zone it is covered + } + if nameHash < ownerHash { // nameHash is before ownerHash, not covered + return false + } + return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) +} + +// Match returns true if a name matches the NSEC3 record +func (rr *NSEC3) Match(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + if ownerHash == nameHash { + return true + } + return false +} + +func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { + off, err := packStringHex(sw.Salt, msg, 0) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx_test.go b/vendor/github.com/miekg/dns/nsecx_test.go new file mode 100644 index 0000000..8d5f717 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx_test.go @@ -0,0 +1,133 @@ +package dns + +import "testing" + +func TestPackNsec3(t *testing.T) { + nsec3 := 
HashName("dnsex.nl.", SHA1, 0, "DEAD") + if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" { + t.Error(nsec3) + } + + nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD") + if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" { + t.Error(nsec3) + } +} + +func TestNsec3(t *testing.T) { + nsec3, _ := NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") + if !nsec3.(*NSEC3).Match("nl.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r + t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") + } + if !nsec3.(*NSEC3).Match("NL.") { // name hash = sk4e8fj94u78smusb40o1n0oltbblu2r + t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.NL. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") + } + if nsec3.(*NSEC3).Match("com.") { // + t.Fatal("com. is not in the zone nl.") + } + if nsec3.(*NSEC3).Match("test.nl.") { // name hash = gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q + t.Fatal("gd0ptr5bnfpimpu2d3v6gd4n0bai7s0q.nl. should not match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.") + } + nsec3, _ = NewRR("nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM") + if nsec3.(*NSEC3).Match("nl.") { + t.Fatal("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should not match a record without a owner hash") + } + + for _, tc := range []struct { + rr *NSEC3 + name string + covers bool + }{ + // positive tests + { // name hash between owner hash and next hash + rr: &NSEC3{ + Hdr: RR_Header{Name: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP.com."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", + }, + name: "bsd.com.", + covers: true, + }, + { // end of zone, name hash is after owner hash + rr: &NSEC3{ + Hdr: RR_Header{Name: "3v62ulr0nre83v0rja2vjgtlif9v6rab.com."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", + }, + name: "csd.com.", + covers: true, + }, + { // end of zone, name hash is before beginning of zone + rr: &NSEC3{ + Hdr: RR_Header{Name: "PT3RON8N7PM3A0OE989IB84OOSADP7O8.com."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB", + }, + name: "asd.com.", + covers: true, + }, + // negative tests + { // too short owner name + rr: &NSEC3{ + Hdr: RR_Header{Name: "nl."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", + }, + name: "asd.com.", + covers: false, + }, + { // outside of zone + rr: &NSEC3{ + Hdr: RR_Header{Name: "39p91242oslggest5e6a7cci4iaeqvnk.nl."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6", + }, + name: "asd.com.", + covers: false, + }, + { // empty interval + rr: &NSEC3{ + Hdr: RR_Header{Name: "2n1tb3vairuobl6rkdvii42n9tfmialp.com."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "2N1TB3VAIRUOBL6RKDVII42N9TFMIALP", + }, + name: "asd.com.", + covers: false, + }, + { // name hash is before owner hash, not covered + rr: &NSEC3{ + Hdr: RR_Header{Name: "3V62ULR0NRE83V0RJA2VJGTLIF9V6RAB.com."}, + Hash: 1, + Flags: 1, + Iterations: 5, + Salt: "F10E9F7EA83FC8F3", + NextDomain: "PT3RON8N7PM3A0OE989IB84OOSADP7O8", + }, + name: "asd.com.", + covers: false, + }, + } { + covers := tc.rr.Cover(tc.name) + if tc.covers != covers { + t.Fatalf("Cover failed for %s: expected %t, got %t 
[record: %s]", tc.name, tc.covers, covers, tc.rr) + } + } +} diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go new file mode 100644 index 0000000..fc5bdaf --- /dev/null +++ b/vendor/github.com/miekg/dns/parse_test.go @@ -0,0 +1,1540 @@ +package dns + +import ( + "bytes" + "crypto/rsa" + "encoding/hex" + "fmt" + "math/rand" + "net" + "reflect" + "strconv" + "strings" + "testing" + "testing/quick" + "time" +) + +func TestDotInName(t *testing.T) { + buf := make([]byte, 20) + PackDomainName("aa\\.bb.nl.", buf, 0, nil, false) + // index 3 must be a real dot + if buf[3] != '.' { + t.Error("dot should be a real dot") + } + + if buf[6] != 2 { + t.Error("this must have the value 2") + } + dom, _, _ := UnpackDomainName(buf, 0) + // printing it should yield the backspace again + if dom != "aa\\.bb.nl." { + t.Error("dot should have been escaped: ", dom) + } +} + +func TestDotLastInLabel(t *testing.T) { + sample := "aa\\..au." + buf := make([]byte, 20) + _, err := PackDomainName(sample, buf, 0, nil, false) + if err != nil { + t.Fatalf("unexpected error packing domain: %v", err) + } + dom, _, _ := UnpackDomainName(buf, 0) + if dom != sample { + t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom) + } +} + +func TestTooLongDomainName(t *testing.T) { + l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt." + dom := l + l + l + l + l + l + l + _, err := NewRR(dom + " IN A 127.0.0.1") + if err == nil { + t.Error("should be too long") + } else { + t.Logf("error is %v", err) + } + _, err = NewRR("..com. IN A 127.0.0.1") + if err == nil { + t.Error("should fail") + } else { + t.Logf("error is %v", err) + } +} + +func TestDomainName(t *testing.T) { + tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.", + "www.*.miek.nl.", "www.*.miek.nl.", + } + dbuff := make([]byte, 40) + + for _, ts := range tests { + if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil { + t.Error("not a valid domain name") + continue + } + n, _, err := UnpackDomainName(dbuff, 0) + if err != nil { + t.Error("failed to unpack packed domain name") + continue + } + if ts != n { + t.Errorf("must be equal: in: %s, out: %s", ts, n) + } + } +} + +func TestDomainNameAndTXTEscapes(t *testing.T) { + tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', 9, 13, 10, 0, 255} + for _, b := range tests { + rrbytes := []byte{ + 1, b, 0, // owner + byte(TypeTXT >> 8), byte(TypeTXT), + byte(ClassINET >> 8), byte(ClassINET), + 0, 0, 0, 1, // TTL + 0, 2, 1, b, // Data + } + rr1, _, err := UnpackRR(rrbytes, 0) + if err != nil { + panic(err) + } + s := rr1.String() + rr2, err := NewRR(s) + if err != nil { + t.Errorf("Error parsing unpacked RR's string: %v", err) + t.Errorf(" Bytes: %v", rrbytes) + t.Errorf("String: %v", s) + } + repacked := make([]byte, len(rrbytes)) + if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { + t.Errorf("error packing parsed RR: %v", err) + t.Errorf(" original Bytes: %v", rrbytes) + t.Errorf("unpacked Struct: %v", rr1) + t.Errorf(" parsed Struct: %v", rr2) + } + if !bytes.Equal(repacked, rrbytes) { + t.Error("packed bytes don't match original bytes") + t.Errorf(" original bytes: %v", rrbytes) + t.Errorf(" packed bytes: %v", repacked) + t.Errorf("unpacked struct: %v", rr1) + t.Errorf(" parsed struct: %v", rr2) + } + } +} + +func TestTXTEscapeParsing(t *testing.T) { + test := [][]string{ + {`";"`, `";"`}, + {`\;`, `";"`}, + {`"\t"`, `"t"`}, + {`"\r"`, `"r"`}, + {`"\ "`, `" "`}, + {`"\;"`, `";"`}, + {`"\;\""`, `";\""`}, + 
{`"\(a\)"`, `"(a)"`}, + {`"\(a)"`, `"(a)"`}, + {`"(a\)"`, `"(a)"`}, + {`"(a)"`, `"(a)"`}, + {`"\048"`, `"0"`}, + {`"\` + "\t" + `"`, `"\009"`}, + {`"\` + "\n" + `"`, `"\010"`}, + {`"\` + "\r" + `"`, `"\013"`}, + {`"\` + "\x11" + `"`, `"\017"`}, + {`"\'"`, `"'"`}, + } + for _, s := range test { + rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0])) + if err != nil { + t.Errorf("could not parse %v TXT: %s", s[0], err) + continue + } + + txt := sprintTxt(rr.(*TXT).Txt) + if txt != s[1] { + t.Errorf("mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1]) + } + } +} + +func GenerateDomain(r *rand.Rand, size int) []byte { + dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs + var dn []byte + done := false + for i := 0; i < dnLen && !done; { + max := dnLen - i + if max > 63 { + max = 63 + } + lLen := max + if lLen != 0 { + lLen = int(r.Int31()) % max + } + done = lLen == 0 + if done { + continue + } + l := make([]byte, lLen+1) + l[0] = byte(lLen) + for j := 0; j < lLen; j++ { + l[j+1] = byte(rand.Int31()) + } + dn = append(dn, l...) + i += 1 + lLen + } + return append(dn, 0) +} + +func TestDomainQuick(t *testing.T) { + r := rand.New(rand.NewSource(0)) + f := func(l int) bool { + db := GenerateDomain(r, l) + ds, _, err := UnpackDomainName(db, 0) + if err != nil { + panic(err) + } + buf := make([]byte, 255) + off, err := PackDomainName(ds, buf, 0, nil, false) + if err != nil { + t.Errorf("error packing domain: %v", err) + t.Errorf(" bytes: %v", db) + t.Errorf("string: %v", ds) + return false + } + if !bytes.Equal(db, buf[:off]) { + t.Errorf("repacked domain doesn't match original:") + t.Errorf("src bytes: %v", db) + t.Errorf(" string: %v", ds) + t.Errorf("out bytes: %v", buf[:off]) + return false + } + return true + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} + +func GenerateTXT(r *rand.Rand, size int) []byte { + rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs + var rd []byte + for i := 0; i < rdLen; { + max := rdLen - 1 + if max > 255 { + max = 255 + } + sLen := max + if max != 0 { + sLen = int(r.Int31()) % max + } + s := make([]byte, sLen+1) + s[0] = byte(sLen) + for j := 0; j < sLen; j++ { + s[j+1] = byte(rand.Int31()) + } + rd = append(rd, s...) + i += 1 + sLen + } + return rd +} + +// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt +// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't +// place the quotes where they need to be. +// So either add some code the places the quotes in just the right spots, make this non random +// or do something else. +// Disabled for now. (miek) +func testTXTRRQuick(t *testing.T) { + s := rand.NewSource(0) + r := rand.New(s) + typeAndClass := []byte{ + byte(TypeTXT >> 8), byte(TypeTXT), + byte(ClassINET >> 8), byte(ClassINET), + 0, 0, 0, 1, // TTL + } + f := func(l int) bool { + owner := GenerateDomain(r, l) + rdata := GenerateTXT(r, l) + rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata)) + rrbytes = append(rrbytes, owner...) + rrbytes = append(rrbytes, typeAndClass...) + rrbytes = append(rrbytes, byte(len(rdata)>>8)) + rrbytes = append(rrbytes, byte(len(rdata))) + rrbytes = append(rrbytes, rdata...) 
+ rr, _, err := UnpackRR(rrbytes, 0) + if err != nil { + panic(err) + } + buf := make([]byte, len(rrbytes)*3) + off, err := PackRR(rr, buf, 0, nil, false) + if err != nil { + t.Errorf("pack Error: %v\nRR: %v", err, rr) + return false + } + buf = buf[:off] + if !bytes.Equal(buf, rrbytes) { + t.Errorf("packed bytes don't match original bytes") + t.Errorf("src bytes: %v", rrbytes) + t.Errorf(" struct: %v", rr) + t.Errorf("out bytes: %v", buf) + return false + } + if len(rdata) == 0 { + // string'ing won't produce any data to parse + return true + } + rrString := rr.String() + rr2, err := NewRR(rrString) + if err != nil { + t.Errorf("error parsing own output: %v", err) + t.Errorf("struct: %v", rr) + t.Errorf("string: %v", rrString) + return false + } + if rr2.String() != rrString { + t.Errorf("parsed rr.String() doesn't match original string") + t.Errorf("original: %v", rrString) + t.Errorf(" parsed: %v", rr2.String()) + return false + } + + buf = make([]byte, len(rrbytes)*3) + off, err = PackRR(rr2, buf, 0, nil, false) + if err != nil { + t.Errorf("error packing parsed rr: %v", err) + t.Errorf("unpacked Struct: %v", rr) + t.Errorf(" string: %v", rrString) + t.Errorf(" parsed Struct: %v", rr2) + return false + } + buf = buf[:off] + if !bytes.Equal(buf, rrbytes) { + t.Errorf("parsed packed bytes don't match original bytes") + t.Errorf(" source bytes: %v", rrbytes) + t.Errorf("unpacked struct: %v", rr) + t.Errorf(" string: %v", rrString) + t.Errorf(" parsed struct: %v", rr2) + t.Errorf(" repacked bytes: %v", buf) + return false + } + return true + } + c := &quick.Config{MaxCountScale: 10} + if err := quick.Check(f, c); err != nil { + t.Error(err) + } +} + +func TestParseDirectiveMisc(t *testing.T) { + tests := map[string]string{ + "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.", + "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.", + "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.", + `name. IN SOA a6.nstld.com. hostmaster.nic.name. ( + 203362132 ; serial + 5m ; refresh (5 minutes) + 5m ; retry (5 minutes) + 2w ; expire (2 weeks) + 300 ; minimum (5 minutes) +)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300", + ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.", + "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestNSEC(t *testing.T) { + nsectests := map[string]string{ + "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F", + "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. 
A RRSIG NSEC TYPE65534", + "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test. NSEC3 1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA": "44ohaq2njb0idnvolt9ggthvsk1e1uv8.skydns.test.\t3600\tIN\tNSEC3\t1 0 0 - 44OHAQ2NJB0IDNVOLT9GGTHVSK1E1UVA", + } + for i, o := range nsectests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseLOC(t *testing.T) { + lt := map[string]string{ + "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", + "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseDS(t *testing.T) { + dt := map[string]string{ + "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestQuotes(t *testing.T) { + tests := map[string]string{ + `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", + `t.example.com. IN TXT "a + bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\010 bc\"", + `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", + `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", + `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", + `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", + `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", + `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", + `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", + `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", + `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\" \"aaa\"", + `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.", + "cid.urn.arpa. 
NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseClass(t *testing.T) { + tests := map[string]string{ + "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1", + "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1", + "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1", + // ClassANY can not occur in zone files + // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", + "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestBrace(t *testing.T) { + tests := map[string]string{ + "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1", + "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.", + `miek.nl. IN ( + 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1", + "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1", + "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1", + "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.", + "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.", + `(miek.nl.) ( + (IN) + (AAAA) + (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1", + `(miek.nl.) ( + (IN) + (AAAA) + (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1", + "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2", + `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) ( + 2009032802 ; serial + 21600 ; refresh (6 hours) + 7(2)00 ; retry (2 hours) + 604()800 ; expire (1 week) + 3600 ; minimum (1 hour) + )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600", + "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10", + "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11", + "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12", + `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. ( + 2009032802 ; serial + 21600 ; refresh (6 hours) + 7200 ; retry (2 hours) + 604800 ; expire (1 week) + 3600 ; minimum (1 hour) + )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse RR: %v\n\t%s", err, i) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseFailure(t *testing.T) { + tests := []string{"miek.nl. IN A 327.0.0.1", + "miek.nl. IN AAAA ::x", + "miek.nl. IN MX a0 miek.nl.", + "miek.nl aap IN MX mx.miek.nl.", + "miek.nl 200 IN mxx 10 mx.miek.nl.", + "miek.nl. inn MX 10 mx.miek.nl.", + // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata + "miek.nl. IN CNAME ..", + "miek.nl. PA MX 10 miek.nl.", + "miek.nl. 
) IN MX 10 miek.nl.", + } + + for _, s := range tests { + _, err := NewRR(s) + if err == nil { + t.Errorf("should have triggered an error: \"%s\"", s) + } + } +} + +func TestZoneParsing(t *testing.T) { + // parse_test.db + db := ` +a.example.com. IN A 127.0.0.1 +8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG +$ORIGIN a.example.com. +test IN A 127.0.0.1 + IN SSHFP 1 2 ( + BC6533CDC95A79078A39A56EA7635984ED655318ADA9 + B6159E30723665DA95BB ) +$ORIGIN b.example.com. +test IN CNAME test.a.example.com. +` + start := time.Now().UnixNano() + to := ParseZone(strings.NewReader(db), "", "parse_test.db") + var i int + for x := range to { + i++ + if x.Error != nil { + t.Error(x.Error) + continue + } + t.Log(x.RR) + } + delta := time.Now().UnixNano() - start + t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) +} + +func ExampleParseZone() { + zone := `$ORIGIN . +$TTL 3600 ; 1 hour +name IN SOA a6.nstld.com. hostmaster.nic.name. ( + 203362132 ; serial + 300 ; refresh (5 minutes) + 300 ; retry (5 minutes) + 1209600 ; expire (2 weeks) + 300 ; minimum (5 minutes) + ) +$TTL 10800 ; 3 hours +name. 10800 IN NS name. + IN NS g6.nstld.com. + 7200 NS h6.nstld.com. + 3600 IN NS j6.nstld.com. + IN 3600 NS k6.nstld.com. + NS l6.nstld.com. + NS a6.nstld.com. + NS c6.nstld.com. + NS d6.nstld.com. + NS f6.nstld.com. + NS m6.nstld.com. +( + NS m7.nstld.com. +) +$ORIGIN name. +0-0onlus NS ns7.ehiweb.it. + NS ns8.ehiweb.it. +0-g MX 10 mx01.nic + MX 10 mx02.nic + MX 10 mx03.nic + MX 10 mx04.nic +$ORIGIN 0-g.name +moutamassey NS ns01.yahoodomains.jp. + NS ns02.yahoodomains.jp. +` + to := ParseZone(strings.NewReader(zone), "", "testzone") + for x := range to { + fmt.Println(x.RR) + } + // Output: + // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 + // name. 10800 IN NS name. + // name. 10800 IN NS g6.nstld.com. + // name. 7200 IN NS h6.nstld.com. + // name. 3600 IN NS j6.nstld.com. + // name. 3600 IN NS k6.nstld.com. + // name. 10800 IN NS l6.nstld.com. + // name. 10800 IN NS a6.nstld.com. + // name. 10800 IN NS c6.nstld.com. + // name. 10800 IN NS d6.nstld.com. + // name. 10800 IN NS f6.nstld.com. + // name. 10800 IN NS m6.nstld.com. + // name. 10800 IN NS m7.nstld.com. + // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. + // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. + // 0-g.name. 10800 IN MX 10 mx01.nic.name. + // 0-g.name. 10800 IN MX 10 mx02.nic.name. + // 0-g.name. 10800 IN MX 10 mx03.nic.name. + // 0-g.name. 10800 IN MX 10 mx04.nic.name. + // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. + // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. +} + +func ExampleHIP() { + h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p +9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ +b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D + rvs.example.com. )` + if hip, err := NewRR(h); err == nil { + fmt.Println(hip.String()) + } + // Output: + // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. +} + +func TestHIP(t *testing.T) { + h := `www.example.com. 
IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p +9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ +b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D + rvs1.example.com. + rvs2.example.com. )` + rr, err := NewRR(h) + if err != nil { + t.Fatalf("failed to parse RR: %v", err) + } + t.Logf("RR: %s", rr) + msg := new(Msg) + msg.Answer = []RR{rr, rr} + bytes, err := msg.Pack() + if err != nil { + t.Fatalf("failed to pack msg: %v", err) + } + if err := msg.Unpack(bytes); err != nil { + t.Fatalf("failed to unpack msg: %v", err) + } + if len(msg.Answer) != 2 { + t.Fatalf("2 answers expected: %v", msg) + } + for i, rr := range msg.Answer { + rr := rr.(*HIP) + t.Logf("RR: %s", rr) + if l := len(rr.RendezvousServers); l != 2 { + t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) + } + for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} { + if rr.RendezvousServers[j] != s { + t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg) + } + } + } +} + +func ExampleSOA() { + s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" + if soa, err := NewRR(s); err == nil { + fmt.Println(soa.String()) + } + // Output: + // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 +} + +func TestLineNumberError(t *testing.T) { + s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" + if _, err := NewRR(s); err != nil { + if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { + t.Error("not expecting this error: ", err) + } + } +} + +// Test with no known RR on the line +func TestLineNumberError2(t *testing.T) { + tests := map[string]string{ + "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21", + "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57", + "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60", + `example.com 1000 IN TALINK ( a.example.com. + bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18", + // This is a bug, it should report an error on line 1, but the new is already processed. + `example.com 1000 IN TALINK ( a.example.com. b...example.com. + )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"} + + for in, errStr := range tests { + _, err := NewRR(in) + if err == nil { + t.Error("err is nil") + } else { + if err.Error() != errStr { + t.Errorf("%s: error should be %s is %v", in, errStr, err) + } + } + } +} + +// Test if the calculations are correct +func TestRfc1982(t *testing.T) { + // If the current time and the timestamp are more than 68 years apart + // it means the date has wrapped. 
0 is 1970 + + // fall in the current 68 year span + strtests := []string{"20120525134203", "19700101000000", "20380119031408"} + for _, v := range strtests { + if x, _ := StringToTime(v); v != TimeToString(x) { + t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x) + } + } + + inttests := map[uint32]string{0: "19700101000000", + 1 << 31: "20380119031408", + 1<<32 - 1: "21060207062815", + } + for i, v := range inttests { + if TimeToString(i) != v { + t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i)) + } + } + + // Future tests, these dates get parsed to a date within the current 136 year span + future := map[string]string{"22680119031408": "20631123173144", + "19010101121212": "20370206184028", + "19210101121212": "20570206184028", + "19500101121212": "20860206184028", + "19700101000000": "19700101000000", + "19690101000000": "21050207062816", + "29210101121212": "21040522212236", + } + for from, to := range future { + x, _ := StringToTime(from) + y := TimeToString(x) + if y != to { + t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y) + } + } +} + +func TestEmpty(t *testing.T) { + for range ParseZone(strings.NewReader(""), "", "") { + t.Errorf("should be empty") + } +} + +func TestLowercaseTokens(t *testing.T) { + var testrecords = []string{ + "example.org. 300 IN a 1.2.3.4", + "example.org. 300 in A 1.2.3.4", + "example.org. 300 in a 1.2.3.4", + "example.org. 300 a 1.2.3.4", + "example.org. 300 A 1.2.3.4", + "example.org. IN a 1.2.3.4", + "example.org. in A 1.2.3.4", + "example.org. in a 1.2.3.4", + "example.org. a 1.2.3.4", + "example.org. A 1.2.3.4", + "example.org. a 1.2.3.4", + "$ORIGIN example.org.\n a 1.2.3.4", + "$Origin example.org.\n a 1.2.3.4", + "$origin example.org.\n a 1.2.3.4", + "example.org. Class1 Type1 1.2.3.4", + } + for _, testrr := range testrecords { + _, err := NewRR(testrr) + if err != nil { + t.Errorf("failed to parse %#v, got %v", testrr, err) + } + } +} + +func ExampleParseZone_generate() { + // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 + zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" + to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") + for x := range to { + if x.Error == nil { + fmt.Println(x.RR.String()) + } + } + // Output: + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. + // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. + // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. + // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. + // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. + // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. + // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. + // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. + // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. 
+} + +func TestSRVPacking(t *testing.T) { + msg := Msg{} + + things := []string{"1.2.3.4:8484", + "45.45.45.45:8484", + "84.84.84.84:8484", + } + + for i, n := range things { + h, p, err := net.SplitHostPort(n) + if err != nil { + continue + } + port, _ := strconv.ParseUint(p, 10, 16) + + rr := &SRV{ + Hdr: RR_Header{Name: "somename.", + Rrtype: TypeSRV, + Class: ClassINET, + Ttl: 5}, + Priority: uint16(i), + Weight: 5, + Port: uint16(port), + Target: h + ".", + } + + msg.Answer = append(msg.Answer, rr) + } + + _, err := msg.Pack() + if err != nil { + t.Fatalf("couldn't pack %v: %v", msg, err) + } +} + +func TestParseBackslash(t *testing.T) { + if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { + t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) + } + if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { + t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) + } + if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { + t.Errorf("could not create RR with \\ and \\@ in it") + } else { + t.Logf("parsed %s", r.String()) + } +} + +func TestILNP(t *testing.T) { + tests := []string{ + "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64", + "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65", + "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66", + "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0", + "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0", + "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0", + "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000", + "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000", + "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000", + "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.", + "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.", + "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.", + } + for _, t1 := range tests { + r, err := NewRR(t1) + if err != nil { + t.Fatalf("an error occurred: %v", err) + } else { + if t1 != r.String() { + t.Fatalf("strings should be equal %s %s", t1, r.String()) + } + } + } +} + +func TestGposEidNimloc(t *testing.T) { + dt := map[string]string{ + "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.", + "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0", + "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0", + "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920", + "VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestPX(t *testing.T) { + dt := map[string]string{ + "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.", + "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. 
O-ab.PRMD-net2.ADMDb.C-it.", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestComment(t *testing.T) { + // Comments we must see + comments := map[string]bool{"; this is comment 1": true, + "; this is comment 4": true, "; this is comment 6": true, + "; this is comment 7": true, "; this is comment 8": true} + zone := ` +foo. IN A 10.0.0.1 ; this is comment 1 +foo. IN A ( + 10.0.0.2 ; this is comment2 +) +; this is comment3 +foo. IN A 10.0.0.3 +foo. IN A ( 10.0.0.4 ); this is comment 4 + +foo. IN A 10.0.0.5 +; this is comment5 + +foo. IN A 10.0.0.6 + +foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6 +foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7 +foo. IN TXT "THIS IS TEXT MAN"; this is comment 8 +` + for x := range ParseZone(strings.NewReader(zone), ".", "") { + if x.Error == nil { + if x.Comment != "" { + if _, ok := comments[x.Comment]; !ok { + t.Errorf("wrong comment %s", x.Comment) + } + } + } + } +} + +func TestEUIxx(t *testing.T) { + tests := map[string]string{ + "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a", + "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a", + } + for i, o := range tests { + r, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse %s: %v", i, err) + } + if r.String() != o { + t.Errorf("want %s, got %s", o, r.String()) + } + } +} + +func TestUserRR(t *testing.T) { + tests := map[string]string{ + "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234", + "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556", + "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"", + } + for i, o := range tests { + r, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse %s: %v", i, err) + } + if r.String() != o { + t.Errorf("want %s, got %s", o, r.String()) + } + } +} + +func TestTXT(t *testing.T) { + // Test single entry TXT record + rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`) + if err != nil { + t.Error("failed to parse single value TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 1 { + t.Error("bad size of TXT value:", len(rr.Txt)) + } else if rr.Txt[0] != "single value" { + t.Error("bad single value") + } + if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` { + t.Error("bad representation of TXT record:", rr.String()) + } + if rr.len() != 28+1+12 { + t.Error("bad size of serialized record:", rr.len()) + } + } + + // Test multi entries TXT record + rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`) + if err != nil { + t.Error("failed to parse multi-values TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 4 { + t.Error("bad size of TXT multi-value:", len(rr.Txt)) + } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" { + t.Error("bad values in TXT records") + } + if rr.String() != `_raop._tcp.local. 
60 IN TXT "a=1" "b=2" "c=3" "d=4"` { + t.Error("bad representation of TXT multi value record:", rr.String()) + } + if rr.len() != 28+1+3+1+3+1+3+1+3 { + t.Error("bad size of serialized multi value record:", rr.len()) + } + } + + // Test empty-string in TXT record + rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`) + if err != nil { + t.Error("failed to parse empty-string TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 1 { + t.Error("bad size of TXT empty-string value:", len(rr.Txt)) + } else if rr.Txt[0] != "" { + t.Error("bad value for empty-string TXT record") + } + if rr.String() != `_raop._tcp.local. 60 IN TXT ""` { + t.Error("bad representation of empty-string TXT record:", rr.String()) + } + if rr.len() != 28+1 { + t.Error("bad size of serialized record:", rr.len()) + } + } + + // Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser + s := "" + for i := 0; i < 255; i++ { + s += "a" + } + s += "b" + rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`) + if err != nil { + t.Error("failed to parse empty-string TXT record", err) + } + if rr.(*TXT).Txt[1] != "b" { + t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) + } + t.Log(rr.String()) +} + +func TestTypeXXXX(t *testing.T) { + _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd") + if err != nil { + t.Errorf("failed to parse TYPE1234 RR: %v", err) + } + _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd") + if err == nil { + t.Errorf("this should not work, for TYPE655341") + } + _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001") + if err == nil { + t.Errorf("this should not work") + } +} + +func TestPTR(t *testing.T) { + _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.") + if err != nil { + t.Error("failed to parse ", err) + } +} + +func TestDigit(t *testing.T) { + tests := map[string]byte{ + "miek\\000.nl. 100 IN TXT \"A\"": 0, + "miek\\001.nl. 100 IN TXT \"A\"": 1, + "miek\\254.nl. 100 IN TXT \"A\"": 254, + "miek\\255.nl. 100 IN TXT \"A\"": 255, + "miek\\256.nl. 100 IN TXT \"A\"": 0, + "miek\\257.nl. 100 IN TXT \"A\"": 1, + "miek\\004.nl. 100 IN TXT \"A\"": 4, + } + for s, i := range tests { + r, err := NewRR(s) + buf := make([]byte, 40) + if err != nil { + t.Fatalf("failed to parse %v", err) + } + PackRR(r, buf, 0, nil, false) + t.Log(buf) + if buf[5] != i { + t.Fatalf("5 pos must be %d, is %d", i, buf[5]) + } + r1, _, _ := UnpackRR(buf, 0) + if r1.Header().Ttl != 100 { + t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl) + } + } +} + +func TestParseRRSIGTimestamp(t *testing.T) { + tests := map[string]bool{ + `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, + `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. 
MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, + } + for r := range tests { + _, err := NewRR(r) + if err != nil { + t.Error(err) + } + } +} + +func TestTxtEqual(t *testing.T) { + rr1 := new(TXT) + rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + rr1.Txt = []string{"a\"a", "\"", "b"} + rr2, _ := NewRR(rr1.String()) + if rr1.String() != rr2.String() { + // This is not an error, but keep this test. + t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) + } + t.Logf("%s\n%s", rr1.String(), rr2.String()) +} + +func TestTxtLong(t *testing.T) { + rr1 := new(TXT) + rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + // Make a long txt record, this breaks when sending the packet, + // but not earlier. + rr1.Txt = []string{"start-"} + for i := 0; i < 200; i++ { + rr1.Txt[0] += "start-" + } + str := rr1.String() + if len(str) < len(rr1.Txt[0]) { + t.Error("string conversion should work") + } +} + +// Basically, don't crash. +func TestMalformedPackets(t *testing.T) { + var packets = []string{ + "0021641c0000000100000000000078787878787878787878787303636f6d0000100001", + } + + // com = 63 6f 6d + for _, packet := range packets { + data, _ := hex.DecodeString(packet) + // for _, v := range data { + // t.Log(v) + // } + var msg Msg + msg.Unpack(data) + // println(msg.String()) + } +} + +type algorithm struct { + name uint8 + bits int +} + +func TestNewPrivateKey(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + algorithms := []algorithm{ + {ECDSAP256SHA256, 256}, + {ECDSAP384SHA384, 384}, + {RSASHA1, 1024}, + {RSASHA256, 2048}, + {DSA, 1024}, + } + + for _, algo := range algorithms { + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." 
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = algo.name + privkey, err := key.Generate(algo.bits) + if err != nil { + t.Fatal(err) + } + + newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) + if err != nil { + t.Error(key.String()) + t.Error(key.PrivateKeyString(privkey)) + t.Fatal(err) + } + + switch newPrivKey := newPrivKey.(type) { + case *rsa.PrivateKey: + newPrivKey.Precompute() + } + + if !reflect.DeepEqual(privkey, newPrivKey) { + t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey) + } + } +} + +// special input test +func TestNewRRSpecial(t *testing.T) { + var ( + rr RR + err error + expect string + ) + + rr, err = NewRR("; comment") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("$ORIGIN foo.") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR(" ") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("\n") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") + expect = "foo.\t3600\tIN\tA\t1.1.1.1" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr == nil || rr.String() != expect { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } +} + +func TestPrintfVerbsRdata(t *testing.T) { + x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.") + if Field(x, 1) != "20" { + t.Errorf("should be 20") + } + if Field(x, 2) != "mx.miek.nl." { + t.Errorf("should be mx.miek.nl.") + } + + x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") + if Field(x, 1) != "127.0.0.1" { + t.Errorf("should be 127.0.0.1") + } + + x, _ = NewRR("www.miek.nl. IN AAAA ::1") + if Field(x, 1) != "::1" { + t.Errorf("should be ::1") + } + + x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") + if Field(x, 1) != "a.miek.nl." { + t.Errorf("should be a.miek.nl.") + } + if Field(x, 2) != "A NS SOA MX AAAA" { + t.Errorf("should be A NS SOA MX AAAA") + } + + x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") + if Field(x, 1) != "first second" { + t.Errorf("should be first second") + } + if Field(x, 0) != "" { + t.Errorf("should be empty") + } +} + +func TestParseTokenOverflow(t *testing.T) { + _, err := NewRR("_443._tcp.example.org. 
IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f446967694365727453484132486967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797") + if err == nil { + t.Fatalf("token overflow should return an error") + } + t.Logf("err: %s\n", err) +} + +func TestParseTLSA(t *testing.T) { + lt := []string{ + "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00", + "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3", + "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 
69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2", + } + for _, o := range lt { + rr, err := NewRR(o) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseSMIMEA(t *testing.T) { + lt := map[string]string{ + "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t1 1 2 bd80f334566928fc18f58df7e4928c1886f48f71ca3fd41cd9b1854aca7c2180aaacad2819612ed68e7bd3701cc39be7f2529b017c0bc6a53e8fb3f0c7d48070", + "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t0 0 1 cdcf0fc66b182928c5217ddd42c826983f5a4b94160ee6c1c9be62d38199f710", + "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b": "2e85e1db3e62be6ea._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", + "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8 c26b251fa0c887ba4869f01 1a65f7e79967c2eb729f5b": "2e85e1db3e62be6eb._smimecert.example.com.\t3600\tIN\tSMIMEA\t3 0 2 499a1eda2af8828b552cdb9d80c3744a25872fddd73f3898d8e4afa3549595d2dd4340126e759566fe8c26b251fa0c887ba4869f011a65f7e79967c2eb729f5b", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseSSHFP(t *testing.T) { + lt := []string{ + "test.example.org.\t300\tSSHFP\t1 2 (\n" + + "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" + + "\t\t\t\t\tB6159E30723665DA95BB )", + "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )", + } + result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB" + for _, o := range lt { + rr, err := NewRR(o) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != result { + t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseHINFO(t *testing.T) { + dt := map[string]string{ + "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"", + "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"", + "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"", + "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"", + // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 
3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"", + // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html + // but effectively, even Bind would replace it to correctly formed text when you AXFR + // TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseCAA(t *testing.T) { + lt := map[string]string{ + "example.net. CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", + "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"", + "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"", + "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"", + "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestPackCAA(t *testing.T) { + m := new(Msg) + record := new(CAA) + record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0} + record.Tag = "issue" + record.Value = "symantec.com" + record.Flag = 1 + + m.Answer = append(m.Answer, record) + bytes, err := m.Pack() + if err != nil { + t.Fatalf("failed to pack msg: %v", err) + } + if err := m.Unpack(bytes); err != nil { + t.Fatalf("failed to unpack msg: %v", err) + } + if len(m.Answer) != 1 { + t.Fatalf("incorrect number of answers unpacked") + } + rr := m.Answer[0].(*CAA) + if rr.Tag != "issue" { + t.Fatalf("invalid tag for unpacked answer") + } else if rr.Value != "symantec.com" { + t.Fatalf("invalid value for unpacked answer") + } else if rr.Flag != 1 { + t.Fatalf("invalid flag for unpacked answer") + } +} + +func TestParseURI(t *testing.T) { + lt := map[string]string{ + "_http._tcp. IN URI 10 1 \"http://www.example.com/path\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"http://www.example.com/path\"", + "_http._tcp. IN URI 10 1 \"\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"\"", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseAVC(t *testing.T) { + avcs := map[string]string{ + `example.org. IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`: `example.org. 
3600 IN AVC "app-name:WOLFGANG|app-class:OAM|business=yes"`, + } + for avc, o := range avcs { + rr, err := NewRR(avc) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", avc, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestUnbalancedParens(t *testing.T) { + sig := `example.com. 3600 IN RRSIG MX 15 2 3600 ( + 1440021600 1438207200 3613 example.com. ( + oL9krJun7xfBOIWcGHi7mag5/hdZrKWw15jPGrHpjQeRAvTdszaPD+QLs3f + x8A4M3e23mRZ9VrbpMngwcrqNAg== )` + _, err := NewRR(sig) + if err == nil { + t.Fatalf("Failed to detect extra opening brace") + } +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 0000000..6b08e6e --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,149 @@ +package dns + +import ( + "fmt" + "strings" +) + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. + // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. + Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + newh := r.Hdr.copyHeader() + rr.Hdr = *newh + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. 
+func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. +func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go new file mode 100644 index 0000000..72ec8f5 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr_test.go @@ -0,0 +1,171 @@ +package dns_test + +import ( + "strings" + "testing" + + "github.com/miekg/dns" +) + +const TypeISBN uint16 = 0xFF00 + +// A crazy new RR type :) +type ISBN struct { + x string // rdata with 10 or 13 numbers, dashes or spaces allowed +} + +func NewISBN() dns.PrivateRdata { return &ISBN{""} } + +func (rd *ISBN) Len() int { return len([]byte(rd.x)) } +func (rd *ISBN) String() string { return rd.x } + +func (rd *ISBN) Parse(txt []string) error { + rd.x = strings.TrimSpace(strings.Join(txt, " ")) + return nil +} + +func (rd *ISBN) Pack(buf []byte) (int, error) { + b := []byte(rd.x) + n := copy(buf, b) + if n != len(b) { + return n, dns.ErrBuf + } + return n, nil +} + +func (rd *ISBN) Unpack(buf []byte) (int, error) { + rd.x = string(buf) + return len(buf), nil +} + +func (rd *ISBN) Copy(dest dns.PrivateRdata) error { + isbn, ok := dest.(*ISBN) + if !ok { + return dns.ErrRdata + } + isbn.x = rd.x + return nil +} + +var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t") + +func TestPrivateText(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + defer dns.PrivateHandleRemove(TypeISBN) + + rr, err := dns.NewRR(testrecord) + if err != nil { + t.Fatal(err) + } + if rr.String() != testrecord { + t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) + } else { + t.Log(rr.String()) + } +} + +func TestPrivateByteSlice(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + defer dns.PrivateHandleRemove(TypeISBN) + + rr, err := dns.NewRR(testrecord) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 100) + off, err := dns.PackRR(rr, buf, 0, nil, false) + if err != 
nil { + t.Errorf("got error packing ISBN: %v", err) + } + + custrr := rr.(*dns.PrivateRR) + if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off { + t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln) + } + + rr1, off1, err := dns.UnpackRR(buf[:off], 0) + if err != nil { + t.Errorf("got error unpacking ISBN: %v", err) + return + } + + if off1 != off { + t.Errorf("offset after unpacking differs: %d != %d", off1, off) + } + + if rr1.String() != testrecord { + t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord) + } else { + t.Log(rr1.String()) + } +} + +const TypeVERSION uint16 = 0xFF01 + +type VERSION struct { + x string +} + +func NewVersion() dns.PrivateRdata { return &VERSION{""} } + +func (rd *VERSION) String() string { return rd.x } +func (rd *VERSION) Parse(txt []string) error { + rd.x = strings.TrimSpace(strings.Join(txt, " ")) + return nil +} + +func (rd *VERSION) Pack(buf []byte) (int, error) { + b := []byte(rd.x) + n := copy(buf, b) + if n != len(b) { + return n, dns.ErrBuf + } + return n, nil +} + +func (rd *VERSION) Unpack(buf []byte) (int, error) { + rd.x = string(buf) + return len(buf), nil +} + +func (rd *VERSION) Copy(dest dns.PrivateRdata) error { + isbn, ok := dest.(*VERSION) + if !ok { + return dns.ErrRdata + } + isbn.x = rd.x + return nil +} + +func (rd *VERSION) Len() int { + return len([]byte(rd.x)) +} + +var smallzone = `$ORIGIN example.org. +@ SOA sns.dns.icann.org. noc.dns.icann.org. ( + 2014091518 7200 3600 1209600 3600 +) + A 1.2.3.4 +ok ISBN 1231-92110-12 +go VERSION ( + 1.3.1 ; comment +) +www ISBN 1231-92110-16 +* CNAME @ +` + +func TestPrivateZoneParser(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + dns.PrivateHandle("VERSION", TypeVERSION, NewVersion) + defer dns.PrivateHandleRemove(TypeISBN) + defer dns.PrivateHandleRemove(TypeVERSION) + + r := strings.NewReader(smallzone) + for x := range dns.ParseZone(r, ".", "") { + if err := x.Error; err != nil { + t.Fatal(err) + } + t.Log(x.RR) + } +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 0000000..6e21fba --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. 
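+ // Editor's worked example (not part of the upstream file): for an A RR
+ // whose header starts at offset 0 with owner "example.com." (13 octets on
+ // the wire), the name loop above leaves off at 13; off += 2+2+4 puts it on
+ // the 2-octet RDLENGTH field at octet 21, the rdata begins at off+2 = 23,
+ // and with end = 27 rdatalen comes out as the expected 4.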
+ off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/remote_test.go b/vendor/github.com/miekg/dns/remote_test.go new file mode 100644 index 0000000..4cf701f --- /dev/null +++ b/vendor/github.com/miekg/dns/remote_test.go @@ -0,0 +1,19 @@ +package dns + +import "testing" + +const LinodeAddr = "176.58.119.54:53" + +func TestClientRemote(t *testing.T) { + m := new(Msg) + m.SetQuestion("go.dns.miek.nl.", TypeTXT) + + c := new(Client) + r, _, err := c.Exchange(m, LinodeAddr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 0000000..f6e7a47 --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// StringToOpcode is a map of opcodes to strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// StringToRcode is a map of rcodes to strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 0000000..b489f3f --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporay. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. + if m[key].Header().Ttl > r.Header().Ttl { + m[key].Header().Ttl = r.Header().Ttl + } + continue + } + + m[key] = r + } + // If the length of the result map equals the amount of RRs we got, + // it means they were all different. We can then just return the original rrset. + if len(m) == len(rrs) { + return rrs + } + + j := 0 + for i, r := range rrs { + // If keys[i] lives in the map, we should copy and remove it. + if _, ok := m[*keys[i]]; ok { + delete(m, *keys[i]) + rrs[j] = r + j++ + } + + if len(m) == 0 { + break + } + } + + return rrs[:j] +} + +// normalizedString returns a normalized string from r. 
The TTL +// is removed and the domain name is lowercased. We go from this: +// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to: +// lowercasename<TAB>CLASS<TAB>TYPE... +func normalizedString(r RR) string { + // A string Go DNS makes has: domainname<TAB>TTL<TAB>... + b := []byte(r.String()) + + // find the first non-escaped tab, then another, so we capture where the TTL lives. + esc := false + ttlStart, ttlEnd := 0, 0 + for i := 0; i < len(b) && ttlEnd == 0; i++ { + switch { + case b[i] == '\\': + esc = !esc + case b[i] == '\t' && !esc: + if ttlStart == 0 { + ttlStart = i + continue + } + if ttlEnd == 0 { + ttlEnd = i + } + case b[i] >= 'A' && b[i] <= 'Z' && !esc: + b[i] += 32 + default: + esc = false + } + } + + // remove TTL. + copy(b[ttlStart:], b[ttlEnd:]) + cut := ttlEnd - ttlStart + return string(b[:len(b)-cut]) +} diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go new file mode 100644 index 0000000..2ba3fe9 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize_test.go @@ -0,0 +1,84 @@ +package dns + +import "testing" + +func TestDedup(t *testing.T) { + // make it []string + testcases := map[[3]RR][]string{ + [...]RR{ + newRR(t, "mIek.nl. IN A 127.0.0.1"), + newRR(t, "mieK.nl. IN A 127.0.0.1"), + newRR(t, "miek.Nl. IN A 127.0.0.1"), + }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"}, + [...]RR{ + newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(t, "Miek.nL. 500 IN A 127.0.0.1"), + }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"}, + [...]RR{ + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1", + "miek.nl.\t3600\tCH\tA\t127.0.0.1", + }, + [...]RR{ + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.de. IN A 127.0.0.1"), + }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1", + "miek.nl.\t3600\tIN\tA\t127.0.0.1", + "miek.de.\t3600\tIN\tA\t127.0.0.1", + }, + [...]RR{ + newRR(t, "miek.de. IN A 127.0.0.1"), + newRR(t, "miek.nl. 200 IN A 127.0.0.1"), + newRR(t, "miek.nl. 300 IN A 127.0.0.1"), + }: {"miek.de.\t3600\tIN\tA\t127.0.0.1", + "miek.nl.\t200\tIN\tA\t127.0.0.1", + }, + } + + for rr, expected := range testcases { + out := Dedup([]RR{rr[0], rr[1], rr[2]}, nil) + for i, o := range out { + if o.String() != expected[i] { + t.Fatalf("expected %v, got %v", expected[i], o.String()) + } + } + } +} + +func BenchmarkDedup(b *testing.B) { + rrs := []RR{ + newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(nil, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"), + } + m := make(map[string]RR) + for i := 0; i < b.N; i++ { + Dedup(rrs, m) + } +} + +func TestNormalizedString(t *testing.T) { + tests := map[RR]string{ + newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\ iek.nL. 3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\\tIeK.nl. 
3600 in A 127.0.0.1"): "m\\009iek.nl.\tIN\tA\t127.0.0.1", + } + for tc, expected := range tests { + n := normalizedString(tc) + if n != expected { + t.Errorf("expected %s, got %s", expected, n) + } + } +} + +func newRR(t *testing.T, s string) RR { + r, err := NewRR(s) + if err != nil { + t.Logf("newRR: %v", err) + } + return r +} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go new file mode 100644 index 0000000..8d4773c --- /dev/null +++ b/vendor/github.com/miekg/dns/scan.go @@ -0,0 +1,987 @@ +package dns + +import ( + "io" + "log" + "os" + "strconv" + "strings" +) + +type debugging bool + +const debug debugging = false + +func (d debugging) Printf(format string, args ...interface{}) { + if d { + log.Printf(format, args...) + } +} + +const maxTok = 2048 // Largest token we can return. +const maxUint16 = 1<<16 - 1 + +// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// * Add ownernames if they are left blank; +// * Suppress sequences of spaces; +// * Make each RR fit on one line (_NEWLINE is send as last) +// * Handle comments: ; +// * Handle braces - anywhere. +const ( + // Zonefile + zEOF = iota + zString + zBlank + zQuote + zNewline + zRrtpe + zOwner + zClass + zDirOrigin // $ORIGIN + zDirTtl // $TTL + zDirInclude // $INCLUDE + zDirGenerate // $GENERATE + + // Privatekey file + zValue + zKey + + zExpectOwnerDir // Ownername + zExpectOwnerBl // Whitespace after the ownername + zExpectAny // Expect rrtype, ttl or class + zExpectAnyNoClass // Expect rrtype or ttl + zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS + zExpectAnyNoTtl // Expect rrtype or class + zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL + zExpectRrtype // Expect rrtype + zExpectRrtypeBl // Whitespace BEFORE rrtype + zExpectRdata // The first element of the rdata + zExpectDirTtlBl // Space after directive $TTL + zExpectDirTtl // Directive $TTL + zExpectDirOriginBl // Space after directive $ORIGIN + zExpectDirOrigin // Directive $ORIGIN + zExpectDirIncludeBl // Space after directive $INCLUDE + zExpectDirInclude // Directive $INCLUDE + zExpectDirGenerate // Directive $GENERATE + zExpectDirGenerateBl // Space after directive $GENERATE +) + +// ParseError is a parsing error. It contains the parse error and the location in the io.Reader +// where the error occurred. +type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, _BLANK, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds the token that are returned when a zone file is parsed. +type Token struct { + // The scanned resource record when error is not nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. 
If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + r := <-parseZoneHelper(q, ".", filename, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, which consist out the parsed RR, a potential comment or an error. +// If there is an error the RR is nil. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file would start with: $ORIGIN origin . +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too. +func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin == "" { + origin = "." 
+ } + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var defttl uint32 = defaultTtl + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + h.Ttl = defttl + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + if l.token[0] == '@' { + h.Name = origin + prevName = h.Name + st = zExpectOwnerBl + break + } + if h.Name[l.length-1] != '.' { + h.Name = appendOrigin(h.Name, origin) + } + _, ok := IsDomainName(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + prevName = h.Name + st = zExpectOwnerBl + case zDirTtl: + st = zExpectDirTtlBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // Don't about the defttl, we should take the $TTL value + // defttl = ttl + st = zExpectAnyNoTtlBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + l := <-c + switch l.value { + case zBlank: + l := <-c + if l.value == zString { + if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + // a new origin is specified. + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + neworigin = l.token + "." 
+ origin + } else { + neworigin = l.token + origin + } + } else { + neworigin = l.token + } + } + case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + r1, e1 := os.Open(l.token) + if e1 != nil { + t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, l.token, neworigin, t, include+1) + st = zExpectOwnerDir + case zExpectDirTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTtl + case zExpectDirTtl: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = ttl + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + if _, ok := IsDomainName(l.token); !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + if l.token[l.length-1] != '.' { + if origin != "." { // Prevent .. endings + origin = l.token + "." + origin + } else { + origin = l.token + origin + } + } else { + origin = l.token + } + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the defttl here + st = zExpectAnyNoTtlBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTtlBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTtl + case zExpectAnyNoTtl: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case zString: + ttl, ok := 
stringToTtl(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + // defttl = ttl // don't set the def ttl anymore + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTtl + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + debug.Printf("[7 %+v]", l.token) + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + debug.Printf("[6 %+v]", l.token) + c <- l + } + stri = 0 + // I reverse space stuff here + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + debug.Printf("[5 %+v]", l.token) + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + debug.Printf("[4 %+v]", l.token) + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + l.comment = string(com[:comi]) + debug.Printf("[3 %+v %+v]", l.token, l.comment) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + debug.Printf("[2 %+v]", l.token) + c <- l + } + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + debug.Printf("[1 %+v]", l.token) + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = 
strings.ToUpper(l.token) + l.length = stri + + debug.Printf("[%+v]", l.token) + c <- l + stri = 0 + } + + // send quote itself as separate token + l.value = zQuote + l.token = "\"" + l.tokenUpper = l.token + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.tokenUpper = l.token + l.err = true + debug.Printf("[%+v]", l.token) + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + l.value = zString + debug.Printf("[%+v]", l.token) + c <- l + } + if brace != 0 { + l.token = "unbalanced brace" + l.tokenUpper = l.token + l.err = true + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(typ), true +} + +// Parse things like 2w, 2m, etc, Return the time in seconds. +func stringToTtl(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' <digits>[.<digits>][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." 
+ origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" +// Used for NID and L64 record. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixes postitions, if not its a parse error + if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 0000000..b8b18fd --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2184 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have Hexadecimal or Base64 as their last element in the Rdata. Records + // that have a fixed ending or for instance A, AAAA, SOA and etc. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// an *ParseError: garbage after rdata. 
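+// For example (editor's addition, not part of the upstream file), assuming
+// the A parser is registered as fixed-ending, a line such as
+//	miek.nl. 3600 IN A 127.0.0.1 spurious-token
+// parses the address and then slurpRemainder sees the extra zString, so the
+// caller receives a *ParseError with "garbage after rdata".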
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC3957 RR (Unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) +// or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// A remainder of the rdata with embedded spaces, split on unquoted whitespace +// and return the parsed string slice or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + l := <-c + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + + // Build the slice + s := make([]string, 0) + quote := false + empty := false + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ns = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Ptr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + if rr.Ptr[l.length-1] != '.' { + rr.Ptr = appendOrigin(rr.Ptr, o) + } + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' { + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + l = <-c + rr.Txt = l.token + if l.token == "@" { + rr.Txt = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + if rr.Txt[l.length-1] != '.' { + rr.Txt = appendOrigin(rr.Txt, o) + } + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mr = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + if rr.Mr[l.length-1] != '.' { + rr.Mr = appendOrigin(rr.Mr, o) + } + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mb = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + if rr.Mb[l.length-1] != '.' { + rr.Mb = appendOrigin(rr.Mb, o) + } + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mg = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + if rr.Mg[l.length-1] != '.' { + rr.Mg = appendOrigin(rr.Mg, o) + } + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? 
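+ // Editor's note (illustration, not part of the upstream file): per
+ // TestParseHINFO earlier in this commit, a lone unquoted token such as
+ // "example.net. HINFO AB" arrives here with no spaces, so Os stays empty
+ // ("AB" ""); a single quoted chunk that does contain spaces is instead
+ // broken up on whitespace by strings.Fields below.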
+ if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Rmail = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + if rr.Rmail[l.length-1] != '.' { + rr.Rmail = appendOrigin(rr.Rmail, o) + } + } + <-c // zBlank + l = <-c + rr.Email = l.token + if l.token == "@" { + rr.Email = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + if rr.Email[l.length-1] != '.' { + rr.Email = appendOrigin(rr.Email, o) + } + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Mf = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + if rr.Mf[l.length-1] != '.' { + rr.Mf = appendOrigin(rr.Mf, o) + } + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Md = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + if rr.Md[l.length-1] != '.' { + rr.Md = appendOrigin(rr.Md, o) + } + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + if l.token == "@" { + rr.Mx = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + if rr.Mx[l.length-1] != '.' { + rr.Mx = appendOrigin(rr.Mx, o) + } + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Host = l.token + if l.token == "@" { + rr.Host = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + if rr.Host[l.length-1] != '.' 
{ + rr.Host = appendOrigin(rr.Host, o) + } + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + if l.token == "@" { + rr.Hostname = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + if rr.Hostname[l.length-1] != '.' { + rr.Hostname = appendOrigin(rr.Hostname, o) + } + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + if l.token == "@" { + rr.Exchanger = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + if rr.Exchanger[l.length-1] != '.' { + rr.Exchanger = appendOrigin(rr.Exchanger, o) + } + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { + return rr, nil, "" + } + <-c // zBlank + if l.token == "@" { + rr.Ns = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + if rr.Ns[l.length-1] != '.' { + rr.Ns = appendOrigin(rr.Ns, o) + } + } + + l = <-c + rr.Mbox = l.token + if l.token == "@" { + rr.Mbox = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + if rr.Mbox[l.length-1] != '.' 
{ + rr.Mbox = appendOrigin(rr.Mbox, o) + } + } + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if i == 0 { + // Serial should be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if v, ok = stringToTtl(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Target = l.token + if l.token == "@" { + rr.Target = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + if rr.Target[l.length-1] != '.' { + rr.Target = appendOrigin(rr.Target, o) + } + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } 
else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + if l.token == "@" { + rr.Replacement = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + if rr.Replacement[l.length-1] != '.' { + rr.Replacement = appendOrigin(rr.Replacement, o) + } + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.PreviousName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + if rr.PreviousName[l.length-1] != '.' { + rr.PreviousName = appendOrigin(rr.PreviousName, o) + } + } + <-c // zBlank + l = <-c + rr.NextName = l.token + if l.token == "@" { + rr.NextName = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + if rr.NextName[l.length-1] != '.' { + rr.NextName = appendOrigin(rr.NextName, o) + } + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + // North + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + 
goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + +Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + if l.token == "@" { + xs = append(xs, o) + l = <-c + continue + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + if l.token[l.length-1] != '.' 
{ + l.token = appendOrigin(l.token, o) + } + xs = append(xs, l.token) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + <-c // zBlank + l = <-c + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 32) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 16) + if err != nil || l.err { + return nil, 
&ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + rr.SignerName = l.token + if l.token == "@" { + rr.SignerName = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + if rr.SignerName[l.length-1] != '.' { + rr.SignerName = appendOrigin(rr.SignerName, o) + } + } + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = l.token + if l.length == 0 { + return rr, nil, l.comment + } + if l.token == "@" { + rr.NextDomain = o + } else { + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + if rr.NextDomain[l.length-1] != '.' { + rr.NextDomain = appendOrigin(rr.NextDomain, o) + } + } + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, 
&ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + 
return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h 
RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil { + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. different than e), because...??t + s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SMIMEA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" + } + rr.Usage = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" + } + rr.Selector = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" + } + rr.MatchingType = uint8(i) + // So this needs be e2 (i.e. 
different than e), because...??t + s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) + if e2 != nil { + return nil, e2, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RFC3597) + rr.Hdr = h + l := <-c + if l.token != "\\#" { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + <-c // zBlank + l = <-c + rdlength, e := strconv.Atoi(l.token) + if e != nil || l.err { + return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" + } + + s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) + if e1 != nil { + return nil, e1, c1 + } + if rdlength*2 != len(s) { + return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" + } + rr.Rdata = s + return rr, nil, c1 +} + +func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SPF) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AVC) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TXT) + rr.Hdr = h + + // no zBlank reading here, because all this rdata is TXT + s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) + if e != nil { + return nil, e, "" + } + rr.Txt = s + return rr, nil, c1 +} + +// identical to setTXT +func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) + if e != nil { + return nil, e, "" + } + rr.ZSData = s + return rr, nil, c1 +} + +func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(URI) + rr.Hdr = h + + l := <-c + if l.length == 0 { // Dynamic updates. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! 
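None of these setters is called directly; they are reached through the zone parser, most conveniently via NewRR. A usage sketch showing a normal record (handed to setSRV earlier in this file) and the RFC 3597 unknown-type escape that setRFC3597 above handles: a literal \#, the RDATA length, then exactly that many bytes in hex. The names and the TYPE65280 payload are invented for illustration.

package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    // A regular record: the text is lexed and dispatched to setSRV.
    rr, err := dns.NewRR("_sip._tcp.example.com. 3600 IN SRV 10 60 5060 sip.example.com.")
    if err != nil {
        log.Fatal(err)
    }
    srv := rr.(*dns.SRV)
    fmt.Println(srv.Priority, srv.Weight, srv.Port, srv.Target)

    // RFC 3597 form: 4 bytes of opaque RDATA, given in hex.
    unknown, err := dns.NewRR(`example.com. 3600 IN TYPE65280 \# 4 c0000201`)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(unknown.Header().Rrtype, unknown.String())
}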
+ rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Fqdn = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + if rr.Fqdn[l.length-1] != '.' 
{ + rr.Fqdn = appendOrigin(rr.Fqdn, o) + } + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, c1 + } + if ln := len(s); ln == 0 { + return rr, nil, c1 + } + rr.Uinfo = s[0] // silently discard anything after the first character-string + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + if l.length == 0 { + return rr, nil, "" + } + if l.token == "@" { + rr.Map822 = o + return rr, nil, "" + } + _, ok := IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + if rr.Map822[l.length-1] != '.' { + rr.Map822 = appendOrigin(rr.Map822, o) + } + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + if l.token == "@" { + rr.Mapx400 = o + return rr, nil, "" + } + _, ok = IsDomainName(l.token) + if !ok || l.length == 0 || l.err { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + if rr.Mapx400[l.length-1] != '.' 
{ + rr.Mapx400 = appendOrigin(rr.Mapx400, o) + } + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + l := <-c + if l.length == 0 { + return rr, nil, l.comment + } + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad CAA Flag", l}, "" + } + rr.Flag = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.value != zString { + return nil, &ParseError{f, "bad CAA Tag", l}, "" + } + rr.Tag = l.token + + <-c // zBlank + s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) + if e != nil { + return nil, e, "" + } + if len(s) > 1 { + return nil, &ParseError{f, "bad CAA Value", l}, "" + } + rr.Value = s[0] + return rr, nil, c1 +} + +var typeToparserFunc = map[uint16]parserFunc{ + TypeAAAA: {setAAAA, false}, + TypeAFSDB: {setAFSDB, false}, + TypeA: {setA, false}, + TypeCAA: {setCAA, true}, + TypeCDS: {setCDS, true}, + TypeCDNSKEY: {setCDNSKEY, true}, + TypeCERT: {setCERT, true}, + TypeCNAME: {setCNAME, false}, + TypeDHCID: {setDHCID, true}, + TypeDLV: {setDLV, true}, + TypeDNAME: {setDNAME, false}, + TypeKEY: {setKEY, true}, + TypeDNSKEY: {setDNSKEY, true}, + TypeDS: {setDS, true}, + TypeEID: {setEID, true}, + TypeEUI48: {setEUI48, false}, + TypeEUI64: {setEUI64, false}, + TypeGID: {setGID, false}, + TypeGPOS: {setGPOS, false}, + TypeHINFO: {setHINFO, true}, + TypeHIP: {setHIP, true}, + TypeKX: {setKX, false}, + TypeL32: {setL32, false}, + TypeL64: {setL64, false}, + TypeLOC: {setLOC, true}, + TypeLP: {setLP, false}, + TypeMB: {setMB, false}, + TypeMD: {setMD, false}, + TypeMF: {setMF, false}, + TypeMG: {setMG, false}, + TypeMINFO: {setMINFO, false}, + TypeMR: {setMR, false}, + TypeMX: {setMX, false}, + TypeNAPTR: {setNAPTR, false}, + TypeNID: {setNID, false}, + TypeNIMLOC: {setNIMLOC, true}, + TypeNINFO: {setNINFO, true}, + TypeNSAPPTR: {setNSAPPTR, false}, + TypeNSEC3PARAM: {setNSEC3PARAM, false}, + TypeNSEC3: {setNSEC3, true}, + TypeNSEC: {setNSEC, true}, + TypeNS: {setNS, false}, + TypeOPENPGPKEY: {setOPENPGPKEY, true}, + TypePTR: {setPTR, false}, + TypePX: {setPX, false}, + TypeSIG: {setSIG, true}, + TypeRKEY: {setRKEY, true}, + TypeRP: {setRP, false}, + TypeRRSIG: {setRRSIG, true}, + TypeRT: {setRT, false}, + TypeSMIMEA: {setSMIMEA, true}, + TypeSOA: {setSOA, false}, + TypeSPF: {setSPF, true}, + TypeAVC: {setAVC, true}, + TypeSRV: {setSRV, false}, + TypeSSHFP: {setSSHFP, true}, + TypeTALINK: {setTALINK, false}, + TypeTA: {setTA, true}, + TypeTLSA: {setTLSA, true}, + TypeTXT: {setTXT, true}, + TypeUID: {setUID, false}, + TypeUINFO: {setUINFO, true}, + TypeURI: {setURI, true}, + TypeX25: {setX25, false}, +} diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go new file mode 100644 index 0000000..c29bc2f --- /dev/null +++ b/vendor/github.com/miekg/dns/scanner.go @@ -0,0 +1,43 @@ +package dns + +// Implement a simple scanner, return a byte stream from an io reader. + +import ( + "bufio" + "io" + "text/scanner" +) + +type scan struct { + src *bufio.Reader + position scanner.Position + eof bool // Have we just seen a eof +} + +func scanInit(r io.Reader) *scan { + s := new(scan) + s.src = bufio.NewReader(r) + s.position.Line = 1 + return s +} + +// tokenText returns the next byte from the input +func (s *scan) tokenText() (byte, error) { + c, err := s.src.ReadByte() + if err != nil { + return c, err + } + // delay the newline handling until the next token is delivered, + // fixes off-by-one errors when reporting a parse error. 
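The same bookkeeping in isolation may make the comment above clearer: a reader that reports a line and column for every byte it hands out, and that defers the line increment until the next byte is requested, so an error on a trailing newline is attributed to the line that just ended. This posReader is an illustrative standalone sketch, not the package's implementation.

package main

import (
    "bufio"
    "fmt"
    "strings"
)

// posReader tracks the line and column of each byte it returns.
type posReader struct {
    src        *bufio.Reader
    line, col  int
    sawNewline bool
}

func newPosReader(s string) *posReader {
    return &posReader{src: bufio.NewReader(strings.NewReader(s)), line: 1}
}

func (p *posReader) next() (byte, bool) {
    c, err := p.src.ReadByte()
    if err != nil {
        return 0, false
    }
    if p.sawNewline { // bump the line only once the next byte is asked for
        p.line++
        p.col = 0
        p.sawNewline = false
    }
    if c == '\n' {
        p.sawNewline = true
        return c, true
    }
    p.col++
    return c, true
}

func main() {
    r := newPosReader("ab\ncd")
    for {
        c, ok := r.next()
        if !ok {
            break
        }
        fmt.Printf("%q at line %d col %d\n", c, r.line, r.col)
    }
}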
+ if s.eof == true { + s.position.Line++ + s.position.Column = 0 + s.eof = false + } + if c == '\n' { + s.eof = true + return c, nil + } + s.position.Column++ + return c, nil +} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go new file mode 100644 index 0000000..0ca6e00 --- /dev/null +++ b/vendor/github.com/miekg/dns/server.go @@ -0,0 +1,734 @@ +// DNS server implementation. + +package dns + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "io" + "net" + "sync" + "time" +) + +// Maximum number of TCP queries before we close the socket. +const maxTCPQueries = 128 + +// Handler is implemented by any value that implements ServeDNS. +type Handler interface { + ServeDNS(w ResponseWriter, r *Msg) +} + +// A ResponseWriter interface is used by an DNS handler to +// construct an DNS response. +type ResponseWriter interface { + // LocalAddr returns the net.Addr of the server + LocalAddr() net.Addr + // RemoteAddr returns the net.Addr of the client that sent the current request. + RemoteAddr() net.Addr + // WriteMsg writes a reply back to the client. + WriteMsg(*Msg) error + // Write writes a raw buffer back to the client. + Write([]byte) (int, error) + // Close closes the connection. + Close() error + // TsigStatus returns the status of the Tsig. + TsigStatus() error + // TsigTimersOnly sets the tsig timers only boolean. + TsigTimersOnly(bool) + // Hijack lets the caller take over the connection. + // After a call to Hijack(), the DNS package will not do anything with the connection. + Hijack() +} + +type response struct { + hijacked bool // connection has been hijacked by handler + tsigStatus error + tsigTimersOnly bool + tsigRequestMAC string + tsigSecret map[string]string // the tsig secrets + udp *net.UDPConn // i/o connection if UDP was used + tcp net.Conn // i/o connection if TCP was used + udpSession *SessionUDP // oob data to get egress interface right + remoteAddr net.Addr // address of the client + writer Writer // writer to output the raw DNS bits +} + +// ServeMux is an DNS request multiplexer. It matches the +// zone name of each incoming request against a list of +// registered patterns add calls the handler for the pattern +// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning +// that queries for the DS record are redirected to the parent zone (if that +// is also registered), otherwise the child gets the query. +// ServeMux is also safe for concurrent access from multiple goroutines. +type ServeMux struct { + z map[string]Handler + m *sync.RWMutex +} + +// NewServeMux allocates and returns a new ServeMux. +func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} } + +// DefaultServeMux is the default ServeMux used by Serve. +var DefaultServeMux = NewServeMux() + +// The HandlerFunc type is an adapter to allow the use of +// ordinary functions as DNS handlers. If f is a function +// with the appropriate signature, HandlerFunc(f) is a +// Handler object that calls f. +type HandlerFunc func(ResponseWriter, *Msg) + +// ServeDNS calls f(w, r). +func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { + f(w, r) +} + +// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. 
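HandlerFunc makes any function with this signature usable as a Handler, in the same spirit as http.HandlerFunc. A minimal handler sketch; fixedA, the zone name and the answer address are all invented for illustration:

package main

import (
    "net"

    "github.com/miekg/dns"
)

// fixedA answers every query with one A record for 192.0.2.1; a real
// handler would inspect Qtype and Qclass first. ServeMux only dispatches
// messages with at least one question, so Question[0] is safe here.
func fixedA(w dns.ResponseWriter, req *dns.Msg) {
    m := new(dns.Msg)
    m.SetReply(req)
    m.Answer = append(m.Answer, &dns.A{
        Hdr: dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
        A:   net.ParseIP("192.0.2.1"),
    })
    w.WriteMsg(m) // write errors are dropped here; a real server would log them
}

func main() {
    // Registers in DefaultServeMux; a Server (sketched further down) would then serve it.
    dns.HandleFunc("example.com.", fixedA)
}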
+func HandleFailed(w ResponseWriter, r *Msg) { + m := new(Msg) + m.SetRcode(r, RcodeServerFailure) + // does not matter if this write fails + w.WriteMsg(m) +} + +func failedHandler() Handler { return HandlerFunc(HandleFailed) } + +// ListenAndServe Starts a server on address and network specified Invoke handler +// for incoming queries. +func ListenAndServe(addr string, network string, handler Handler) error { + server := &Server{Addr: addr, Net: network, Handler: handler} + return server.ListenAndServe() +} + +// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in +// http://golang.org/pkg/net/http/#ListenAndServeTLS +func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + server := &Server{ + Addr: addr, + Net: "tcp-tls", + TLSConfig: &config, + Handler: handler, + } + + return server.ListenAndServe() +} + +// ActivateAndServe activates a server with a listener from systemd, +// l and p should not both be non-nil. +// If both l and p are not nil only p will be used. +// Invoke handler for incoming queries. +func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { + server := &Server{Listener: l, PacketConn: p, Handler: handler} + return server.ActivateAndServe() +} + +func (mux *ServeMux) match(q string, t uint16) Handler { + mux.m.RLock() + defer mux.m.RUnlock() + var handler Handler + b := make([]byte, len(q)) // worst case, one label of length q + off := 0 + end := false + for { + l := len(q[off:]) + for i := 0; i < l; i++ { + b[i] = q[off+i] + if b[i] >= 'A' && b[i] <= 'Z' { + b[i] |= ('a' - 'A') + } + } + if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key + if t != TypeDS { + return h + } + // Continue for DS to see if we have a parent too, if so delegeate to the parent + handler = h + } + off, end = NextLabel(q, off) + if end { + break + } + } + // Wildcard match, if we have found nothing try the root zone as a last resort. + if h, ok := mux.z["."]; ok { + return h + } + return handler +} + +// Handle adds a handler to the ServeMux for pattern. +func (mux *ServeMux) Handle(pattern string, handler Handler) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + mux.z[Fqdn(pattern)] = handler + mux.m.Unlock() +} + +// HandleFunc adds a handler function to the ServeMux for pattern. +func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + mux.Handle(pattern, HandlerFunc(handler)) +} + +// HandleRemove deregistrars the handler specific for pattern from the ServeMux. +func (mux *ServeMux) HandleRemove(pattern string) { + if pattern == "" { + panic("dns: invalid pattern " + pattern) + } + mux.m.Lock() + delete(mux.z, Fqdn(pattern)) + mux.m.Unlock() +} + +// ServeDNS dispatches the request to the handler whose +// pattern most closely matches the request message. If DefaultServeMux +// is used the correct thing for DS queries is done: a possible parent +// is sought. +// If no handler is found a standard SERVFAIL message is returned +// If the request message does not have exactly one question in the +// question section a SERVFAIL is returned, unlesss Unsafe is true. 
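The matching rule described above lives in mux.match: the query name is lower-cased and walked label by label, so the longest registered suffix wins, and for DS queries the walk continues so a registered parent can take over. A simplified version over a plain map, skipping the DS handling, escaped dots and the locking; matchZone is an invented name:

package main

import (
    "fmt"
    "strings"
)

// matchZone returns the longest registered suffix of qname, walking one
// label at a time, with the root zone as a last resort.
func matchZone(zones map[string]bool, qname string) (string, bool) {
    q := strings.ToLower(qname)
    for off := 0; off < len(q); {
        if zones[q[off:]] {
            return q[off:], true
        }
        next := strings.Index(q[off:], ".")
        if next < 0 || off+next+1 >= len(q) {
            break
        }
        off += next + 1
    }
    if zones["."] {
        return ".", true
    }
    return "", false
}

func main() {
    zones := map[string]bool{"example.com.": true, "com.": true}
    fmt.Println(matchZone(zones, "WWW.Example.COM.")) // example.com. true
    fmt.Println(matchZone(zones, "example.org."))     // "" false
}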
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) { + var h Handler + if len(request.Question) < 1 { // allow more than one question + h = failedHandler() + } else { + if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil { + h = failedHandler() + } + } + h.ServeDNS(w, request) +} + +// Handle registers the handler with the given pattern +// in the DefaultServeMux. The documentation for +// ServeMux explains how patterns are matched. +func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } + +// HandleRemove deregisters the handle with the given pattern +// in the DefaultServeMux. +func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } + +// HandleFunc registers the handler function with the given pattern +// in the DefaultServeMux. +func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { + DefaultServeMux.HandleFunc(pattern, handler) +} + +// Writer writes raw DNS messages; each call to Write should send an entire message. +type Writer interface { + io.Writer +} + +// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. +type Reader interface { + // ReadTCP reads a raw message from a TCP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) + // ReadUDP reads a raw message from a UDP connection. Implementations may alter + // connection properties, for example the read-deadline. + ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) +} + +// defaultReader is an adapter for the Server struct that implements the Reader interface +// using the readTCP and readUDP func of the embedded Server. +type defaultReader struct { + *Server +} + +func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + return dr.readTCP(conn, timeout) +} + +func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + return dr.readUDP(conn, timeout) +} + +// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. +// Implementations should never return a nil Reader. +type DecorateReader func(Reader) Reader + +// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. +// Implementations should never return a nil Writer. +type DecorateWriter func(Writer) Writer + +// A Server defines parameters for running an DNS server. +type Server struct { + // Address to listen on, ":dns" if empty. + Addr string + // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one + Net string + // TCP Listener to use, this is to aid in systemd's socket activation. + Listener net.Listener + // TLS connection configuration + TLSConfig *tls.Config + // UDP "Listener" to use, this is to aid in systemd's socket activation. + PacketConn net.PacketConn + // Handler to invoke, dns.DefaultServeMux if nil. + Handler Handler + // Default buffer size to use to read incoming UDP messages. If not set + // it defaults to MinMsgSize (512 B). + UDPSize int + // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. + ReadTimeout time.Duration + // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. + WriteTimeout time.Duration + // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). 
+ IdleTimeout func() time.Duration + // Secret(s) for Tsig map[<zonename>]<base64 secret>. + TsigSecret map[string]string + // Unsafe instructs the server to disregard any sanity checks and directly hand the message to + // the handler. It will specifically not check if the query has the QR bit not set. + Unsafe bool + // If NotifyStartedFunc is set it is called once the server has started listening. + NotifyStartedFunc func() + // DecorateReader is optional, allows customization of the process that reads raw DNS messages. + DecorateReader DecorateReader + // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. + DecorateWriter DecorateWriter + + // Graceful shutdown handling + + inFlight sync.WaitGroup + + lock sync.RWMutex + started bool +} + +// ListenAndServe starts a nameserver on the configured address in *Server. +func (srv *Server) ListenAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + addr := srv.Addr + if addr == "" { + addr = ":domain" + } + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + switch srv.Net { + case "tcp", "tcp4", "tcp6": + a, err := net.ResolveTCPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenTCP(srv.Net, a) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "tcp-tls", "tcp4-tls", "tcp6-tls": + network := "tcp" + if srv.Net == "tcp4-tls" { + network = "tcp4" + } else if srv.Net == "tcp6-tls" { + network = "tcp6" + } + + l, err := tls.Listen(network, addr, srv.TLSConfig) + if err != nil { + return err + } + srv.Listener = l + srv.started = true + srv.lock.Unlock() + err = srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + case "udp", "udp4", "udp6": + a, err := net.ResolveUDPAddr(srv.Net, addr) + if err != nil { + return err + } + l, err := net.ListenUDP(srv.Net, a) + if err != nil { + return err + } + if e := setUDPSocketOptions(l); e != nil { + return e + } + srv.PacketConn = l + srv.started = true + srv.lock.Unlock() + err = srv.serveUDP(l) + srv.lock.Lock() // to satisfy the defer at the top + return err + } + return &Error{err: "bad network"} +} + +// ActivateAndServe starts a nameserver with the PacketConn or Listener +// configured in *Server. Its main use is to start a server from systemd. +func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. All in progress queries are completed before the server +// is taken down. 
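Putting the server pieces together: a Server is configured as a plain struct, ListenAndServe blocks until the listener goes away, and Shutdown (just below) closes it and waits for in-flight queries. A usage sketch with an invented, unprivileged port; the five-second sleep stands in for a real application lifetime:

package main

import (
    "log"
    "time"

    "github.com/miekg/dns"
)

func main() {
    srv := &dns.Server{
        Addr: ":8053", // unprivileged port, for illustration only
        Net:  "udp",
        // Handler left nil: DefaultServeMux is used, i.e. whatever HandleFunc registered.
    }

    go func() {
        if err := srv.ListenAndServe(); err != nil {
            log.Printf("dns server stopped: %v", err)
        }
    }()

    time.Sleep(5 * time.Second) // stand-in for the application's real lifetime
    if err := srv.Shutdown(); err != nil {
        log.Printf("shutdown: %v", err)
    }
}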
If the Shutdown is taking longer than the reading timeout an error +// is returned. +func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + + fin := make(chan bool) + go func() { + srv.inFlight.Wait() + fin <- true + }() + + select { + case <-time.After(srv.getReadTimeout()): + return &Error{err: "server shutdown is pending"} + case <-fin: + return nil + } +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + rw, err := l.Accept() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + m, err := reader.ReadTCP(rw, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) + } +} + +// serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + continue + } + srv.inFlight.Add(1) + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) + } +} + +// Serve a new connection. 
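serveTCP and serveUDP add every accepted query to the inFlight WaitGroup, which is what Shutdown waits on before giving up after the read timeout. The same pattern in isolation, with invented sleep durations standing in for running handlers:

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var inFlight sync.WaitGroup

    // Pretend three requests are being served when shutdown starts.
    for i := 0; i < 3; i++ {
        inFlight.Add(1)
        go func(n int) {
            defer inFlight.Done()
            time.Sleep(time.Duration(n*100) * time.Millisecond)
        }(i)
    }

    fin := make(chan struct{})
    go func() {
        inFlight.Wait() // all handlers returned
        close(fin)
    }()

    select {
    case <-fin:
        fmt.Println("clean shutdown")
    case <-time.After(2 * time.Second): // the server uses its read timeout here
        fmt.Println("shutdown is pending") // mirrors the error Shutdown returns
    }
}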
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + defer srv.inFlight.Done() + + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + q := 0 // counter for the amount of TCP queries we get + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } +Redo: + req := new(Msg) + err := req.Unpack(m) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + goto Exit + } + if !srv.Unsafe && req.Response { + goto Exit + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg + } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + h.ServeDNS(w, req) // Writes back to the client + +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return + } + + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil || n == 0 { + if err != nil { + return nil, nil, err + } + return nil, nil, ErrShortRead + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. 
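readTCP above and response.Write in the next chunk implement the DNS-over-TCP framing: every message is preceded by its length as a 16-bit big-endian integer. A self-contained sketch of both directions over any io.ReadWriter; writeFramed and readFramed are invented names, and io.ReadFull stands in for the manual read loop readTCP uses:

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"
    "io"
)

// writeFramed prepends the 2-byte big-endian length required by DNS over TCP.
func writeFramed(w io.Writer, msg []byte) error {
    var l [2]byte
    binary.BigEndian.PutUint16(l[:], uint16(len(msg)))
    if _, err := w.Write(l[:]); err != nil {
        return err
    }
    _, err := w.Write(msg)
    return err
}

// readFramed reads one length-prefixed message.
func readFramed(r io.Reader) ([]byte, error) {
    var l [2]byte
    if _, err := io.ReadFull(r, l[:]); err != nil {
        return nil, err
    }
    msg := make([]byte, binary.BigEndian.Uint16(l[:]))
    _, err := io.ReadFull(r, msg)
    return msg, err
}

func main() {
    var buf bytes.Buffer
    _ = writeFramed(&buf, []byte("not a real DNS message"))
    m, _ := readFramed(&buf)
    fmt.Printf("%d bytes: %q\n", len(m), m)
}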
+func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go new file mode 100644 index 0000000..f17a2f9 --- /dev/null +++ b/vendor/github.com/miekg/dns/server_test.go @@ -0,0 +1,719 @@ +package dns + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "runtime" + "sync" + "testing" + "time" +) + +func HelloServer(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + w.WriteMsg(m) +} + +func HelloServerBadID(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Id++ + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + w.WriteMsg(m) +} + +func AnotherHelloServer(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}} + w.WriteMsg(m) +} + +func RunLocalUDPServer(laddr string) (*Server, string, error) { + server, l, _, err := RunLocalUDPServerWithFinChan(laddr) + + return server, l, err +} + +func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) { + pc, err := net.ListenPacket("udp", laddr) + if err != nil { + return nil, "", nil, err + } + server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + fin := make(chan struct{}, 0) + + go func() { + server.ActivateAndServe() + close(fin) + pc.Close() + }() + + waitLock.Lock() + return server, pc.LocalAddr().String(), fin, nil +} + +func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { + pc, err := net.ListenPacket("udp", laddr) + if err != nil { + return nil, "", err + } + server := &Server{PacketConn: pc, Unsafe: true, + ReadTimeout: time.Hour, WriteTimeout: 
time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + pc.Close() + }() + + waitLock.Lock() + return server, pc.LocalAddr().String(), nil +} + +func RunLocalTCPServer(laddr string) (*Server, string, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, "", err + } + + server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + l.Close() + }() + + waitLock.Lock() + return server, l.Addr().String(), nil +} + +func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) { + l, err := tls.Listen("tcp", laddr, config) + if err != nil { + return nil, "", err + } + + server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + l.Close() + }() + + waitLock.Lock() + return server, l.Addr().String(), nil +} + +func TestServing(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + HandleFunc("example.com.", AnotherHelloServer) + defer HandleRemove("miek.nl.") + defer HandleRemove("example.com.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + r, _, err := c.Exchange(m, addrstr) + if err != nil || len(r.Extra) == 0 { + t.Fatal("failed to exchange miek.nl", err) + } + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello world" { + t.Error("unexpected result for miek.nl", txt, "!= Hello world") + } + + m.SetQuestion("example.com.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange example.com", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } + + // Test Mixes cased as noticed by Ask. 
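+	// (Mixed-case queries are expected to resolve because ServeMux lookups are
+	// case-insensitive; TestCaseFolding below exercises that directly.)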
+ m.SetQuestion("eXaMplE.cOm.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Error("failed to exchange eXaMplE.cOm", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } +} + +func TestServingTLS(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + HandleFunc("example.com.", AnotherHelloServer) + defer HandleRemove("miek.nl.") + defer HandleRemove("example.com.") + + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + c.Net = "tcp-tls" + c.TLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + r, _, err := c.Exchange(m, addrstr) + if err != nil || len(r.Extra) == 0 { + t.Fatal("failed to exchange miek.nl", err) + } + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello world" { + t.Error("unexpected result for miek.nl", txt, "!= Hello world") + } + + m.SetQuestion("example.com.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange example.com", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } + + // Test Mixes cased as noticed by Ask. + m.SetQuestion("eXaMplE.cOm.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Error("failed to exchange eXaMplE.cOm", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } +} + +func BenchmarkServe(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + + b.StartTimer() + for i := 0; i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func benchmarkServe6(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + s, addrstr, err := RunLocalUDPServer("[::1]:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + + b.StartTimer() + for i := 0; i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func HelloServerCompress(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + m.Compress = true + w.WriteMsg(m) +} + +func BenchmarkServeCompress(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServerCompress) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + b.StartTimer() + for i := 0; 
i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func TestDotAsCatchAllWildcard(t *testing.T) { + mux := NewServeMux() + mux.Handle(".", HandlerFunc(HelloServer)) + mux.Handle("example.com.", HandlerFunc(AnotherHelloServer)) + + handler := mux.match("www.miek.nl.", TypeTXT) + if handler == nil { + t.Error("wildcard match failed") + } + + handler = mux.match("www.example.com.", TypeTXT) + if handler == nil { + t.Error("example.com match failed") + } + + handler = mux.match("a.www.example.com.", TypeTXT) + if handler == nil { + t.Error("a.www.example.com match failed") + } + + handler = mux.match("boe.", TypeTXT) + if handler == nil { + t.Error("boe. match failed") + } +} + +func TestCaseFolding(t *testing.T) { + mux := NewServeMux() + mux.Handle("_udp.example.com.", HandlerFunc(HelloServer)) + + handler := mux.match("_dns._udp.example.com.", TypeSRV) + if handler == nil { + t.Error("case sensitive characters folded") + } + + handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV) + if handler == nil { + t.Error("case insensitive characters not folded") + } +} + +func TestRootServer(t *testing.T) { + mux := NewServeMux() + mux.Handle(".", HandlerFunc(HelloServer)) + + handler := mux.match(".", TypeNS) + if handler == nil { + t.Error("root match failed") + } +} + +type maxRec struct { + max int + sync.RWMutex +} + +var M = new(maxRec) + +func HelloServerLargeResponse(resp ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Authoritative = true + m1 := 0 + M.RLock() + m1 = M.max + M.RUnlock() + for i := 0; i < m1; i++ { + aRec := &A{ + Hdr: RR_Header{ + Name: req.Question[0].Name, + Rrtype: TypeA, + Class: ClassINET, + Ttl: 0, + }, + A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(), + } + m.Answer = append(m.Answer, aRec) + } + resp.WriteMsg(m) +} + +func TestServingLargeResponses(t *testing.T) { + HandleFunc("example.", HelloServerLargeResponse) + defer HandleRemove("example.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + // Create request + m := new(Msg) + m.SetQuestion("web.service.example.", TypeANY) + + c := new(Client) + c.Net = "udp" + M.Lock() + M.max = 2 + M.Unlock() + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + // This must fail + M.Lock() + M.max = 20 + M.Unlock() + _, _, err = c.Exchange(m, addrstr) + if err == nil { + t.Error("failed to fail exchange, this should generate packet error") + } + // But this must work again + c.UDPSize = 7000 + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } +} + +func TestServingResponse(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + HandleFunc("miek.nl.", HelloServer) + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + m.Response = false + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange", err) + } + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err == nil { + t.Fatal("exchanged response message") + } + + s.Shutdown() + s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("could 
exchanged response message in Unsafe mode") + } +} + +func TestShutdownTCP(t *testing.T) { + s, _, err := RunLocalTCPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test TCP server, %v", err) + } +} + +func TestShutdownTLS(t *testing.T) { + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, _, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test TLS server, %v", err) + } +} + +type trigger struct { + done bool + sync.RWMutex +} + +func (t *trigger) Set() { + t.Lock() + defer t.Unlock() + t.done = true +} +func (t *trigger) Get() bool { + t.RLock() + defer t.RUnlock() + return t.done +} + +func TestHandlerCloseTCP(t *testing.T) { + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(err) + } + addr := ln.Addr().String() + + server := &Server{Addr: addr, Net: "tcp", Listener: ln} + + hname := "testhandlerclosetcp." + triggered := &trigger{} + HandleFunc(hname, func(w ResponseWriter, r *Msg) { + triggered.Set() + w.Close() + }) + defer HandleRemove(hname) + + go func() { + defer server.Shutdown() + c := &Client{Net: "tcp"} + m := new(Msg).SetQuestion(hname, 1) + tries := 0 + exchange: + _, _, err := c.Exchange(m, addr) + if err != nil && err != io.EOF { + t.Logf("exchange failed: %s\n", err) + if tries == 3 { + return + } + time.Sleep(time.Second / 10) + tries++ + goto exchange + } + }() + server.ActivateAndServe() + if !triggered.Get() { + t.Fatalf("handler never called") + } +} + +func TestShutdownUDP(t *testing.T) { + s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test UDP server, %v", err) + } + select { + case <-fin: + case <-time.After(2 * time.Second): + t.Error("Could not shutdown test UDP server. 
Gave up waiting") + } +} + +type ExampleFrameLengthWriter struct { + Writer +} + +func (e *ExampleFrameLengthWriter) Write(m []byte) (int, error) { + fmt.Println("writing raw DNS message of length", len(m)) + return e.Writer.Write(m) +} + +func ExampleDecorateWriter() { + // instrument raw DNS message writing + wf := DecorateWriter(func(w Writer) Writer { + return &ExampleFrameLengthWriter{w} + }) + + // simple UDP server + pc, err := net.ListenPacket("udp", "127.0.0.1:0") + if err != nil { + fmt.Println(err.Error()) + return + } + server := &Server{ + PacketConn: pc, + DecorateWriter: wf, + ReadTimeout: time.Hour, WriteTimeout: time.Hour, + } + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + defer server.Shutdown() + + go func() { + server.ActivateAndServe() + pc.Close() + }() + + waitLock.Lock() + + HandleFunc("miek.nl.", HelloServer) + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + _, _, err = c.Exchange(m, pc.LocalAddr().String()) + if err != nil { + fmt.Println("failed to exchange", err.Error()) + return + } + // Output: writing raw DNS message of length 56 +} + +var ( + // CertPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) + CertPEMBlock = []byte(`-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIRAJFYMkcn+b8dpU15wjf++GgwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjAxMDgxMjAzNTNaFw0xNzAxMDcxMjAz +NTNaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDXjqO6skvP03k58CNjQggd9G/mt+Wa+xRU+WXiKCCHttawM8x+slq5 +yfsHCwxlwsGn79HmJqecNqgHb2GWBXAvVVokFDTcC1hUP4+gp2gu9Ny27UHTjlLm +O0l/xZ5MN8tfKyYlFw18tXu3fkaPyHj8v/D1RDkuo4ARdFvGSe8TqisbhLk2+9ow +xfIGbEM9Fdiw8qByC2+d+FfvzIKz3GfQVwn0VoRom8L6NBIANq1IGrB5JefZB6nv +DnfuxkBmY7F1513HKuEJ8KsLWWZWV9OPU4j4I4Rt+WJNlKjbD2srHxyrS2RDsr91 +8nCkNoWVNO3sZq0XkWKecdc921vL4ginAgMBAAGjVDBSMA4GA1UdDwEB/wQEAwIC +pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBoGA1UdEQQT +MBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAGcU3iyLBIVZj +aDzSvEDHUd1bnLBl1C58Xu/CyKlPqVU7mLfK0JcgEaYQTSX6fCJVNLbbCrcGLsPJ +fbjlBbyeLjTV413fxPVuona62pBFjqdtbli2Qe8FRH2KBdm41JUJGdo+SdsFu7nc +BFOcubdw6LLIXvsTvwndKcHWx1rMX709QU1Vn1GAIsbJV/DWI231Jyyb+lxAUx/C +8vce5uVxiKcGS+g6OjsN3D3TtiEQGSXLh013W6Wsih8td8yMCMZ3w8LQ38br1GUe +ahLIgUJ9l6HDguM17R7kGqxNvbElsMUHfTtXXP7UDQUiYXDakg8xDP6n9DCDhJ8Y +bSt7OLB7NQ== +-----END CERTIFICATE-----`) + + // KeyPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) + KeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA146jurJLz9N5OfAjY0IIHfRv5rflmvsUVPll4iggh7bWsDPM +frJaucn7BwsMZcLBp+/R5iannDaoB29hlgVwL1VaJBQ03AtYVD+PoKdoLvTctu1B +045S5jtJf8WeTDfLXysmJRcNfLV7t35Gj8h4/L/w9UQ5LqOAEXRbxknvE6orG4S5 +NvvaMMXyBmxDPRXYsPKgcgtvnfhX78yCs9xn0FcJ9FaEaJvC+jQSADatSBqweSXn +2Qep7w537sZAZmOxdeddxyrhCfCrC1lmVlfTj1OI+COEbfliTZSo2w9rKx8cq0tk +Q7K/dfJwpDaFlTTt7GatF5FinnHXPdtby+IIpwIDAQABAoIBAAJK4RDmPooqTJrC +JA41MJLo+5uvjwCT9QZmVKAQHzByUFw1YNJkITTiognUI0CdzqNzmH7jIFs39ZeG +proKusO2G6xQjrNcZ4cV2fgyb5g4QHStl0qhs94A+WojduiGm2IaumAgm6Mc5wDv +ld6HmknN3Mku/ZCyanVFEIjOVn2WB7ZQLTBs6ZYaebTJG2Xv6p9t2YJW7pPQ9Xce +s9ohAWohyM4X/OvfnfnLtQp2YLw/BxwehBsCR5SXM3ibTKpFNtxJC8hIfTuWtxZu +2ywrmXShYBRB1WgtZt5k04bY/HFncvvcHK3YfI1+w4URKtwdaQgPUQRbVwDwuyBn +flfkCJECgYEA/eWt01iEyE/lXkGn6V9lCocUU7lCU6yk5UT8VXVUc5If4KZKPfCk +p4zJDOqwn2eM673aWz/mG9mtvAvmnugaGjcaVCyXOp/D/GDmKSoYcvW5B/yjfkLy +dK6Yaa5LDRVYlYgyzcdCT5/9Qc626NzFwKCZNI4ncIU8g7ViATRxWJ8CgYEA2Ver +vZ0M606sfgC0H3NtwNBxmuJ+lIF5LNp/wDi07lDfxRR1rnZMX5dnxjcpDr/zvm8J 
+WtJJX3xMgqjtHuWKL3yKKony9J5ZPjichSbSbhrzfovgYIRZLxLLDy4MP9L3+CX/ +yBXnqMWuSnFX+M5fVGxdDWiYF3V+wmeOv9JvavkCgYEAiXAPDFzaY+R78O3xiu7M +r0o3wqqCMPE/wav6O/hrYrQy9VSO08C0IM6g9pEEUwWmzuXSkZqhYWoQFb8Lc/GI +T7CMXAxXQLDDUpbRgG79FR3Wr3AewHZU8LyiXHKwxcBMV4WGmsXGK3wbh8fyU1NO +6NsGk+BvkQVOoK1LBAPzZ1kCgYEAsBSmD8U33T9s4dxiEYTrqyV0lH3g/SFz8ZHH +pAyNEPI2iC1ONhyjPWKlcWHpAokiyOqeUpVBWnmSZtzC1qAydsxYB6ShT+sl9BHb +RMix/QAauzBJhQhUVJ3OIys0Q1UBDmqCsjCE8SfOT4NKOUnA093C+YT+iyrmmktZ +zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb +7+a1OMhLwWbuz+pl64wFKrbSUyimMOYQpjVE/1vk/kb99pxbgol27hdKyTH1d+ov +kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA= +-----END RSA PRIVATE KEY-----`) +) + +func testShutdownBindPort(t *testing.T, protocol string, port string) { + handler := NewServeMux() + handler.HandleFunc(".", func(w ResponseWriter, r *Msg) {}) + startedCh := make(chan struct{}) + s := &Server{ + Addr: net.JoinHostPort("127.0.0.1", port), + Net: protocol, + Handler: handler, + NotifyStartedFunc: func() { + startedCh <- struct{}{} + }, + } + go func() { + if err := s.ListenAndServe(); err != nil { + t.Log(err) + } + }() + <-startedCh + t.Logf("DNS server is started on: %s", s.Addr) + if err := s.Shutdown(); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + go func() { + if err := s.ListenAndServe(); err != nil { + t.Fatal(err) + } + }() + <-startedCh + t.Logf("DNS server is started on: %s", s.Addr) +} + +func TestShutdownBindPortUDP(t *testing.T) { + testShutdownBindPort(t, "udp", "1153") +} + +func TestShutdownBindPortTCP(t *testing.T) { + testShutdownBindPort(t, "tcp", "1154") +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 0000000..f31e9e6 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,218 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + + buf = append(buf, signature...) 
+ if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(signature)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. +// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, 
ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/sig0_test.go b/vendor/github.com/miekg/dns/sig0_test.go new file mode 100644 index 0000000..122de6a --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0_test.go @@ -0,0 +1,89 @@ +package dns + +import ( + "crypto" + "testing" + "time" +) + +func TestSIG0(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + m := new(Msg) + m.SetQuestion("example.org.", TypeSOA) + for _, alg := range []uint8{ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { + algstr := AlgorithmToString[alg] + keyrr := new(KEY) + keyrr.Hdr.Name = algstr + "." + keyrr.Hdr.Rrtype = TypeKEY + keyrr.Hdr.Class = ClassINET + keyrr.Algorithm = alg + keysize := 1024 + switch alg { + case ECDSAP256SHA256: + keysize = 256 + case ECDSAP384SHA384: + keysize = 384 + } + pk, err := keyrr.Generate(keysize) + if err != nil { + t.Errorf("failed to generate key for “%s”: %v", algstr, err) + continue + } + now := uint32(time.Now().Unix()) + sigrr := new(SIG) + sigrr.Hdr.Name = "." + sigrr.Hdr.Rrtype = TypeSIG + sigrr.Hdr.Class = ClassANY + sigrr.Algorithm = alg + sigrr.Expiration = now + 300 + sigrr.Inception = now - 300 + sigrr.KeyTag = keyrr.KeyTag() + sigrr.SignerName = keyrr.Hdr.Name + mb, err := sigrr.Sign(pk.(crypto.Signer), m) + if err != nil { + t.Errorf("failed to sign message using “%s”: %v", algstr, err) + continue + } + m := new(Msg) + if err := m.Unpack(mb); err != nil { + t.Errorf("failed to unpack message signed using “%s”: %v", algstr, err) + continue + } + if len(m.Extra) != 1 { + t.Errorf("missing SIG for message signed using “%s”", algstr) + continue + } + var sigrrwire *SIG + switch rr := m.Extra[0].(type) { + case *SIG: + sigrrwire = rr + default: + t.Errorf("expected SIG RR, instead: %v", rr) + continue + } + for _, rr := range []*SIG{sigrr, sigrrwire} { + id := "sigrr" + if rr == sigrrwire { + id = "sigrrwire" + } + if err := rr.Verify(keyrr, mb); err != nil { + t.Errorf("failed to verify “%s” signed SIG(%s): %v", algstr, id, err) + continue + } + } + mb[13]++ + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Errorf("verify succeeded on an altered message using “%s”", algstr) + continue + } + sigrr.Expiration = 2 + sigrr.Inception = 1 + mb, _ = sigrr.Sign(pk.(crypto.Signer), m) + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Errorf("verify succeeded on an expired message using “%s”", algstr) + continue + } + } +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 0000000..9573c7d --- /dev/null +++ b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. 
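+//
+// Editor's note (illustrative sketch, not part of the upstream source): the
+// client is the intended caller of Do; a coalesced lookup would look roughly
+// like this, where the key and the "exchange" helper are made-up stand-ins:
+//
+//	r, rtt, err, shared := g.Do(q.Name+strconv.Itoa(int(q.Qtype)), func() (*Msg, time.Duration, error) {
+//		return exchange(m, addr)
+//	})
+//	_ = shared // true when this caller piggybacked on an in-flight query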
+type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 0000000..4e7ded4 --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. +func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SMIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email, domain string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 0000000..431e2fb --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. +func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeTLSA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a TLSA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *TLSA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? 
+ } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// TLSAName returns the ownername of a TLSA resource record as per the +// rules specified in RFC 6698, Section 3. +func TLSAName(name, service, network string) (string, error) { + if !IsFqdn(name) { + return "", ErrFqdn + } + p, err := net.LookupPort(network, service) + if err != nil { + return "", err + } + return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil +} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go new file mode 100644 index 0000000..2401309 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig.go @@ -0,0 +1,383 @@ +package dns + +import ( + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "encoding/hex" + "hash" + "strconv" + "strings" + "time" +) + +// HMAC hashing codes. These are transmitted as domain names. +const ( + HmacMD5 = "hmac-md5.sig-alg.reg.int." + HmacSHA1 = "hmac-sha1." + HmacSHA256 = "hmac-sha256." + HmacSHA512 = "hmac-sha512." +) + +// TSIG is the RR the holds the transaction signature of a message. +// See RFC 2845 and RFC 4635. +type TSIG struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` + OrigId uint16 + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TSIG has no official presentation format, but this will suffice. + +func (rr *TSIG) String() string { + s := "\n;; TSIG PSEUDOSECTION:\n" + s += rr.Hdr.String() + + " " + rr.Algorithm + + " " + tsigTimeToString(rr.TimeSigned) + + " " + strconv.Itoa(int(rr.Fudge)) + + " " + strconv.Itoa(int(rr.MACSize)) + + " " + strings.ToUpper(rr.MAC) + + " " + strconv.Itoa(int(rr.OrigId)) + + " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR + " " + strconv.Itoa(int(rr.OtherLen)) + + " " + rr.OtherData + return s +} + +// The following values must be put in wireformat, so that the MAC can be calculated. +// RFC 2845, section 3.4.2. TSIG Variables. +type tsigWireFmt struct { + // From RR_Header + Name string `dns:"domain-name"` + Class uint16 + Ttl uint32 + // Rdata of the TSIG + Algorithm string `dns:"domain-name"` + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 + // MACSize, MAC and OrigId excluded + Error uint16 + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC +type macWireFmt struct { + MACSize uint16 + MAC string `dns:"size-hex:MACSize"` +} + +// 3.3. Time values used in TSIG calculations +type timerWireFmt struct { + TimeSigned uint64 `dns:"uint48"` + Fudge uint16 +} + +// TsigGenerate fills out the TSIG record attached to the message. +// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. 
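+//
+// Editor's note (illustrative sketch, not part of the upstream source; the key
+// name and base64 secret are made up): a typical request looks roughly like
+// this, mirroring tsig_test.go below:
+//
+//	m := new(Msg)
+//	m.SetQuestion("example.org.", TypeSOA)
+//	m.SetTsig("axfr.", HmacSHA256, 300, time.Now().Unix())
+//	wire, mac, err := TsigGenerate(m, "c2VjcmV0IGtleQ==", "", false)
+//	// send wire; keep mac as the request MAC when checking the reply:
+//	// err = TsigVerify(replyWire, "c2VjcmV0IGtleQ==", mac, false)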
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + h.Write(buf) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/miekg/dns/tsig_test.go new file mode 100644 index 0000000..48b9988 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig_test.go @@ -0,0 +1,37 @@ +package dns + +import ( + "testing" + "time" +) + +func newTsig(algo string) *Msg { + m := new(Msg) + m.SetQuestion("example.org.", TypeA) + m.SetTsig("example.", algo, 300, time.Now().Unix()) + return m +} + +func TestTsig(t *testing.T) { + m := newTsig(HmacMD5) + buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } + err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } +} + +func TestTsigCase(t *testing.T) { + m := newTsig("HmAc-mD5.sig-ALg.rEg.int.") // HmacMD5 + buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } + err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 0000000..53da475 --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1287 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + TypeAVC uint16 = 258 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes. + RcodeSuccess = 0 + RcodeFormatError = 1 + RcodeServerFailure = 2 + RcodeNameError = 3 + RcodeNotImplemented = 4 + RcodeRefused = 5 + RcodeYXDomain = 6 + RcodeYXRrset = 7 + RcodeNXRrset = 8 + RcodeNotAuth = 9 + RcodeNotZone = 10 + RcodeBadSig = 16 // TSIG + RcodeBadVers = 16 // EDNS0 + RcodeBadKey = 17 + RcodeBadTime = 18 + RcodeBadMode = 19 // TKEY + RcodeBadName = 20 + RcodeBadAlg = 21 + RcodeBadTrunc = 22 // TSIG + RcodeBadCookie = 23 // DNS Cookies + + // Message Opcodes. There is no 3. + OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Header is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authticated data + _CD = 1 << 4 // checking disabled + + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. 
+ + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverseof CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +} + +type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +type RT struct { + Hdr RR_Header + 
Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) 
+ } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, just an RFC 1035 "quoted" character + return b[offset+1], 2 +} + +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +type AVC struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// The CERT resource record, see RFC 4398. +type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// The DNAME resource record, see RFC 2672. 
+type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931. 
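+//
+// Editor's note (illustrative sketch, not part of the upstream source): SIG(0)
+// signing goes through SIG.Sign and SIG.Verify in sig0.go above, roughly as
+// sig0_test.go does:
+//
+//	sig := new(SIG)
+//	sig.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY}
+//	sig.Algorithm, sig.KeyTag, sig.SignerName = RSASHA256, key.KeyTag(), key.Hdr.Name
+//	now := uint32(time.Now().Unix())
+//	sig.Inception, sig.Expiration = now-300, now+300
+//	wire, err := sig.Sign(priv.(crypto.Signer), msg) // msg packed with the SIG appended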
+type SIG struct { + RRSIG +} + +type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type DLV struct { + DS +} + +type CDS struct { + DS +} + +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +type KEY struct { + DNSKEY +} + +type CDNSKEY struct { + DNSKEY +} + +type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +type NSAPPTR struct { + Hdr RR_Header + Ptr string 
`dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string + OtherLen uint16 + OtherData string +} + +func (rr *TKEY) String() string { + // It has no presentation format + return "" +} + +// RFC3597 represents an unknown/generic RR. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +type SMIMEA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *SMIMEA) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + // Every Nth char needs a space on this output. If we output + // this as one giant line, we can't read it can in because in some cases + // the cert length overflows scan.maxTok (2048). 
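+ // For example (illustrative sizes only): a 2100 character certificate
+ // string comes back from splitN below as chunks of 1024, 1024 and 52
+ // characters, which are then joined with single spaces.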
+ sx := splitN(rr.Certificate, 1024) // conservative value here + s += " " + strings.Join(sx, " ") + return s +} + +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +} + +type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. 
+// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to an 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. +func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +} + +// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. +func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// SplitN splits a string into N sized string chunks. +// This might become an exported function once. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 0000000..dd13109 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,271 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, +} + +var packageHdr = ` +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . 
"NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +// Header() functions +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`)) + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len()\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: + o("l += len(rr.%s) + 1\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case 
strings.HasPrefix(st.Tag(i), `dns:"size-hex`): + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("l += len(rr.%s)/2 + 1\n") + case st.Tag(i) == `dns:"a"`: + o("l += net.IPv4len // %s\n") + case st.Tag(i) == `dns:"aaaa"`: + o("l += net.IPv6len // %s\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("l++ // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"*rr.Hdr.copyHeader()"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/miekg/dns/types_test.go new file mode 100644 index 0000000..c117cfb --- /dev/null +++ b/vendor/github.com/miekg/dns/types_test.go @@ -0,0 +1,74 @@ +package dns + +import ( + "testing" +) + +func TestCmToM(t *testing.T) { + s := cmToM(0, 0) + if s != "0.00" { + t.Error("0, 0") + } + + s = cmToM(1, 0) + if s != "0.01" { + t.Error("1, 0") + } + + s = cmToM(3, 1) + if s != "0.30" { + t.Error("3, 1") + } + + s = cmToM(4, 2) + if s != "4" { + t.Error("4, 2") + } + + s = cmToM(5, 3) + if s != "50" { + t.Error("5, 3") + } + + s = cmToM(7, 5) + if s != "7000" { + t.Error("7, 5") + } + + s = cmToM(9, 9) + if s != "90000000" { + t.Error("9, 9") + } +} + +func TestSplitN(t *testing.T) { + xs := splitN("abc", 5) + if len(xs) != 1 && xs[0] != "abc" { + t.Errorf("Failure to split abc") + } + + s := "" + for i := 0; i < 255; i++ { + s += "a" + } + + xs = splitN(s, 255) + if len(xs) != 1 && xs[0] != s { + t.Errorf("failure to split 255 char long string") + } + + s += "b" + xs = splitN(s, 255) + if len(xs) != 2 || xs[1] != "b" { + t.Errorf("failure to split 256 char long string: %d", len(xs)) + } + + // Make s longer + for i := 0; i < 255; i++ { + s += "a" + } + xs = splitN(s, 255) + if len(xs) != 3 || xs[2] != "a" { + t.Errorf("failure to split 510 char long string: %d", len(xs)) + } +} diff --git a/vendor/github.com/miekg/dns/udp.go 
b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 0000000..af111b9 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,34 @@ +// +build !windows + +package dns + +import ( + "net" +) + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr) + return n, err +} diff --git a/vendor/github.com/miekg/dns/udp_linux.go b/vendor/github.com/miekg/dns/udp_linux.go new file mode 100644 index 0000000..033df42 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_linux.go @@ -0,0 +1,105 @@ +// +build linux,!appengine + +package dns + +// See: +// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and +// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/ +// +// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing +// interface, this might not always be the correct one. This code will make sure the egress +// packet's interface matched the ingress' one. + +import ( + "net" + "syscall" +) + +// setUDPSocketOptions sets the UDP socket options. +// This function is implemented on a per platform basis. See udp_*.go for more details +func setUDPSocketOptions(conn *net.UDPConn) error { + sa, err := getUDPSocketName(conn) + if err != nil { + return err + } + switch sa.(type) { + case *syscall.SockaddrInet6: + v6only, err := getUDPSocketOptions6Only(conn) + if err != nil { + return err + } + setUDPSocketOptions6(conn) + if !v6only { + setUDPSocketOptions4(conn) + } + case *syscall.SockaddrInet4: + setUDPSocketOptions4(conn) + } + return nil +} + +// setUDPSocketOptions4 prepares the v4 socket for sessions. +func setUDPSocketOptions4(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil { + file.Close() + return err + } + // Calling File() above results in the connection becoming blocking, we must fix that. + // See https://github.com/miekg/dns/issues/279 + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + file.Close() + return err + } + file.Close() + return nil +} + +// setUDPSocketOptions6 prepares the v6 socket for sessions. 
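+// A minimal sketch of the session flow these socket options enable (the
+// port and buffer size are only examples, error handling elided):
+//
+//  conn, _ := net.ListenUDP("udp", &net.UDPAddr{Port: 5353})
+//  setUDPSocketOptions(conn)              // request per-packet address info
+//  b := make([]byte, 512)
+//  n, s, _ := ReadFromSessionUDP(conn, b) // s holds the peer address and oob data
+//  WriteToSessionUDP(conn, b[:n], s)      // reply goes out the way the query came in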
+func setUDPSocketOptions6(conn *net.UDPConn) error { + file, err := conn.File() + if err != nil { + return err + } + if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil { + file.Close() + return err + } + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + file.Close() + return err + } + file.Close() + return nil +} + +// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined +// (dualstack). +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { + file, err := conn.File() + if err != nil { + return false, err + } + // dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections + v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY) + if err != nil { + file.Close() + return false, err + } + file.Close() + return v6only == 1, nil +} + +func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { + file, err := conn.File() + if err != nil { + return nil, err + } + defer file.Close() + return syscall.Getsockname(int(file.Fd())) +} diff --git a/vendor/github.com/miekg/dns/udp_other.go b/vendor/github.com/miekg/dns/udp_other.go new file mode 100644 index 0000000..488a282 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_other.go @@ -0,0 +1,15 @@ +// +build !linux appengine + +package dns + +import ( + "net" +) + +// These do nothing. See udp_linux.go for an example of how to implement this. + +// We tried to adhire to some kind of naming scheme. +func setUDPSocketOptions(conn *net.UDPConn) error { return nil } +func setUDPSocketOptions4(conn *net.UDPConn) error { return nil } +func setUDPSocketOptions6(conn *net.UDPConn) error { return nil } +func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil } diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 0000000..51e532a --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package dns + +import "net" + +type SessionUDP struct { + raddr *net.UDPAddr +} + +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + session := &SessionUDP{raddr.(*net.UDPAddr)} + return n, session, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, err := conn.WriteTo(b, session.raddr) + return n, err +} + diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 0000000..e90c5c9 --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,106 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is in not use" RRs. 
RFC 2136 section 2.4.5. +func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. +func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = ClassNONE + r.Header().Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go new file mode 100644 index 0000000..12760a1 --- /dev/null +++ b/vendor/github.com/miekg/dns/update_test.go @@ -0,0 +1,145 @@ +package dns + +import ( + "bytes" + "testing" +) + +func TestDynamicUpdateParsing(t *testing.T) { + prefix := "example.com. 
IN " + for _, typ := range TypeToString { + if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || + typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" || + typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" { + continue + } + r, err := NewRR(prefix + typ) + if err != nil { + t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) + } else { + t.Logf("parsed: %s", r.String()) + } + } +} + +func TestDynamicUpdateUnpack(t *testing.T) { + // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 + // It should be an update message for the zone "example.", + // deleting the A RRset "example." and then adding an A record at "example.". + // class ANY, TYPE A + buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} + msg := new(Msg) + err := msg.Unpack(buf) + if err != nil { + t.Errorf("failed to unpack: %v\n%s", err, msg.String()) + } +} + +func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { + m := new(Msg) + rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} + m.Answer = []RR{rr, rr, rr, rr, rr} + m.Ns = m.Answer + for n, s := range TypeToString { + rr.Rrtype = n + bytes, err := m.Pack() + if err != nil { + t.Errorf("failed to pack %s: %v", s, err) + continue + } + if err := new(Msg).Unpack(bytes); err != nil { + t.Errorf("failed to unpack %s: %v", s, err) + } + } +} + +func TestRemoveRRset(t *testing.T) { + // Should add a zero data RR in Class ANY with a TTL of 0 + // for each set mentioned in the RRs provided to it. + rr, err := NewRR(". 100 IN A 127.0.0.1") + if err != nil { + t.Fatalf("error constructing RR: %v", err) + } + m := new(Msg) + m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} + expectstr := m.String() + expect, err := m.Pack() + if err != nil { + t.Fatalf("error packing expected msg: %v", err) + } + + m.Ns = nil + m.RemoveRRset([]RR{rr}) + actual, err := m.Pack() + if err != nil { + t.Fatalf("error packing actual msg: %v", err) + } + if !bytes.Equal(actual, expect) { + tmp := new(Msg) + if err := tmp.Unpack(actual); err != nil { + t.Fatalf("error unpacking actual msg: %v\nexpected: %v\ngot: %v\n", err, expect, actual) + } + t.Errorf("expected msg:\n%s", expectstr) + t.Errorf("actual msg:\n%v", tmp) + } +} + +func TestPreReqAndRemovals(t *testing.T) { + // Build a list of multiple prereqs and then somes removes followed by an insert. + // We should be able to add multiple prereqs and updates. + m := new(Msg) + m.SetUpdate("example.org.") + m.Id = 1234 + + // Use a full set of RRs each time, so we are sure the rdata is stripped. + rrName1, _ := NewRR("name_used. 3600 IN A 127.0.0.1") + rrName2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1") + rrRemove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1") + rrRemove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1") + rrRemove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1") + rrInsert, _ := NewRR("insert. 3600 IN A 127.0.0.1") + rrRrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1") + rrRrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1") + rrRrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1") + + // Handle the prereqs. + m.NameUsed([]RR{rrName1}) + m.NameNotUsed([]RR{rrName2}) + m.RRsetUsed([]RR{rrRrset1}) + m.Used([]RR{rrRrset2}) + m.RRsetNotUsed([]RR{rrRrset3}) + + // and now the updates. 
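+ // Each helper used below maps to one RFC 2136 update operation:
+ // RemoveName deletes all RRsets of a name (section 2.5.3), RemoveRRset
+ // deletes a single RRset (2.5.2), Remove deletes one RR from an RRset
+ // (2.5.4), and Insert adds an RRset (2.5.1).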
+ m.RemoveName([]RR{rrRemove1}) + m.RemoveRRset([]RR{rrRemove2}) + m.Remove([]RR{rrRemove3}) + m.Insert([]RR{rrInsert}) + + // This test function isn't a Example function because we print these RR with tabs at the + // end and the Example function trim these, thus they never match. + // TODO(miek): don't print these tabs and make this into an Example function. + expect := `;; opcode: UPDATE, status: NOERROR, id: 1234 +;; flags:; QUERY: 1, ANSWER: 5, AUTHORITY: 4, ADDITIONAL: 0 + +;; QUESTION SECTION: +;example.org. IN SOA + +;; ANSWER SECTION: +name_used. 0 ANY ANY +name_not_used. 0 NONE ANY +rrset_used1. 0 ANY A +rrset_used2. 3600 IN A 127.0.0.1 +rrset_not_used. 0 NONE A + +;; AUTHORITY SECTION: +remove1. 0 ANY ANY +remove2. 0 ANY A +remove3. 0 NONE A 127.0.0.1 +insert. 3600 IN A 127.0.0.1 +` + + if m.String() != expect { + t.Errorf("expected msg:\n%s", expect) + t.Errorf("actual msg:\n%v", m.String()) + } +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 0000000..576c559 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,255 @@ +package dns + +import ( + "fmt" + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +} + +// A Transfer defines parameters that are used during a zone transfer. +type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified + tsigTimersOnly bool +} + +// Think we need to away to stop the transfer + +// In performs an incoming transfer with the server in a. 
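+// Each DNS message received during the transfer is delivered on the
+// returned channel as one Envelope; the channel is closed when the
+// transfer finishes or an error occurs.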
+// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + if err := t.WriteMsg(q); err != nil { + return nil, err + } + env = make(chan *Envelope) + go func() { + if q.Question[0].Qtype == TypeAXFR { + go t.inAxfr(q.Id, env) + return + } + if q.Question[0].Qtype == TypeIXFR { + go t.inIxfr(q.Id, env) + return + } + }() + return env, nil +} + +func (t *Transfer) inAxfr(id uint16, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this. + if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(id uint16, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + // A single SOA RR signals "no changes" + if len(in.Answer) == 1 && isSOAFirst(in) { + c <- &Envelope{in.Answer, nil} + return + } + + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + first = !first + } + + // Now we need to check each message for SOA records, to see what we need to do + if !first { + t.tsigTimersOnly = true + // If the last record in the IXFR contains the servers' SOA, we should quit + if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok { + if v.Serial == serial { + c <- &Envelope{in.Answer, nil} + return + } + } + c <- &Envelope{in.Answer, nil} + } + } +} + +// Out performs an outgoing transfer with the client connecting in w. 
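+// Each Envelope received on ch is written to w as a single reply message;
+// Out returns when ch is closed or when writing a message fails.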
+// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +} + +// WriteMsg writes a message through the transfer connection t. +func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} + +const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/miekg/dns/xfr_test.go new file mode 100644 index 0000000..a478963 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr_test.go @@ -0,0 +1,184 @@ +// +build net + +package dns + +import ( + "net" + "strings" + "testing" + "time" +) + +func getIP(s string) string { + a, err := net.LookupAddr(s) + if err != nil { + return "" + } + return a[0] +} + +// flaky, need to setup local server and test from that. +func TestAXFR_Miek(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("miek.nl.") + + server := getIP("linode.atoom.net") + + tr := new(Transfer) + + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +// fails. 
+func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) { + // This test runs against a server maintained by NLnet Labs + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("nlnetlabs.nl.") + + server := getIP("open.nlnetlabs.nl.") + + tr := new(Transfer) + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatalf("failed to setup axfr %v for server: %v", err, server) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + } + } +} + +func TestAXFR_Miek_Tsig(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("example.nl.") + m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) + + tr := new(Transfer) + tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + + if a, err := tr.In(m, "176.58.119.54:53"); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") } +func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) } +func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) } +func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) } + +func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") } +func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) } +func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) } +func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) } + +func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") } +func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) } +func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) } +func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) } + +func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") } +func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) } +func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) } +func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) } + +func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") } +func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) } +func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) } +func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) } + +func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") } +func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) } +func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) } +func TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) } + +func testAXFRSIDN(t *testing.T, host, alg string) { + // This tests run against a server maintained by SIDN labs, see: + // https://workbench.sidnlabs.nl/ + if testing.Short() { + return + } + x := new(Transfer) + x.TsigSecret = map[string]string{ + "wb_md5.": "Wu/utSasZUkoeCNku152Zw==", + "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==", + "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=", + } + 
keyname := map[string]string{ + HmacMD5: "wb_md5.", + HmacSHA1: "wb_sha1_longkey.", + HmacSHA256: "wb_sha256.", + }[alg] + + m := new(Msg) + m.SetAxfr("types.wb.sidnlabs.nl.") + if keyname != "" { + m.SetTsig(keyname, alg, 300, time.Now().Unix()) + } + c, err := x.In(m, host+".sidnlabs.nl:53") + if err != nil { + t.Fatal(err) + } + for e := range c { + if e.Error != nil { + t.Fatal(e.Error) + } + } +} + +func TestAXFRFailNotAuth(t *testing.T) { + // This tests run against a server maintained by SIDN labs, see: + // https://workbench.sidnlabs.nl/ + if testing.Short() { + return + } + x := new(Transfer) + + m := new(Msg) + m.SetAxfr("sidnlabs.nl.") + c, err := x.In(m, "yadifa.sidnlabs.nl:53") + if err != nil { + t.Fatal(err) + } + for e := range c { + if e.Error != nil { + if !strings.HasPrefix(e.Error.Error(), "dns: bad xfr rcode:") { + t.Fatal(e.Error) + } + } + } +} diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go new file mode 100644 index 0000000..b277978 --- /dev/null +++ b/vendor/github.com/miekg/dns/zcompress.go @@ -0,0 +1,119 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from compress_generate.go + +package dns + +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *PTR: + compressionLenHelper(c, x.Ptr) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *HIP: + for i := range x.RendezvousServers { + compressionLenHelper(c, x.RendezvousServers[i]) + } + case *LP: + compressionLenHelper(c, x.Fqdn) + case *CNAME: + compressionLenHelper(c, x.Target) + case *MB: + compressionLenHelper(c, x.Mb) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *MF: + compressionLenHelper(c, x.Mf) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *SIG: + compressionLenHelper(c, x.SignerName) + case *SRV: + compressionLenHelper(c, x.Target) + case *TSIG: + compressionLenHelper(c, x.Algorithm) + case *KX: + compressionLenHelper(c, x.Exchanger) + case *MG: + compressionLenHelper(c, x.Mg) + case *NSAPPTR: + compressionLenHelper(c, x.Ptr) + case *PX: + compressionLenHelper(c, x.Map822) + compressionLenHelper(c, x.Mapx400) + case *DNAME: + compressionLenHelper(c, x.Target) + case *MR: + compressionLenHelper(c, x.Mr) + case *MX: + compressionLenHelper(c, x.Mx) + case *TKEY: + compressionLenHelper(c, x.Algorithm) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + case *TALINK: + compressionLenHelper(c, x.PreviousName) + compressionLenHelper(c, x.NextName) + case *MD: + compressionLenHelper(c, x.Md) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *NS: + compressionLenHelper(c, x.Ns) + case *RT: + compressionLenHelper(c, x.Host) + } +} + +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *MG: + k1, ok1 := compressionLenSearch(c, x.Mg) + return k1, ok1 + case *PTR: + k1, ok1 := compressionLenSearch(c, x.Ptr) + return k1, ok1 + case *AFSDB: + k1, ok1 := compressionLenSearch(c, x.Hostname) + return k1, ok1 + case *MB: + k1, ok1 := compressionLenSearch(c, x.Mb) + return k1, ok1 + case *MD: + k1, ok1 := compressionLenSearch(c, x.Md) + return k1, ok1 + case *MF: + k1, ok1 := compressionLenSearch(c, x.Mf) + return k1, ok1 + case *NS: + k1, ok1 := compressionLenSearch(c, x.Ns) + return k1, ok1 + case *RT: + k1, ok1 := 
compressionLenSearch(c, x.Host) + return k1, ok1 + case *SOA: + k1, ok1 := compressionLenSearch(c, x.Ns) + k2, ok2 := compressionLenSearch(c, x.Mbox) + return k1 + k2, ok1 && ok2 + case *CNAME: + k1, ok1 := compressionLenSearch(c, x.Target) + return k1, ok1 + case *MINFO: + k1, ok1 := compressionLenSearch(c, x.Rmail) + k2, ok2 := compressionLenSearch(c, x.Email) + return k1 + k2, ok1 && ok2 + case *MR: + k1, ok1 := compressionLenSearch(c, x.Mr) + return k1, ok1 + case *MX: + k1, ok1 := compressionLenSearch(c, x.Mx) + return k1, ok1 + } + return 0, false +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 0000000..418fb1f --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3565 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from msg_generate.go + +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off 
+ off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) 
pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err 
= packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, 
err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, 
compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. 
empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Map822, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, 
compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, 
compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := 
rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AVC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil 
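A note on the unpack helpers used throughout this file: each one reads a fixed-width, big-endian field, checks that the message is long enough, and returns the advanced offset. The standalone sketch below follows that same shape using only the standard library; readUint8/readUint16 are hypothetical names for illustration, not the vendored helpers themselves.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errTruncated = errors.New("dns: overflowing message buffer")

// readUint8 returns the byte at off and the advanced offset.
func readUint8(msg []byte, off int) (uint8, int, error) {
	if off+1 > len(msg) {
		return 0, len(msg), errTruncated
	}
	return msg[off], off + 1, nil
}

// readUint16 reads a big-endian uint16, the byte order DNS uses on the wire.
func readUint16(msg []byte, off int) (uint16, int, error) {
	if off+2 > len(msg) {
		return 0, len(msg), errTruncated
	}
	return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}

func main() {
	// Flags=257, Protocol=3, Algorithm=8: the leading fixed fields of a DNSKEY-style RDATA.
	msg := []byte{0x01, 0x01, 0x03, 0x08}
	flags, off, _ := readUint16(msg, 0)
	proto, off, _ := readUint8(msg, off)
	alg, _, _ := readUint8(msg, off)
	fmt.Println(flags, proto, alg) // 257 3 8
}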
+ } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil 
{ + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, 
err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, 
nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + 
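Several of the unpackers in this part of the file (MINFO, MR, MX, NS) do little more than read one or two domain names. As a rough illustration of what decoding a wire-format name involves, the sketch below handles plain length-prefixed labels only; unlike UnpackDomainName in this file it does not follow compression pointers, and readName is a made-up name used only for this example.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// readName decodes an uncompressed wire-format name: a sequence of
// length-prefixed labels terminated by a zero-length root label.
func readName(msg []byte, off int) (string, int, error) {
	var labels []string
	for {
		if off >= len(msg) {
			return "", off, errors.New("dns: truncated name")
		}
		l := int(msg[off])
		off++
		switch {
		case l == 0: // root label terminates the name
			return strings.Join(labels, ".") + ".", off, nil
		case l&0xC0 != 0: // compression pointer; a real decoder would follow it
			return "", off, errors.New("dns: compression pointers not handled in this sketch")
		case off+l > len(msg):
			return "", off, errors.New("dns: truncated label")
		}
		labels = append(labels, string(msg[off:off+l]))
		off += l
	}
}

func main() {
	// "mail.example." encoded as: 4 m a i l 7 e x a m p l e 0
	msg := []byte{4, 'm', 'a', 'i', 'l', 7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 0}
	name, _, _ := readName(msg, 0)
	fmt.Println(name) // mail.example.
}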
if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + 
return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = 
rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if 
err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SMIMEA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) 
{ + return rr, off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr 
:= new(TKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, 
err + } + return rr, off, err +} + +func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeAVC: unpackAVC, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeDHCID: unpackDHCID, + TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSMIMEA: unpackSMIMEA, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: 
unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 0000000..3e534f1 --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,857 @@ +// *** DO NOT MODIFY *** +// AUTOGENERATED BY go generate from type_generate.go + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeAVC: func() RR { return new(AVC) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. 
+var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAVC: "AVC", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +// Header() functions +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *AVC) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr *KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header 
{ return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *AVC) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l++ // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // Address + return l +} +func (rr *EUI64) len() int { + l := 
rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l++ // HitLength + l++ // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit)/2 + 1 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l++ // Version + l++ // Size + l++ // HorizPre + l++ // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l++ // Hash + l++ // Flags + l += 2 // Iterations + l++ // SaltLength + l += len(rr.Salt)/2 + 1 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() 
int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l++ // Algorithm + l++ // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SMIMEA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l++ // Algorithm + l++ // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) + 1 + l += 2 // OtherLen + l += len(rr.OtherData) + 1 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC)/2 + 1 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData)/2 + 1 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} +} +func (rr *ANY) copy() RR { + return &ANY{*rr.Hdr.copyHeader()} +} +func (rr *AVC) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &AVC{*rr.Hdr.copyHeader(), Txt} +} +func (rr *CAA) copy() RR { + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DHCID) copy() RR { + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DNSKEY) copy() RR { + return 
&DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *GID) copy() RR { + return &GID{*rr.Hdr.copyHeader(), rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{*rr.Hdr.copyHeader(), rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{*rr.Hdr.copyHeader(), rr.Md} +} +func (rr *MF) copy() RR { + return &MF{*rr.Hdr.copyHeader(), rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{*rr.Hdr.copyHeader(), rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{*rr.Hdr.copyHeader(), rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{*rr.Hdr.copyHeader(), ZSData} +} +func (rr *NS) copy() RR { + return &NS{*rr.Hdr.copyHeader(), rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{*rr.Hdr.copyHeader(), Option} +} +func (rr *PTR) copy() RR { + return 
&PTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{*rr.Hdr.copyHeader(), Txt} +} +func (rr *SRV) copy() RR { + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{*rr.Hdr.copyHeader(), Txt} +} +func (rr *UID) copy() RR { + return &UID{*rr.Hdr.copyHeader(), rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} +} |
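The generated tables above are the glue between wire-format type codes and the concrete Go structs: TypeToRR maps an RRTYPE to a constructor, TypeToString maps it to its mnemonic, and the per-type Header(), len() and copy() methods give every record a uniform interface. A minimal sketch of how a caller of the vendored package might use the two exported maps (assuming the usual import path github.com/miekg/dns; the names and values in the record are made up for illustration):

	package main

	import (
		"fmt"

		"github.com/miekg/dns"
	)

	func main() {
		// Build an empty MX record through the generated constructor table,
		// much as the library does when it meets an MX rrtype on the wire.
		newMX := dns.TypeToRR[dns.TypeMX]
		rr := newMX()

		// Every generated type exposes its header through Header().
		hdr := rr.Header()
		hdr.Name = "example.org." // hypothetical owner name
		hdr.Rrtype = dns.TypeMX
		hdr.Class = dns.ClassINET
		hdr.Ttl = 3600

		// TypeToString turns the numeric type back into its mnemonic.
		fmt.Println(dns.TypeToString[hdr.Rrtype]) // prints "MX"
	}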

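The unexported typeToUnpack table from zmsg.go is exercised indirectly whenever a message is decoded: Msg.Unpack reads each RR header and dispatches on the rrtype to the matching unpack function above. A rough round-trip sketch using only the package's public API (NewRR, Msg.Pack and Msg.Unpack; the record text is illustrative):

	package main

	import (
		"fmt"
		"log"

		"github.com/miekg/dns"
	)

	func main() {
		// Parse a record from presentation format.
		rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org.")
		if err != nil {
			log.Fatal(err)
		}

		// Pack it into a message and back out; the unpack side walks the
		// generated dispatch table keyed by the rrtype found on the wire.
		msg := new(dns.Msg)
		msg.SetQuestion("example.org.", dns.TypeMX)
		msg.Answer = append(msg.Answer, rr)

		wire, err := msg.Pack()
		if err != nil {
			log.Fatal(err)
		}

		parsed := new(dns.Msg)
		if err := parsed.Unpack(wire); err != nil {
			log.Fatal(err)
		}
		fmt.Println(parsed.Answer[0]) // prints the MX record that round-tripped
	}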