aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFelix Hanley <felix@userspace.com.au>2018-03-13 10:06:29 +0000
committerFelix Hanley <felix@userspace.com.au>2018-03-13 10:07:45 +0000
commita67075e332458002ce4c56c0aa07e03807dc22ea (patch)
tree30abaff23afac39d9c1ed2a33387289796531641
parentb0cfa1d27cc19f0712e579f5f9357d276b11be6b (diff)
downloaddhtsearch-a67075e332458002ce4c56c0aa07e03807dc22ea.tar.gz
dhtsearch-a67075e332458002ce4c56c0aa07e03807dc22ea.tar.bz2
Add node blacklist to DHT
-rw-r--r--Gopkg.lock23
-rw-r--r--Gopkg.toml8
-rw-r--r--dht/node.go19
-rw-r--r--dht/options.go9
-rw-r--r--tag.go108
-rw-r--r--vendor/github.com/hashicorp/golang-lru/.gitignore (renamed from vendor/github.com/jmoiron/sqlx/.gitignore)3
-rw-r--r--vendor/github.com/hashicorp/golang-lru/2q.go223
-rw-r--r--vendor/github.com/hashicorp/golang-lru/LICENSE362
-rw-r--r--vendor/github.com/hashicorp/golang-lru/README.md25
-rw-r--r--vendor/github.com/hashicorp/golang-lru/arc.go257
-rw-r--r--vendor/github.com/hashicorp/golang-lru/doc.go21
-rw-r--r--vendor/github.com/hashicorp/golang-lru/lru.go110
-rw-r--r--vendor/github.com/hashicorp/golang-lru/simplelru/lru.go161
-rw-r--r--vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go37
-rw-r--r--vendor/github.com/jackc/pgx/stdlib/sql.go609
-rw-r--r--vendor/github.com/jmoiron/sqlx/LICENSE23
-rw-r--r--vendor/github.com/jmoiron/sqlx/README.md185
-rw-r--r--vendor/github.com/jmoiron/sqlx/bind.go207
-rw-r--r--vendor/github.com/jmoiron/sqlx/doc.go12
-rw-r--r--vendor/github.com/jmoiron/sqlx/named.go346
-rw-r--r--vendor/github.com/jmoiron/sqlx/named_context.go132
-rw-r--r--vendor/github.com/jmoiron/sqlx/reflectx/README.md17
-rw-r--r--vendor/github.com/jmoiron/sqlx/reflectx/reflect.go441
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx.go1039
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx_context.go335
25 files changed, 1237 insertions, 3475 deletions
diff --git a/Gopkg.lock b/Gopkg.lock
index b52fde8..e5997e1 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -8,6 +8,15 @@
version = "0.1.1"
[[projects]]
+ branch = "master"
+ name = "github.com/hashicorp/golang-lru"
+ packages = [
+ ".",
+ "simplelru"
+ ]
+ revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
+
+[[projects]]
name = "github.com/jackc/pgx"
packages = [
".",
@@ -15,22 +24,12 @@
"internal/sanitize",
"pgio",
"pgproto3",
- "pgtype",
- "stdlib"
+ "pgtype"
]
revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27"
version = "v3.1.0"
[[projects]]
- branch = "master"
- name = "github.com/jmoiron/sqlx"
- packages = [
- ".",
- "reflectx"
- ]
- revision = "05cef0741ade10ca668982355b3f3f0bcf0ff0a8"
-
-[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
@@ -51,6 +50,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "3a93c20329f5c9baa71609787d58bcd42837e2dbe406f6686f0fd53585353484"
+ inputs-digest = "34c16759f6131880df1acf05556ccb8429da1b15ddfb5a6676b55d3a5beac5c8"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
index 6555b6b..20b7533 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -35,12 +35,12 @@
[[constraint]]
branch = "master"
- name = "github.com/jmoiron/sqlx"
-
-[[constraint]]
- branch = "master"
name = "golang.org/x/time"
[prune]
go-tests = true
unused-packages = true
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/hashicorp/golang-lru"
diff --git a/dht/node.go b/dht/node.go
index 423891b..829f244 100644
--- a/dht/node.go
+++ b/dht/node.go
@@ -10,6 +10,7 @@ import (
"github.com/felix/dhtsearch/krpc"
"github.com/felix/dhtsearch/models"
"github.com/felix/logger"
+ "github.com/hashicorp/golang-lru"
"golang.org/x/time/rate"
)
@@ -36,6 +37,7 @@ type Node struct {
packetsOut chan packet
log logger.Logger
limiter *rate.Limiter
+ blacklist *lru.ARCCache
// OnAnnoucePeer is called for each peer that announces itself
OnAnnouncePeer func(p models.Peer)
@@ -69,6 +71,13 @@ func NewNode(opts ...Option) (*Node, error) {
}
}
+ if n.blacklist == nil {
+ n.blacklist, err = lru.NewARC(1000)
+ if err != nil {
+ return nil, err
+ }
+ }
+
if n.family != "udp4" {
n.log.Debug("trying udp6 server")
n.conn, err = net.ListenPacket("udp6", fmt.Sprintf("[%s]:%d", net.IPv6zero.String(), n.port))
@@ -179,7 +188,7 @@ func (n *Node) packetWriter() {
//n.log.Debug("writing packet", "dest", p.raddr.String())
_, err := n.conn.WriteTo(p.data, p.raddr)
if err != nil {
- // TODO remove from routing or add to blacklist?
+ n.blacklist.Add(p.raddr.String(), true)
// TODO reduce limit
n.log.Warn("failed to write packet", "error", err)
}
@@ -235,6 +244,10 @@ func (n *Node) processPacket(p packet) error {
return err
}
+ if _, black := n.blacklist.Get(p.raddr.String()); black {
+ return fmt.Errorf("blacklisted", "address", p.raddr.String())
+ }
+
switch y {
case "q":
err = n.handleRequest(p.raddr, response)
@@ -243,11 +256,11 @@ func (n *Node) processPacket(p packet) error {
case "e":
err = n.handleError(p.raddr, response)
default:
- n.log.Warn("missing request type")
- return nil
+ err = fmt.Errorf("missing request type")
}
if err != nil {
n.log.Warn("failed to process packet", "error", err)
+ n.blacklist.Add(p.raddr.String(), true)
}
return err
}
diff --git a/dht/options.go b/dht/options.go
index 12a64a6..094d8f7 100644
--- a/dht/options.go
+++ b/dht/options.go
@@ -3,6 +3,7 @@ package dht
import (
"github.com/felix/dhtsearch/models"
"github.com/felix/logger"
+ "github.com/hashicorp/golang-lru"
)
type Option func(*Node) error
@@ -55,3 +56,11 @@ func SetLogger(l logger.Logger) Option {
return nil
}
}
+
+// SetBlacklistSize sets the size of the node blacklist
+func SetBlacklistSize(s int) Option {
+ return func(n *Node) (err error) {
+ n.blacklist, err = lru.NewARC(s)
+ return err
+ }
+}
diff --git a/tag.go b/tag.go
deleted file mode 100644
index 3c069ca..0000000
--- a/tag.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package dhtsearch
-
-import (
- "fmt"
- "regexp"
- "strings"
- "unicode"
-)
-
-// Default tags, can be supplimented or overwritten by config
-var tags = map[string]string{
- "flac": `\.flac$`,
- "episode": "(season|episode|s[0-9]{2}e[0-9]{2})",
- "1080": "1080",
- "720": "720",
- "hd": "hd|720|1080",
- "bdrip": "bdrip",
- "adult": `(xxx|p(orn|ussy)|censor|sex|urbat|a(ss|nal)|o(rgy|gasm)|(fu|di|co)ck|esbian|milf|lust|gay)|rotic|18(\+|yr)`,
- "dvdrip": "dvdrip",
- "ebook": "epub",
- "application": `\.(apk|exe|msi|dmg)$`,
- "android": `\.apk$`,
- "apple": `\.dmg$`,
- "subtitles": `\.s(rt|ub)$`,
- "archive": `\.(zip|rar|p7|tgz|bz2)$`,
- "video": `\.(3g2|3gp|amv|asf|avi|drc|f4a|f4b|f4p|f4v|flv|gif|gifv|m2v|m4p|m4v|mkv|mng|mov|mp2|mp4|mpe|mpeg|mpg|mpv|mxf|net|nsv|ogv|qt|rm|rmvb|roq|svi|vob|webm|wmv|yuv)$`,
- "audio": `\.(aa|aac|aax|act|aiff|amr|ape|au|awb|dct|dss|dvf|flac|gsm|iklax|ivs|m4a|m4b|mmf|mp3|mpc|msv|ogg|opus|ra|raw|sln|tta|vox|wav|wma|wv)$`,
- "document": `\.(cbr|cbz|cb7|cbt|cba|epub|djvu|fb2|ibook|azw.|lit|prc|mobi|pdb|pdb|oxps|xps)$`,
- "font": `(font|\.(ttf|fon)$)`,
-}
-
-func mergeCharacterTagREs(tagREs map[string]*regexp.Regexp) error {
- // Add character classes
- for cc := range unicode.Scripts {
- if cc == "Latin" || cc == "Common" {
- continue
- }
- className := strings.ToLower(cc)
- // Test for 3 or more characters per character class
- tagREs[className], err = regexp.Compile(fmt.Sprintf(`(?i)\p{%s}{3,}`, cc))
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func mergeTagRegexps(tagREs map[string]*regexp.Regexp, tags map[string]string) error {
- for tag, re := range tags {
- tagREs[tag], err = regexp.Compile("(?i)" + re)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func createTag(tag string) (tagId int, err error) {
- err = DB.QueryRow(sqlSelectTag, tag).Scan(&tagId)
- if err == nil {
- if Config.Debug {
- fmt.Printf("Found existing tag %s\n", tag)
- }
- } else {
- err = DB.QueryRow(sqlInsertTag, tag).Scan(&tagId)
- if err != nil {
- fmt.Println(err)
- return -1, err
- }
- if Config.Debug {
- fmt.Printf("Created new tag %s\n", tag)
- }
- }
- return tagId, nil
-}
-
-func tagTorrent(t *Torrent) {
- ttags := make(map[string]bool)
-
- for tag, re := range tagREs {
- if re.MatchString(t.Name) {
- ttags[tag] = true
- }
- for _, f := range t.Files {
- if re.MatchString(f.Path) {
- ttags[tag] = true
- }
- }
- }
- // Make unique
- for tt := range ttags {
- t.Tags = append(t.Tags, tt)
- }
-}
-
-func hasTag(t Torrent, tag string) bool {
- for _, t := range t.Tags {
- if tag == t {
- return true
- }
- }
- return false
-}
-
-const (
- sqlSelectTag = `select id from tags where name = $1`
- sqlInsertTag = `insert into tags (name) values ($1) returning id`
-)
diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/hashicorp/golang-lru/.gitignore
index 529841c..8365624 100644
--- a/vendor/github.com/jmoiron/sqlx/.gitignore
+++ b/vendor/github.com/hashicorp/golang-lru/.gitignore
@@ -20,5 +20,4 @@ _cgo_export.*
_testmain.go
*.exe
-tags
-environ
+*.test
diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go
new file mode 100644
index 0000000..e474cd0
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/2q.go
@@ -0,0 +1,223 @@
+package lru
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // Default2QRecentRatio is the ratio of the 2Q cache dedicated
+ // to recently added entries that have only been accessed once.
+ Default2QRecentRatio = 0.25
+
+ // Default2QGhostEntries is the default ratio of ghost
+ // entries kept to track entries recently evicted
+ Default2QGhostEntries = 0.50
+)
+
+// TwoQueueCache is a thread-safe fixed size 2Q cache.
+// 2Q is an enhancement over the standard LRU cache
+// in that it tracks both frequently and recently used
+// entries separately. This avoids a burst in access to new
+// entries from evicting frequently used entries. It adds some
+// additional tracking overhead to the standard LRU cache, and is
+// computationally about 2x the cost, and adds some metadata over
+// head. The ARCCache is similar, but does not require setting any
+// parameters.
+type TwoQueueCache struct {
+ size int
+ recentSize int
+
+ recent simplelru.LRUCache
+ frequent simplelru.LRUCache
+ recentEvict simplelru.LRUCache
+ lock sync.RWMutex
+}
+
+// New2Q creates a new TwoQueueCache using the default
+// values for the parameters.
+func New2Q(size int) (*TwoQueueCache, error) {
+ return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
+}
+
+// New2QParams creates a new TwoQueueCache using the provided
+// parameter values.
+func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
+ if size <= 0 {
+ return nil, fmt.Errorf("invalid size")
+ }
+ if recentRatio < 0.0 || recentRatio > 1.0 {
+ return nil, fmt.Errorf("invalid recent ratio")
+ }
+ if ghostRatio < 0.0 || ghostRatio > 1.0 {
+ return nil, fmt.Errorf("invalid ghost ratio")
+ }
+
+ // Determine the sub-sizes
+ recentSize := int(float64(size) * recentRatio)
+ evictSize := int(float64(size) * ghostRatio)
+
+ // Allocate the LRUs
+ recent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ frequent, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ recentEvict, err := simplelru.NewLRU(evictSize, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the cache
+ c := &TwoQueueCache{
+ size: size,
+ recentSize: recentSize,
+ recent: recent,
+ frequent: frequent,
+ recentEvict: recentEvict,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if this is a frequent value
+ if val, ok := c.frequent.Get(key); ok {
+ return val, ok
+ }
+
+ // If the value is contained in recent, then we
+ // promote it to frequent
+ if val, ok := c.recent.Peek(key); ok {
+ c.recent.Remove(key)
+ c.frequent.Add(key, val)
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *TwoQueueCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is frequently used already,
+ // and just update the value
+ if c.frequent.Contains(key) {
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Check if the value is recently used, and promote
+ // the value into the frequent list
+ if c.recent.Contains(key) {
+ c.recent.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // If the value was recently evicted, add it to the
+ // frequently used list
+ if c.recentEvict.Contains(key) {
+ c.ensureSpace(true)
+ c.recentEvict.Remove(key)
+ c.frequent.Add(key, value)
+ return
+ }
+
+ // Add to the recently seen list
+ c.ensureSpace(false)
+ c.recent.Add(key, value)
+ return
+}
+
+// ensureSpace is used to ensure we have space in the cache
+func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
+ // If we have space, nothing to do
+ recentLen := c.recent.Len()
+ freqLen := c.frequent.Len()
+ if recentLen+freqLen < c.size {
+ return
+ }
+
+ // If the recent buffer is larger than
+ // the target, evict from there
+ if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
+ k, _, _ := c.recent.RemoveOldest()
+ c.recentEvict.Add(k, nil)
+ return
+ }
+
+ // Remove from the frequent list otherwise
+ c.frequent.RemoveOldest()
+}
+
+// Len returns the number of items in the cache.
+func (c *TwoQueueCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.recent.Len() + c.frequent.Len()
+}
+
+// Keys returns a slice of the keys in the cache.
+// The frequently used keys are first in the returned slice.
+func (c *TwoQueueCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.frequent.Keys()
+ k2 := c.recent.Keys()
+ return append(k1, k2...)
+}
+
+// Remove removes the provided key from the cache.
+func (c *TwoQueueCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.frequent.Remove(key) {
+ return
+ }
+ if c.recent.Remove(key) {
+ return
+ }
+ if c.recentEvict.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to completely clear the cache.
+func (c *TwoQueueCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.recent.Purge()
+ c.frequent.Purge()
+ c.recentEvict.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *TwoQueueCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.frequent.Contains(key) || c.recent.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.frequent.Peek(key); ok {
+ return val, ok
+ }
+ return c.recent.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000..be2cc4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md
new file mode 100644
index 0000000..33e58cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/README.md
@@ -0,0 +1,25 @@
+golang-lru
+==========
+
+This provides the `lru` package which implements a fixed-size
+thread safe LRU cache. It is based on the cache in Groupcache.
+
+Documentation
+=============
+
+Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
+
+Example
+=======
+
+Using the LRU is very simple:
+
+```go
+l, _ := New(128)
+for i := 0; i < 256; i++ {
+ l.Add(i, nil)
+}
+if l.Len() != 128 {
+ panic(fmt.Sprintf("bad len: %v", l.Len()))
+}
+```
diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go
new file mode 100644
index 0000000..555225a
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/arc.go
@@ -0,0 +1,257 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
+// ARC is an enhancement over the standard LRU cache in that it tracks both
+// frequency and recency of use. This avoids a burst in access to new
+// entries from evicting the frequently used older entries. It adds some
+// additional tracking overhead to a standard LRU cache, computationally
+// it is roughly 2x the cost, and the extra memory overhead is linear
+// with the size of the cache. ARC has been patented by IBM, but is
+// similar to the TwoQueueCache (2Q) which requires setting parameters.
+type ARCCache struct {
+ size int // Size is the total capacity of the cache
+ p int // P is the dynamic preference towards T1 or T2
+
+ t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
+ b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
+
+ t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
+ b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
+
+ lock sync.RWMutex
+}
+
+// NewARC creates an ARC of the given size
+func NewARC(size int) (*ARCCache, error) {
+ // Create the sub LRUs
+ b1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t1, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+ t2, err := simplelru.NewLRU(size, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize the ARC
+ c := &ARCCache{
+ size: size,
+ p: 0,
+ t1: t1,
+ b1: b1,
+ t2: t2,
+ b2: b2,
+ }
+ return c, nil
+}
+
+// Get looks up a key's value from the cache.
+func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // If the value is contained in T1 (recent), then
+ // promote it to T2 (frequent)
+ if val, ok := c.t1.Peek(key); ok {
+ c.t1.Remove(key)
+ c.t2.Add(key, val)
+ return val, ok
+ }
+
+ // Check if the value is contained in T2 (frequent)
+ if val, ok := c.t2.Get(key); ok {
+ return val, ok
+ }
+
+ // No hit
+ return nil, false
+}
+
+// Add adds a value to the cache.
+func (c *ARCCache) Add(key, value interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Check if the value is contained in T1 (recent), and potentially
+ // promote it to frequent T2
+ if c.t1.Contains(key) {
+ c.t1.Remove(key)
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if the value is already in T2 (frequent) and update it
+ if c.t2.Contains(key) {
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // recently used list
+ if c.b1.Contains(key) {
+ // T1 set is too small, increase P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b2Len > b1Len {
+ delta = b2Len / b1Len
+ }
+ if c.p+delta >= c.size {
+ c.p = c.size
+ } else {
+ c.p += delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Remove from B1
+ c.b1.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Check if this value was recently evicted as part of the
+ // frequently used list
+ if c.b2.Contains(key) {
+ // T2 set is too small, decrease P appropriately
+ delta := 1
+ b1Len := c.b1.Len()
+ b2Len := c.b2.Len()
+ if b1Len > b2Len {
+ delta = b1Len / b2Len
+ }
+ if delta >= c.p {
+ c.p = 0
+ } else {
+ c.p -= delta
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(true)
+ }
+
+ // Remove from B2
+ c.b2.Remove(key)
+
+ // Add the key to the frequently used list
+ c.t2.Add(key, value)
+ return
+ }
+
+ // Potentially need to make room in the cache
+ if c.t1.Len()+c.t2.Len() >= c.size {
+ c.replace(false)
+ }
+
+ // Keep the size of the ghost buffers trim
+ if c.b1.Len() > c.size-c.p {
+ c.b1.RemoveOldest()
+ }
+ if c.b2.Len() > c.p {
+ c.b2.RemoveOldest()
+ }
+
+ // Add to the recently seen list
+ c.t1.Add(key, value)
+ return
+}
+
+// replace is used to adaptively evict from either T1 or T2
+// based on the current learned value of P
+func (c *ARCCache) replace(b2ContainsKey bool) {
+ t1Len := c.t1.Len()
+ if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
+ k, _, ok := c.t1.RemoveOldest()
+ if ok {
+ c.b1.Add(k, nil)
+ }
+ } else {
+ k, _, ok := c.t2.RemoveOldest()
+ if ok {
+ c.b2.Add(k, nil)
+ }
+ }
+}
+
+// Len returns the number of cached entries
+func (c *ARCCache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Len() + c.t2.Len()
+}
+
+// Keys returns all the cached keys
+func (c *ARCCache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ k1 := c.t1.Keys()
+ k2 := c.t2.Keys()
+ return append(k1, k2...)
+}
+
+// Remove is used to purge a key from the cache
+func (c *ARCCache) Remove(key interface{}) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.t1.Remove(key) {
+ return
+ }
+ if c.t2.Remove(key) {
+ return
+ }
+ if c.b1.Remove(key) {
+ return
+ }
+ if c.b2.Remove(key) {
+ return
+ }
+}
+
+// Purge is used to clear the cache
+func (c *ARCCache) Purge() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ c.t1.Purge()
+ c.t2.Purge()
+ c.b1.Purge()
+ c.b2.Purge()
+}
+
+// Contains is used to check if the cache contains a key
+// without updating recency or frequency.
+func (c *ARCCache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.t1.Contains(key) || c.t2.Contains(key)
+}
+
+// Peek is used to inspect the cache value of a key
+// without updating recency or frequency.
+func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if val, ok := c.t1.Peek(key); ok {
+ return val, ok
+ }
+ return c.t2.Peek(key)
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go
new file mode 100644
index 0000000..2547df9
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/doc.go
@@ -0,0 +1,21 @@
+// Package lru provides three different LRU caches of varying sophistication.
+//
+// Cache is a simple LRU cache. It is based on the
+// LRU implementation in groupcache:
+// https://github.com/golang/groupcache/tree/master/lru
+//
+// TwoQueueCache tracks frequently used and recently used entries separately.
+// This avoids a burst of accesses from taking out frequently used entries,
+// at the cost of about 2x computational overhead and some extra bookkeeping.
+//
+// ARCCache is an adaptive replacement cache. It tracks recent evictions as
+// well as recent usage in both the frequent and recent caches. Its
+// computational overhead is comparable to TwoQueueCache, but the memory
+// overhead is linear with the size of the cache.
+//
+// ARC has been patented by IBM, so do not use it if that is problematic for
+// your program.
+//
+// All caches in this package take locks while operating, and are therefore
+// thread-safe for consumers.
+package lru
diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go
new file mode 100644
index 0000000..c8d9b0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/lru.go
@@ -0,0 +1,110 @@
+package lru
+
+import (
+ "sync"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+// Cache is a thread-safe fixed size LRU cache.
+type Cache struct {
+ lru simplelru.LRUCache
+ lock sync.RWMutex
+}
+
+// New creates an LRU of the given size.
+func New(size int) (*Cache, error) {
+ return NewWithEvict(size, nil)
+}
+
+// NewWithEvict constructs a fixed size cache with the given eviction
+// callback.
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
+ lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
+ if err != nil {
+ return nil, err
+ }
+ c := &Cache{
+ lru: lru,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *Cache) Purge() {
+ c.lock.Lock()
+ c.lru.Purge()
+ c.lock.Unlock()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *Cache) Add(key, value interface{}) (evicted bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Add(key, value)
+}
+
+// Get looks up a key's value from the cache.
+func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ return c.lru.Get(key)
+}
+
+// Contains checks if a key is in the cache, without updating the
+// recent-ness or deleting it for being stale.
+func (c *Cache) Contains(key interface{}) bool {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Contains(key)
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Peek(key)
+}
+
+// ContainsOrAdd checks if a key is in the cache without updating the
+// recent-ness or deleting it for being stale, and if not, adds the value.
+// Returns whether found and whether an eviction occurred.
+func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ if c.lru.Contains(key) {
+ return true, false
+ }
+ evicted = c.lru.Add(key, value)
+ return false, evicted
+}
+
+// Remove removes the provided key from the cache.
+func (c *Cache) Remove(key interface{}) {
+ c.lock.Lock()
+ c.lru.Remove(key)
+ c.lock.Unlock()
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *Cache) RemoveOldest() {
+ c.lock.Lock()
+ c.lru.RemoveOldest()
+ c.lock.Unlock()
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *Cache) Keys() []interface{} {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Keys()
+}
+
+// Len returns the number of items in the cache.
+func (c *Cache) Len() int {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ return c.lru.Len()
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 0000000..5673773
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,161 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("Must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ entry := c.evictList.PushFront(ent)
+ c.items[key] = entry
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ var ent *list.Element
+ if ent, ok = c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 0000000..744cac0
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,37 @@
+package simplelru
+
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+ // Adds a value to the cache, returns true if an eviction occurred and
+ // updates the "recently used"-ness of the key.
+ Add(key, value interface{}) bool
+
+ // Returns key's value from the cache and
+ // updates the "recently used"-ness of the key. #value, isFound
+ Get(key interface{}) (value interface{}, ok bool)
+
+	// Check if a key exists in cache without updating the recent-ness.
+ Contains(key interface{}) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key interface{}) (value interface{}, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key interface{}) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (interface{}, interface{}, bool)
+
+ // Returns the oldest entry from the cache. #key, value, isFound
+ GetOldest() (interface{}, interface{}, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []interface{}
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clear all cache entries
+ Purge()
+}
diff --git a/vendor/github.com/jackc/pgx/stdlib/sql.go b/vendor/github.com/jackc/pgx/stdlib/sql.go
deleted file mode 100644
index 2d4930e..0000000
--- a/vendor/github.com/jackc/pgx/stdlib/sql.go
+++ /dev/null
@@ -1,609 +0,0 @@
-// Package stdlib is the compatibility layer from pgx to database/sql.
-//
-// A database/sql connection can be established through sql.Open.
-//
-// db, err := sql.Open("pgx", "postgres://pgx_md5:secret@localhost:5432/pgx_test?sslmode=disable")
-// if err != nil {
-// return err
-// }
-//
-// Or from a DSN string.
-//
-// db, err := sql.Open("pgx", "user=postgres password=secret host=localhost port=5432 database=pgx_test sslmode=disable")
-// if err != nil {
-// return err
-// }
-//
-// A DriverConfig can be used to further configure the connection process. This
-// allows configuring TLS configuration, setting a custom dialer, logging, and
-// setting an AfterConnect hook.
-//
-// driverConfig := stdlib.DriverConfig{
-// ConnConfig: pgx.ConnConfig{
-// Logger: logger,
-// },
-// AfterConnect: func(c *pgx.Conn) error {
-// // Ensure all connections have this temp table available
-// _, err := c.Exec("create temporary table foo(...)")
-// return err
-// },
-// }
-//
-// stdlib.RegisterDriverConfig(&driverConfig)
-//
-// db, err := sql.Open("pgx", driverConfig.ConnectionString("postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test"))
-// if err != nil {
-// return err
-// }
-//
-// pgx uses standard PostgreSQL positional parameters in queries. e.g. $1, $2.
-// It does not support named parameters.
-//
-// db.QueryRow("select * from users where id=$1", userID)
-//
-// AcquireConn and ReleaseConn acquire and release a *pgx.Conn from the standard
-// database/sql.DB connection pool. This allows operations that must be
-// performed on a single connection, but should not be run in a transaction or
-// to use pgx specific functionality.
-//
-// conn, err := stdlib.AcquireConn(db)
-// if err != nil {
-// return err
-// }
-// defer stdlib.ReleaseConn(db, conn)
-//
-// // do stuff with pgx.Conn
-//
-// It also can be used to enable a fast path for pgx while preserving
-// compatibility with other drivers and database.
-//
-// conn, err := stdlib.AcquireConn(db)
-// if err == nil {
-// // fast path with pgx
-// // ...
-// // release conn when done
-// stdlib.ReleaseConn(db, conn)
-// } else {
-// // normal path for other drivers and databases
-// }
-package stdlib
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "fmt"
- "io"
- "reflect"
- "strings"
- "sync"
-
- "github.com/pkg/errors"
-
- "github.com/jackc/pgx"
- "github.com/jackc/pgx/pgtype"
-)
-
-// oids that map to intrinsic database/sql types. These will be allowed to be
-// binary, anything else will be forced to text format
-var databaseSqlOIDs map[pgtype.OID]bool
-
-var pgxDriver *Driver
-
-type ctxKey int
-
-var ctxKeyFakeTx ctxKey = 0
-
-var ErrNotPgx = errors.New("not pgx *sql.DB")
-
-func init() {
- pgxDriver = &Driver{
- configs: make(map[int64]*DriverConfig),
- fakeTxConns: make(map[*pgx.Conn]*sql.Tx),
- }
- sql.Register("pgx", pgxDriver)
-
- databaseSqlOIDs = make(map[pgtype.OID]bool)
- databaseSqlOIDs[pgtype.BoolOID] = true
- databaseSqlOIDs[pgtype.ByteaOID] = true
- databaseSqlOIDs[pgtype.CIDOID] = true
- databaseSqlOIDs[pgtype.DateOID] = true
- databaseSqlOIDs[pgtype.Float4OID] = true
- databaseSqlOIDs[pgtype.Float8OID] = true
- databaseSqlOIDs[pgtype.Int2OID] = true
- databaseSqlOIDs[pgtype.Int4OID] = true
- databaseSqlOIDs[pgtype.Int8OID] = true
- databaseSqlOIDs[pgtype.OIDOID] = true
- databaseSqlOIDs[pgtype.TimestampOID] = true
- databaseSqlOIDs[pgtype.TimestamptzOID] = true
- databaseSqlOIDs[pgtype.XIDOID] = true
-}
-
-type Driver struct {
- configMutex sync.Mutex
- configCount int64
- configs map[int64]*DriverConfig
-
- fakeTxMutex sync.Mutex
- fakeTxConns map[*pgx.Conn]*sql.Tx
-}
-
-func (d *Driver) Open(name string) (driver.Conn, error) {
- var connConfig pgx.ConnConfig
- var afterConnect func(*pgx.Conn) error
- if len(name) >= 9 && name[0] == 0 {
- idBuf := []byte(name)[1:9]
- id := int64(binary.BigEndian.Uint64(idBuf))
- connConfig = d.configs[id].ConnConfig
- afterConnect = d.configs[id].AfterConnect
- name = name[9:]
- }
-
- parsedConfig, err := pgx.ParseConnectionString(name)
- if err != nil {
- return nil, err
- }
- connConfig = connConfig.Merge(parsedConfig)
-
- conn, err := pgx.Connect(connConfig)
- if err != nil {
- return nil, err
- }
-
- if afterConnect != nil {
- err = afterConnect(conn)
- if err != nil {
- return nil, err
- }
- }
-
- c := &Conn{conn: conn, driver: d, connConfig: connConfig}
- return c, nil
-}
-
-type DriverConfig struct {
- pgx.ConnConfig
- AfterConnect func(*pgx.Conn) error // function to call on every new connection
- driver *Driver
- id int64
-}
-
-// ConnectionString encodes the DriverConfig into the original connection
-// string. DriverConfig must be registered before calling ConnectionString.
-func (c *DriverConfig) ConnectionString(original string) string {
- if c.driver == nil {
- panic("DriverConfig must be registered before calling ConnectionString")
- }
-
- buf := make([]byte, 9)
- binary.BigEndian.PutUint64(buf[1:], uint64(c.id))
- buf = append(buf, original...)
- return string(buf)
-}
-
-func (d *Driver) registerDriverConfig(c *DriverConfig) {
- d.configMutex.Lock()
-
- c.driver = d
- c.id = d.configCount
- d.configs[d.configCount] = c
- d.configCount++
-
- d.configMutex.Unlock()
-}
-
-func (d *Driver) unregisterDriverConfig(c *DriverConfig) {
- d.configMutex.Lock()
- delete(d.configs, c.id)
- d.configMutex.Unlock()
-}
-
-// RegisterDriverConfig registers a DriverConfig for use with Open.
-func RegisterDriverConfig(c *DriverConfig) {
- pgxDriver.registerDriverConfig(c)
-}
-
-// UnregisterDriverConfig removes a DriverConfig registration.
-func UnregisterDriverConfig(c *DriverConfig) {
- pgxDriver.unregisterDriverConfig(c)
-}
-
-type Conn struct {
- conn *pgx.Conn
- psCount int64 // Counter used for creating unique prepared statement names
- driver *Driver
- connConfig pgx.ConnConfig
-}
-
-func (c *Conn) Prepare(query string) (driver.Stmt, error) {
- return c.PrepareContext(context.Background(), query)
-}
-
-func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- name := fmt.Sprintf("pgx_%d", c.psCount)
- c.psCount++
-
- ps, err := c.conn.PrepareEx(ctx, name, query, nil)
- if err != nil {
- return nil, err
- }
-
- restrictBinaryToDatabaseSqlTypes(ps)
-
- return &Stmt{ps: ps, conn: c}, nil
-}
-
-func (c *Conn) Close() error {
- return c.conn.Close()
-}
-
-func (c *Conn) Begin() (driver.Tx, error) {
- return c.BeginTx(context.Background(), driver.TxOptions{})
-}
-
-func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- if pconn, ok := ctx.Value(ctxKeyFakeTx).(**pgx.Conn); ok {
- *pconn = c.conn
- return fakeTx{}, nil
- }
-
- var pgxOpts pgx.TxOptions
- switch sql.IsolationLevel(opts.Isolation) {
- case sql.LevelDefault:
- case sql.LevelReadUncommitted:
- pgxOpts.IsoLevel = pgx.ReadUncommitted
- case sql.LevelReadCommitted:
- pgxOpts.IsoLevel = pgx.ReadCommitted
- case sql.LevelSnapshot:
- pgxOpts.IsoLevel = pgx.RepeatableRead
- case sql.LevelSerializable:
- pgxOpts.IsoLevel = pgx.Serializable
- default:
- return nil, errors.Errorf("unsupported isolation: %v", opts.Isolation)
- }
-
- if opts.ReadOnly {
- pgxOpts.AccessMode = pgx.ReadOnly
- }
-
- return c.conn.BeginEx(ctx, &pgxOpts)
-}
-
-func (c *Conn) Exec(query string, argsV []driver.Value) (driver.Result, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- args := valueToInterface(argsV)
- commandTag, err := c.conn.Exec(query, args...)
- return driver.RowsAffected(commandTag.RowsAffected()), err
-}
-
-func (c *Conn) ExecContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Result, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- args := namedValueToInterface(argsV)
-
- commandTag, err := c.conn.ExecEx(ctx, query, nil, args...)
- return driver.RowsAffected(commandTag.RowsAffected()), err
-}
-
-func (c *Conn) Query(query string, argsV []driver.Value) (driver.Rows, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- if !c.connConfig.PreferSimpleProtocol {
- ps, err := c.conn.Prepare("", query)
- if err != nil {
- return nil, err
- }
-
- restrictBinaryToDatabaseSqlTypes(ps)
- return c.queryPrepared("", argsV)
- }
-
- rows, err := c.conn.Query(query, valueToInterface(argsV)...)
- if err != nil {
- return nil, err
- }
-
- // Preload first row because otherwise we won't know what columns are available when database/sql asks.
- more := rows.Next()
- return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
-}
-
-func (c *Conn) QueryContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Rows, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- if !c.connConfig.PreferSimpleProtocol {
- ps, err := c.conn.PrepareEx(ctx, "", query, nil)
- if err != nil {
- return nil, err
- }
-
- restrictBinaryToDatabaseSqlTypes(ps)
- return c.queryPreparedContext(ctx, "", argsV)
- }
-
- rows, err := c.conn.QueryEx(ctx, query, nil, namedValueToInterface(argsV)...)
- if err != nil {
- return nil, err
- }
-
- // Preload first row because otherwise we won't know what columns are available when database/sql asks.
- more := rows.Next()
- return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
-}
-
-func (c *Conn) queryPrepared(name string, argsV []driver.Value) (driver.Rows, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- args := valueToInterface(argsV)
-
- rows, err := c.conn.Query(name, args...)
- if err != nil {
- return nil, err
- }
-
- return &Rows{rows: rows}, nil
-}
-
-func (c *Conn) queryPreparedContext(ctx context.Context, name string, argsV []driver.NamedValue) (driver.Rows, error) {
- if !c.conn.IsAlive() {
- return nil, driver.ErrBadConn
- }
-
- args := namedValueToInterface(argsV)
-
- rows, err := c.conn.QueryEx(ctx, name, nil, args...)
- if err != nil {
- return nil, err
- }
-
- return &Rows{rows: rows}, nil
-}
-
-func (c *Conn) Ping(ctx context.Context) error {
- if !c.conn.IsAlive() {
- return driver.ErrBadConn
- }
-
- return c.conn.Ping(ctx)
-}
-
-// Anything that isn't a database/sql compatible type needs to be forced to
-// text format so that pgx.Rows.Values doesn't decode it into a native type
-// (e.g. []int32)
-func restrictBinaryToDatabaseSqlTypes(ps *pgx.PreparedStatement) {
- for i := range ps.FieldDescriptions {
- intrinsic, _ := databaseSqlOIDs[ps.FieldDescriptions[i].DataType]
- if !intrinsic {
- ps.FieldDescriptions[i].FormatCode = pgx.TextFormatCode
- }
- }
-}
-
-type Stmt struct {
- ps *pgx.PreparedStatement
- conn *Conn
-}
-
-func (s *Stmt) Close() error {
- return s.conn.conn.Deallocate(s.ps.Name)
-}
-
-func (s *Stmt) NumInput() int {
- return len(s.ps.ParameterOIDs)
-}
-
-func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
- return s.conn.Exec(s.ps.Name, argsV)
-}
-
-func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
- return s.conn.ExecContext(ctx, s.ps.Name, argsV)
-}
-
-func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
- return s.conn.queryPrepared(s.ps.Name, argsV)
-}
-
-func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
- return s.conn.queryPreparedContext(ctx, s.ps.Name, argsV)
-}
-
-type Rows struct {
- rows *pgx.Rows
- values []interface{}
- skipNext bool
- skipNextMore bool
-}
-
-func (r *Rows) Columns() []string {
- fieldDescriptions := r.rows.FieldDescriptions()
- names := make([]string, 0, len(fieldDescriptions))
- for _, fd := range fieldDescriptions {
- names = append(names, fd.Name)
- }
- return names
-}
-
-// ColumnTypeDatabaseTypeName return the database system type name.
-func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
- return strings.ToUpper(r.rows.FieldDescriptions()[index].DataTypeName)
-}
-
-// ColumnTypeLength returns the length of the column type if the column is a
-// variable length type. If the column is not a variable length type ok
-// should return false.
-func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
- return r.rows.FieldDescriptions()[index].Length()
-}
-
-// ColumnTypePrecisionScale should return the precision and scale for decimal
-// types. If not applicable, ok should be false.
-func (r *Rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
- return r.rows.FieldDescriptions()[index].PrecisionScale()
-}
-
-// ColumnTypeScanType returns the value type that can be used to scan types into.
-func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
- return r.rows.FieldDescriptions()[index].Type()
-}
-
-func (r *Rows) Close() error {
- r.rows.Close()
- return nil
-}
-
-func (r *Rows) Next(dest []driver.Value) error {
- if r.values == nil {
- r.values = make([]interface{}, len(r.rows.FieldDescriptions()))
- for i, fd := range r.rows.FieldDescriptions() {
- switch fd.DataType {
- case pgtype.BoolOID:
- r.values[i] = &pgtype.Bool{}
- case pgtype.ByteaOID:
- r.values[i] = &pgtype.Bytea{}
- case pgtype.CIDOID:
- r.values[i] = &pgtype.CID{}
- case pgtype.DateOID:
- r.values[i] = &pgtype.Date{}
- case pgtype.Float4OID:
- r.values[i] = &pgtype.Float4{}
- case pgtype.Float8OID:
- r.values[i] = &pgtype.Float8{}
- case pgtype.Int2OID:
- r.values[i] = &pgtype.Int2{}
- case pgtype.Int4OID:
- r.values[i] = &pgtype.Int4{}
- case pgtype.Int8OID:
- r.values[i] = &pgtype.Int8{}
- case pgtype.OIDOID:
- r.values[i] = &pgtype.OIDValue{}
- case pgtype.TimestampOID:
- r.values[i] = &pgtype.Timestamp{}
- case pgtype.TimestamptzOID:
- r.values[i] = &pgtype.Timestamptz{}
- case pgtype.XIDOID:
- r.values[i] = &pgtype.XID{}
- default:
- r.values[i] = &pgtype.GenericText{}
- }
- }
- }
-
- var more bool
- if r.skipNext {
- more = r.skipNextMore
- r.skipNext = false
- } else {
- more = r.rows.Next()
- }
-
- if !more {
- if r.rows.Err() == nil {
- return io.EOF
- } else {
- return r.rows.Err()
- }
- }
-
- err := r.rows.Scan(r.values...)
- if err != nil {
- return err
- }
-
- for i, v := range r.values {
- dest[i], err = v.(driver.Valuer).Value()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func valueToInterface(argsV []driver.Value) []interface{} {
- args := make([]interface{}, 0, len(argsV))
- for _, v := range argsV {
- if v != nil {
- args = append(args, v.(interface{}))
- } else {
- args = append(args, nil)
- }
- }
- return args
-}
-
-func namedValueToInterface(argsV []driver.NamedValue) []interface{} {
- args := make([]interface{}, 0, len(argsV))
- for _, v := range argsV {
- if v.Value != nil {
- args = append(args, v.Value.(interface{}))
- } else {
- args = append(args, nil)
- }
- }
- return args
-}
-
-type fakeTx struct{}
-
-func (fakeTx) Commit() error { return nil }
-
-func (fakeTx) Rollback() error { return nil }
-
-func AcquireConn(db *sql.DB) (*pgx.Conn, error) {
- driver, ok := db.Driver().(*Driver)
- if !ok {
- return nil, ErrNotPgx
- }
-
- var conn *pgx.Conn
- ctx := context.WithValue(context.Background(), ctxKeyFakeTx, &conn)
- tx, err := db.BeginTx(ctx, nil)
- if err != nil {
- return nil, err
- }
-
- driver.fakeTxMutex.Lock()
- driver.fakeTxConns[conn] = tx
- driver.fakeTxMutex.Unlock()
-
- return conn, nil
-}
-
-func ReleaseConn(db *sql.DB, conn *pgx.Conn) error {
- var tx *sql.Tx
- var ok bool
-
- driver := db.Driver().(*Driver)
- driver.fakeTxMutex.Lock()
- tx, ok = driver.fakeTxConns[conn]
- if ok {
- delete(driver.fakeTxConns, conn)
- driver.fakeTxMutex.Unlock()
- } else {
- driver.fakeTxMutex.Unlock()
- return errors.Errorf("can't release conn that is not acquired")
- }
-
- return tx.Rollback()
-}
diff --git a/vendor/github.com/jmoiron/sqlx/LICENSE b/vendor/github.com/jmoiron/sqlx/LICENSE
deleted file mode 100644
index 0d31edf..0000000
--- a/vendor/github.com/jmoiron/sqlx/LICENSE
+++ /dev/null
@@ -1,23 +0,0 @@
- Copyright (c) 2013, Jason Moiron
-
- Permission is hereby granted, free of charge, to any person
- obtaining a copy of this software and associated documentation
- files (the "Software"), to deal in the Software without
- restriction, including without limitation the rights to use,
- copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following
- conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
deleted file mode 100644
index d2d1258..0000000
--- a/vendor/github.com/jmoiron/sqlx/README.md
+++ /dev/null
@@ -1,185 +0,0 @@
-# sqlx
-
-[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
-
-sqlx is a library which provides a set of extensions on go's standard
-`database/sql` library. The sqlx versions of `sql.DB`, `sql.TX`, `sql.Stmt`,
-et al. all leave the underlying interfaces untouched, so that their interfaces
-are a superset on the standard ones. This makes it relatively painless to
-integrate existing codebases using database/sql with sqlx.
-
-Major additional concepts are:
-
-* Marshal rows into structs (with embedded struct support), maps, and slices
-* Named parameter support including prepared statements
-* `Get` and `Select` to go quickly from query to struct/slice
-
-In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
-there is also some [standard documentation](http://jmoiron.github.io/sqlx/) that
-explains how to use `database/sql` along with sqlx.
-
-## Recent Changes
-
-* sqlx/types.JsonText has been renamed to JSONText to follow Go naming conventions.
-
-This breaks backwards compatibility, but it's in a way that is trivially fixable
-(`s/JsonText/JSONText/g`). The `types` package is both experimental and not in
-active development currently.
-
-* Using Go 1.6 and below with `types.JSONText` and `types.GzippedText` can be _potentially unsafe_, **especially** when used with common auto-scan sqlx idioms like `Select` and `Get`. See [golang bug #13905](https://github.com/golang/go/issues/13905).
-
-### Backwards Compatibility
-
-There is no Go1-like promise of absolute stability, but I take the issue seriously
-and will maintain the library in a compatible state unless vital bugs prevent me
-from doing so. Since [#59](https://github.com/jmoiron/sqlx/issues/59) and
-[#60](https://github.com/jmoiron/sqlx/issues/60) necessitated breaking behavior,
-a wider API cleanup was done at the time of fixing. It's possible this will happen
-in future; if it does, a git tag will be provided for users requiring the old
-behavior to continue to use it until such a time as they can migrate.
-
-## install
-
- go get github.com/jmoiron/sqlx
-
-## issues
-
-Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
-`Columns()` does not fully qualify column names in queries like:
-
-```sql
-SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
-```
-
-making a struct or map destination ambiguous. Use `AS` in your queries
-to give columns distinct names, `rows.Scan` to scan them manually, or
-`SliceScan` to get a slice of results.
-
-## usage
-
-Below is an example which shows some common use cases for sqlx. Check
-[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
-usage.
-
-
-```go
-package main
-
-import (
- "database/sql"
- "fmt"
- "log"
-
- _ "github.com/lib/pq"
- "github.com/jmoiron/sqlx"
-)
-
-var schema = `
-CREATE TABLE person (
- first_name text,
- last_name text,
- email text
-);
-
-CREATE TABLE place (
- country text,
- city text NULL,
- telcode integer
-)`
-
-type Person struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
-}
-
-type Place struct {
- Country string
- City sql.NullString
- TelCode int
-}
-
-func main() {
- // this Pings the database trying to connect, panics on error
- // use sqlx.Open() for sql.Open() semantics
- db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable")
- if err != nil {
- log.Fatalln(err)
- }
-
- // exec the schema or fail; multi-statement Exec behavior varies between
- // database drivers; pq will exec them all, sqlite3 won't, ymmv
- db.MustExec(schema)
-
- tx := db.MustBegin()
- tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net")
- tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net")
- tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1")
- tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852")
- tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65")
- // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person
- tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"})
- tx.Commit()
-
- // Query the database, storing results in a []Person (wrapped in []interface{})
- people := []Person{}
- db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
- jason, john := people[0], people[1]
-
- fmt.Printf("%#v\n%#v", jason, john)
- // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
- // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"}
-
- // You can also get a single result, a la QueryRow
- jason = Person{}
- err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason")
- fmt.Printf("%#v\n", jason)
- // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"}
-
- // if you have null fields and use SELECT *, you must use sql.Null* in your struct
- places := []Place{}
- err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- fmt.Println(err)
- return
- }
- usa, singsing, honkers := places[0], places[1], places[2]
-
- fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers)
- // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
- // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
- // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
-
- // Loop through rows using only one struct
- place := Place{}
- rows, err := db.Queryx("SELECT * FROM place")
- for rows.Next() {
- err := rows.StructScan(&place)
- if err != nil {
- log.Fatalln(err)
- }
- fmt.Printf("%#v\n", place)
- }
- // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1}
- // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852}
- // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65}
-
- // Named queries, using `:name` as the bindvar. Automatic bindvar support
- // which takes into account the dbtype based on the driverName on sqlx.Open/Connect
- _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`,
- map[string]interface{}{
- "first": "Bin",
- "last": "Smuth",
- "email": "bensmith@allblacks.nz",
- })
-
- // Selects Mr. Smith from the database
- rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"})
-
- // Named queries can also use structs. Their bind names follow the same rules
- // as the name -> db mapping, so struct fields are lowercased and the `db` tag
- // is taken into consideration.
- rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason)
-}
-```
-
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
deleted file mode 100644
index b81e6fc..0000000
--- a/vendor/github.com/jmoiron/sqlx/bind.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sqlx
-
-import (
- "bytes"
- "errors"
- "reflect"
- "strconv"
- "strings"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// Bindvar types supported by Rebind, BindMap and BindStruct.
-const (
- UNKNOWN = iota
- QUESTION
- DOLLAR
- NAMED
-)
-
-// BindType returns the bindtype for a given database given a drivername.
-func BindType(driverName string) int {
- switch driverName {
- case "postgres", "pgx", "pq-timeouts":
- return DOLLAR
- case "mysql":
- return QUESTION
- case "sqlite3":
- return QUESTION
- case "oci8", "ora", "goracle":
- return NAMED
- }
- return UNKNOWN
-}
-
-// FIXME: this should be able to be tolerant of escaped ?'s in queries without
-// losing much speed, and should be to avoid confusion.
-
-// Rebind a query from the default bindtype (QUESTION) to the target bindtype.
-func Rebind(bindType int, query string) string {
- switch bindType {
- case QUESTION, UNKNOWN:
- return query
- }
-
- // Add space enough for 10 params before we have to allocate
- rqb := make([]byte, 0, len(query)+10)
-
- var i, j int
-
- for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
- rqb = append(rqb, query[:i]...)
-
- switch bindType {
- case DOLLAR:
- rqb = append(rqb, '$')
- case NAMED:
- rqb = append(rqb, ':', 'a', 'r', 'g')
- }
-
- j++
- rqb = strconv.AppendInt(rqb, int64(j), 10)
-
- query = query[i+1:]
- }
-
- return string(append(rqb, query...))
-}
-
-// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
-// much simpler and should be more resistant to odd unicode, but it is twice as
-// slow. Kept here for benchmarking purposes and to possibly replace Rebind if
-// problems arise with its somewhat naive handling of unicode.
-func rebindBuff(bindType int, query string) string {
- if bindType != DOLLAR {
- return query
- }
-
- b := make([]byte, 0, len(query))
- rqb := bytes.NewBuffer(b)
- j := 1
- for _, r := range query {
- if r == '?' {
- rqb.WriteRune('$')
- rqb.WriteString(strconv.Itoa(j))
- j++
- } else {
- rqb.WriteRune(r)
- }
- }
-
- return rqb.String()
-}
-
-// In expands slice values in args, returning the modified query string
-// and a new arg list that can be executed by a database. The `query` should
-// use the `?` bindVar. The return value uses the `?` bindVar.
-func In(query string, args ...interface{}) (string, []interface{}, error) {
- // argMeta stores reflect.Value and length for slices and
- // the value itself for non-slice arguments
- type argMeta struct {
- v reflect.Value
- i interface{}
- length int
- }
-
- var flatArgsCount int
- var anySlices bool
-
- meta := make([]argMeta, len(args))
-
- for i, arg := range args {
- v := reflect.ValueOf(arg)
- t := reflectx.Deref(v.Type())
-
- if t.Kind() == reflect.Slice {
- meta[i].length = v.Len()
- meta[i].v = v
-
- anySlices = true
- flatArgsCount += meta[i].length
-
- if meta[i].length == 0 {
- return "", nil, errors.New("empty slice passed to 'in' query")
- }
- } else {
- meta[i].i = arg
- flatArgsCount++
- }
- }
-
- // don't do any parsing if there aren't any slices; note that this means
- // some errors that we might have caught below will not be returned.
- if !anySlices {
- return query, args, nil
- }
-
- newArgs := make([]interface{}, 0, flatArgsCount)
- buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount))
-
- var arg, offset int
-
- for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
- if arg >= len(meta) {
- // if an argument wasn't passed, lets return an error; this is
- // not actually how database/sql Exec/Query works, but since we are
- // creating an argument list programmatically, we want to be able
- // to catch these programmer errors earlier.
- return "", nil, errors.New("number of bindVars exceeds arguments")
- }
-
- argMeta := meta[arg]
- arg++
-
- // not a slice, continue.
- // our questionmark will either be written before the next expansion
- // of a slice or after the loop when writing the rest of the query
- if argMeta.length == 0 {
- offset = offset + i + 1
- newArgs = append(newArgs, argMeta.i)
- continue
- }
-
- // write everything up to and including our ? character
- buf.WriteString(query[:offset+i+1])
-
- for si := 1; si < argMeta.length; si++ {
- buf.WriteString(", ?")
- }
-
- newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
-
- // slice the query and reset the offset. this avoids some bookkeeping for
- // the write after the loop
- query = query[offset+i+1:]
- offset = 0
- }
-
- buf.WriteString(query)
-
- if arg < len(meta) {
- return "", nil, errors.New("number of bindVars less than number arguments")
- }
-
- return buf.String(), newArgs, nil
-}
-
-func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
- switch val := v.Interface().(type) {
- case []interface{}:
- args = append(args, val...)
- case []int:
- for i := range val {
- args = append(args, val[i])
- }
- case []string:
- for i := range val {
- args = append(args, val[i])
- }
- default:
- for si := 0; si < vlen; si++ {
- args = append(args, v.Index(si).Interface())
- }
- }
-
- return args
-}
diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go
deleted file mode 100644
index e2b4e60..0000000
--- a/vendor/github.com/jmoiron/sqlx/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Package sqlx provides general purpose extensions to database/sql.
-//
-// It is intended to seamlessly wrap database/sql and provide convenience
-// methods which are useful in the development of database driven applications.
-// None of the underlying database/sql methods are changed. Instead all extended
-// behavior is implemented through new methods defined on wrapper types.
-//
-// Additions include scanning into structs, named query support, rebinding
-// queries for different drivers, convenient shorthands for common error handling
-// and more.
-//
-package sqlx
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
deleted file mode 100644
index 69eb954..0000000
--- a/vendor/github.com/jmoiron/sqlx/named.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package sqlx
-
-// Named Query Support
-//
-// * BindMap - bind query bindvars to map/struct args
-// * NamedExec, NamedQuery - named query w/ struct or map
-// * NamedStmt - a pre-compiled named query which is a prepared statement
-//
-// Internal Interfaces:
-//
-// * compileNamedQuery - rebind a named query, returning a query and list of names
-// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist
-//
-import (
- "database/sql"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "unicode"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// NamedStmt is a prepared statement that executes named queries. Prepare it
-// how you would execute a NamedQuery, but pass in a struct or map when executing.
-type NamedStmt struct {
- Params []string
- QueryString string
- Stmt *Stmt
-}
-
-// Close closes the named statement.
-func (n *NamedStmt) Close() error {
- return n.Stmt.Close()
-}
-
-// Exec executes a named statement using the struct passed.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return *new(sql.Result), err
- }
- return n.Stmt.Exec(args...)
-}
-
-// Query executes a named statement using the struct argument, returning rows.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return nil, err
- }
- return n.Stmt.Query(args...)
-}
-
-// QueryRow executes a named statement against the database. Because sqlx cannot
-// create a *sql.Row with an error condition pre-set for binding errors, sqlx
-// returns a *sqlx.Row instead.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRow(arg interface{}) *Row {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return &Row{err: err}
- }
- return n.Stmt.QueryRowx(args...)
-}
-
-// MustExec execs a NamedStmt, panicing on error
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) MustExec(arg interface{}) sql.Result {
- res, err := n.Exec(arg)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// Queryx using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) {
- r, err := n.Query(arg)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
-}
-
-// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is
-// an alias for QueryRow.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowx(arg interface{}) *Row {
- return n.QueryRow(arg)
-}
-
-// Select using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Select(dest interface{}, arg interface{}) error {
- rows, err := n.Queryx(arg)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) Get(dest interface{}, arg interface{}) error {
- r := n.QueryRowx(arg)
- return r.scanAny(dest, false)
-}
-
-// Unsafe creates an unsafe version of the NamedStmt
-func (n *NamedStmt) Unsafe() *NamedStmt {
- r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString}
- r.Stmt.unsafe = true
- return r
-}
-
-// A union interface of preparer and binder, required to be able to prepare
-// named statements (as the bindtype must be determined).
-type namedPreparer interface {
- Preparer
- binder
-}
-
-func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) {
- bindType := BindType(p.DriverName())
- q, args, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return nil, err
- }
- stmt, err := Preparex(p, q)
- if err != nil {
- return nil, err
- }
- return &NamedStmt{
- QueryString: q,
- Params: args,
- Stmt: stmt,
- }, nil
-}
-
-func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMapArgs(names, maparg)
- }
- return bindArgs(names, arg, m)
-}
-
-// private interface to generate a list of interfaces from a given struct
-// type, given a list of names to pull out of the struct. Used by public
-// BindStruct interface.
-func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- // grab the indirected value of arg
- v := reflect.ValueOf(arg)
- for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; {
- v = v.Elem()
- }
-
- err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
- if len(t) == 0 {
- return fmt.Errorf("could not find name %s in %#v", names[i], arg)
- }
-
- val := reflectx.FieldByIndexesReadOnly(v, t)
- arglist = append(arglist, val.Interface())
-
- return nil
- })
-
- return arglist, err
-}
-
-// like bindArgs, but for maps.
-func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) {
- arglist := make([]interface{}, 0, len(names))
-
- for _, name := range names {
- val, ok := arg[name]
- if !ok {
- return arglist, fmt.Errorf("could not find name %s in %#v", name, arg)
- }
- arglist = append(arglist, val)
- }
- return arglist, nil
-}
-
-// bindStruct binds a named parameter query with fields from a struct argument.
-// The rules for binding field names to parameter names follow the same
-// conventions as for StructScan, including obeying the `db` struct tags.
-func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindArgs(names, arg, m)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- return bound, arglist, nil
-}
-
-// bindMap binds a named parameter query with a map of arguments.
-func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
- bound, names, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return "", []interface{}{}, err
- }
-
- arglist, err := bindMapArgs(names, args)
- return bound, arglist, err
-}
-
-// -- Compilation of Named Queries
-
-// Allow digits and letters in bind params; additionally runes are
-// checked against underscores, meaning that bind params can have be
-// alphanumeric with underscores. Mind the difference between unicode
-// digits and numbers, where '5' is a digit but '五' is not.
-var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
-
-// FIXME: this function isn't safe for unicode named params, as a failing test
-// can testify. This is not a regression but a failure of the original code
-// as well. It should be modified to range over runes in a string rather than
-// bytes, even though this is less convenient and slower. Hopefully the
-// addition of the prepared NamedStmt (which will only do this once) will make
-// up for the slightly slower ad-hoc NamedExec/NamedQuery.
-
-// compile a NamedQuery into an unbound query (using the '?' bindvar) and
-// a list of names.
-func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) {
- names = make([]string, 0, 10)
- rebound := make([]byte, 0, len(qs))
-
- inName := false
- last := len(qs) - 1
- currentVar := 1
- name := make([]byte, 0, 10)
-
- for i, b := range qs {
- // a ':' while we're in a name is an error
- if b == ':' {
- // if this is the second ':' in a '::' escape sequence, append a ':'
- if inName && i > 0 && qs[i-1] == ':' {
- rebound = append(rebound, ':')
- inName = false
- continue
- } else if inName {
- err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i))
- return query, names, err
- }
- inName = true
- name = []byte{}
- // if we're in a name, and this is an allowed character, continue
- } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last {
- // append the byte to the name if we are in a name and not on the last byte
- name = append(name, b)
- // if we're in a name and it's not an allowed character, the name is done
- } else if inName {
- inName = false
- // if this is the final byte of the string and it is part of the name, then
- // make sure to add it to the name
- if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) {
- name = append(name, b)
- }
- // add the string representation to the names list
- names = append(names, string(name))
- // add a proper bindvar for the bindType
- switch bindType {
- // oracle only supports named type bind vars even for positional
- case NAMED:
- rebound = append(rebound, ':')
- rebound = append(rebound, name...)
- case QUESTION, UNKNOWN:
- rebound = append(rebound, '?')
- case DOLLAR:
- rebound = append(rebound, '$')
- for _, b := range strconv.Itoa(currentVar) {
- rebound = append(rebound, byte(b))
- }
- currentVar++
- }
- // add this byte to string unless it was not part of the name
- if i != last {
- rebound = append(rebound, b)
- } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) {
- rebound = append(rebound, b)
- }
- } else {
- // this is a normal byte and should just go onto the rebound query
- rebound = append(rebound, b)
- }
- }
-
- return string(rebound), names, err
-}
-
-// BindNamed binds a struct or a map to a query with named parameters.
-// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future.
-func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(bindType, query, arg, mapper())
-}
-
-// Named takes a query using named parameters and an argument and
-// returns a new query with a list of args that can be executed by
-// a database. The return value uses the `?` bindvar.
-func Named(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(QUESTION, query, arg, mapper())
-}
-
-func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
- if maparg, ok := arg.(map[string]interface{}); ok {
- return bindMap(bindType, query, maparg)
- }
- return bindStruct(bindType, query, arg, m)
-}
-
-// NamedQuery binds a named query and then runs Query on the result using the
-// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
-// map[string]interface{} types.
-func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Queryx(q, args...)
-}
-
-// NamedExec uses BindStruct to get a query executable by the driver and
-// then runs Exec on the result. Returns an error from the binding
-// or the query excution itself.
-func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.Exec(q, args...)
-}
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
deleted file mode 100644
index 9405007..0000000
--- a/vendor/github.com/jmoiron/sqlx/named_context.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// +build go1.8
-
-package sqlx
-
-import (
- "context"
- "database/sql"
-)
-
-// A union interface of contextPreparer and binder, required to be able to
-// prepare named statements with context (as the bindtype must be determined).
-type namedPreparerContext interface {
- PreparerContext
- binder
-}
-
-func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
- bindType := BindType(p.DriverName())
- q, args, err := compileNamedQuery([]byte(query), bindType)
- if err != nil {
- return nil, err
- }
- stmt, err := PreparexContext(ctx, p, q)
- if err != nil {
- return nil, err
- }
- return &NamedStmt{
- QueryString: q,
- Params: args,
- Stmt: stmt,
- }, nil
-}
-
-// ExecContext executes a named statement using the struct passed.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return *new(sql.Result), err
- }
- return n.Stmt.ExecContext(ctx, args...)
-}
-
-// QueryContext executes a named statement using the struct argument, returning rows.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return nil, err
- }
- return n.Stmt.QueryContext(ctx, args...)
-}
-
-// QueryRowContext executes a named statement against the database. Because sqlx cannot
-// create a *sql.Row with an error condition pre-set for binding errors, sqlx
-// returns a *sqlx.Row instead.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
- args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
- if err != nil {
- return &Row{err: err}
- }
- return n.Stmt.QueryRowxContext(ctx, args...)
-}
-
-// MustExecContext execs a NamedStmt, panicing on error
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
- res, err := n.ExecContext(ctx, arg)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// QueryxContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
- r, err := n.QueryContext(ctx, arg)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
-}
-
-// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
-// an alias for QueryRow.
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
- return n.QueryRowContext(ctx, arg)
-}
-
-// SelectContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
- rows, err := n.QueryxContext(ctx, arg)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// GetContext using this NamedStmt
-// Any named placeholder parameters are replaced with fields from arg.
-func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
- r := n.QueryRowxContext(ctx, arg)
- return r.scanAny(dest, false)
-}
-
-// NamedQueryContext binds a named query and then runs Query on the result using the
-// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
-// map[string]interface{} types.
-func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.QueryxContext(ctx, q, args...)
-}
-
-// NamedExecContext uses BindStruct to get a query executable by the driver and
-// then runs Exec on the result. Returns an error from the binding
-// or the query excution itself.
-func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
- q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
- if err != nil {
- return nil, err
- }
- return e.ExecContext(ctx, q, args...)
-}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
deleted file mode 100644
index f01d3d1..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# reflectx
-
-The sqlx package has special reflect needs. In particular, it needs to:
-
-* be able to map a name to a field
-* understand embedded structs
-* understand mapping names to fields by a particular tag
-* user specified name -> field mapping functions
-
-These behaviors mimic the behaviors by the standard library marshallers and also the
-behavior of standard Go accessors.
-
-The first two are amply taken care of by `Reflect.Value.FieldByName`, and the third is
-addressed by `Reflect.Value.FieldByNameFunc`, but these don't quite understand struct
-tags in the ways that are vital to most marshallers, and they are slow.
-
-This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
deleted file mode 100644
index 73c21eb..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
+++ /dev/null
@@ -1,441 +0,0 @@
-// Package reflectx implements extensions to the standard reflect lib suitable
-// for implementing marshalling and unmarshalling packages. The main Mapper type
-// allows for Go-compatible named attribute access, including accessing embedded
-// struct attributes and the ability to use functions and struct tags to
-// customize field names.
-//
-package reflectx
-
-import (
- "reflect"
- "runtime"
- "strings"
- "sync"
-)
-
-// A FieldInfo is metadata for a struct field.
-type FieldInfo struct {
- Index []int
- Path string
- Field reflect.StructField
- Zero reflect.Value
- Name string
- Options map[string]string
- Embedded bool
- Children []*FieldInfo
- Parent *FieldInfo
-}
-
-// A StructMap is an index of field metadata for a struct.
-type StructMap struct {
- Tree *FieldInfo
- Index []*FieldInfo
- Paths map[string]*FieldInfo
- Names map[string]*FieldInfo
-}
-
-// GetByPath returns a *FieldInfo for a given string path.
-func (f StructMap) GetByPath(path string) *FieldInfo {
- return f.Paths[path]
-}
-
-// GetByTraversal returns a *FieldInfo for a given integer path. It is
-// analogous to reflect.FieldByIndex, but using the cached traversal
-// rather than re-executing the reflect machinery each time.
-func (f StructMap) GetByTraversal(index []int) *FieldInfo {
- if len(index) == 0 {
- return nil
- }
-
- tree := f.Tree
- for _, i := range index {
- if i >= len(tree.Children) || tree.Children[i] == nil {
- return nil
- }
- tree = tree.Children[i]
- }
- return tree
-}
-
-// Mapper is a general purpose mapper of names to struct fields. A Mapper
-// behaves like most marshallers in the standard library, obeying a field tag
-// for name mapping but also providing a basic transform function.
-type Mapper struct {
- cache map[reflect.Type]*StructMap
- tagName string
- tagMapFunc func(string) string
- mapFunc func(string) string
- mutex sync.Mutex
-}
-
-// NewMapper returns a new mapper using the tagName as its struct field tag.
-// If tagName is the empty string, it is ignored.
-func NewMapper(tagName string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- }
-}
-
-// NewMapperTagFunc returns a new mapper which contains a mapper for field names
-// AND a mapper for tag values. This is useful for tags like json which can
-// have values like "name,omitempty".
-func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- mapFunc: mapFunc,
- tagMapFunc: tagMapFunc,
- }
-}
-
-// NewMapperFunc returns a new mapper which optionally obeys a field tag and
-// a struct field name mapper func given by f. Tags will take precedence, but
-// for any other field, the mapped name will be f(field.Name)
-func NewMapperFunc(tagName string, f func(string) string) *Mapper {
- return &Mapper{
- cache: make(map[reflect.Type]*StructMap),
- tagName: tagName,
- mapFunc: f,
- }
-}
-
-// TypeMap returns a mapping of field strings to int slices representing
-// the traversal down the struct to reach the field.
-func (m *Mapper) TypeMap(t reflect.Type) *StructMap {
- m.mutex.Lock()
- mapping, ok := m.cache[t]
- if !ok {
- mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc)
- m.cache[t] = mapping
- }
- m.mutex.Unlock()
- return mapping
-}
-
-// FieldMap returns the mapper's mapping of field names to reflect values. Panics
-// if v's Kind is not Struct, or v is not Indirectable to a struct kind.
-func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- r := map[string]reflect.Value{}
- tm := m.TypeMap(v.Type())
- for tagName, fi := range tm.Names {
- r[tagName] = FieldByIndexes(v, fi.Index)
- }
- return r
-}
-
-// FieldByName returns a field by its mapped name as a reflect.Value.
-// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
-// Returns zero Value if the name is not found.
-func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- tm := m.TypeMap(v.Type())
- fi, ok := tm.Names[name]
- if !ok {
- return v
- }
- return FieldByIndexes(v, fi.Index)
-}
-
-// FieldsByName returns a slice of values corresponding to the slice of names
-// for the value. Panics if v's Kind is not Struct or v is not Indirectable
-// to a struct Kind. Returns zero Value for each name not found.
-func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
- v = reflect.Indirect(v)
- mustBe(v, reflect.Struct)
-
- tm := m.TypeMap(v.Type())
- vals := make([]reflect.Value, 0, len(names))
- for _, name := range names {
- fi, ok := tm.Names[name]
- if !ok {
- vals = append(vals, *new(reflect.Value))
- } else {
- vals = append(vals, FieldByIndexes(v, fi.Index))
- }
- }
- return vals
-}
-
-// TraversalsByName returns a slice of int slices which represent the struct
-// traversals for each mapped name. Panics if t is not a struct or Indirectable
-// to a struct. Returns empty int slice for each name not found.
-func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
- r := make([][]int, 0, len(names))
- m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
- if i == nil {
- r = append(r, []int{})
- } else {
- r = append(r, i)
- }
-
- return nil
- })
- return r
-}
-
-// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
-// each name and the struct traversal represented by that name. Panics if t is not
-// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
-func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
- t = Deref(t)
- mustBe(t, reflect.Struct)
- tm := m.TypeMap(t)
- for i, name := range names {
- fi, ok := tm.Names[name]
- if !ok {
- if err := fn(i, nil); err != nil {
- return err
- }
- } else {
- if err := fn(i, fi.Index); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// FieldByIndexes returns a value for the field given by the struct traversal
-// for the given value.
-func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- // if this is a pointer and it's nil, allocate a new value and set it
- if v.Kind() == reflect.Ptr && v.IsNil() {
- alloc := reflect.New(Deref(v.Type()))
- v.Set(alloc)
- }
- if v.Kind() == reflect.Map && v.IsNil() {
- v.Set(reflect.MakeMap(v.Type()))
- }
- }
- return v
-}
-
-// FieldByIndexesReadOnly returns a value for a particular struct traversal,
-// but is not concerned with allocating nil pointers because the value is
-// going to be used for reading and not setting.
-func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
- for _, i := range indexes {
- v = reflect.Indirect(v).Field(i)
- }
- return v
-}
-
-// Deref is Indirect for reflect.Types
-func Deref(t reflect.Type) reflect.Type {
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- }
- return t
-}
-
-// -- helpers & utilities --
-
-type kinder interface {
- Kind() reflect.Kind
-}
-
-// mustBe checks a value against a kind, panicing with a reflect.ValueError
-// if the kind isn't that which is required.
-func mustBe(v kinder, expected reflect.Kind) {
- if k := v.Kind(); k != expected {
- panic(&reflect.ValueError{Method: methodName(), Kind: k})
- }
-}
-
-// methodName returns the caller of the function calling methodName
-func methodName() string {
- pc, _, _, _ := runtime.Caller(2)
- f := runtime.FuncForPC(pc)
- if f == nil {
- return "unknown method"
- }
- return f.Name()
-}
-
-type typeQueue struct {
- t reflect.Type
- fi *FieldInfo
- pp string // Parent path
-}
-
-// A copying append that creates a new slice each time.
-func apnd(is []int, i int) []int {
- x := make([]int, len(is)+1)
- for p, n := range is {
- x[p] = n
- }
- x[len(x)-1] = i
- return x
-}
-
-type mapf func(string) string
-
-// parseName parses the tag and the target name for the given field using
-// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
-// field's name to a target name, and tagMapFunc for mapping the tag to
-// a target name.
-func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
- // first, set the fieldName to the field's name
- fieldName = field.Name
- // if a mapFunc is set, use that to override the fieldName
- if mapFunc != nil {
- fieldName = mapFunc(fieldName)
- }
-
- // if there's no tag to look for, return the field name
- if tagName == "" {
- return "", fieldName
- }
-
- // if this tag is not set using the normal convention in the tag,
- // then return the fieldname.. this check is done because according
- // to the reflect documentation:
- // If the tag does not have the conventional format,
- // the value returned by Get is unspecified.
- // which doesn't sound great.
- if !strings.Contains(string(field.Tag), tagName+":") {
- return "", fieldName
- }
-
- // at this point we're fairly sure that we have a tag, so lets pull it out
- tag = field.Tag.Get(tagName)
-
- // if we have a mapper function, call it on the whole tag
- // XXX: this is a change from the old version, which pulled out the name
- // before the tagMapFunc could be run, but I think this is the right way
- if tagMapFunc != nil {
- tag = tagMapFunc(tag)
- }
-
- // finally, split the options from the name
- parts := strings.Split(tag, ",")
- fieldName = parts[0]
-
- return tag, fieldName
-}
-
-// parseOptions parses options out of a tag string, skipping the name
-func parseOptions(tag string) map[string]string {
- parts := strings.Split(tag, ",")
- options := make(map[string]string, len(parts))
- if len(parts) > 1 {
- for _, opt := range parts[1:] {
- // short circuit potentially expensive split op
- if strings.Contains(opt, "=") {
- kv := strings.Split(opt, "=")
- options[kv[0]] = kv[1]
- continue
- }
- options[opt] = ""
- }
- }
- return options
-}
-
-// getMapping returns a mapping for the t type, using the tagName, mapFunc and
-// tagMapFunc to determine the canonical names of fields.
-func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap {
- m := []*FieldInfo{}
-
- root := &FieldInfo{}
- queue := []typeQueue{}
- queue = append(queue, typeQueue{Deref(t), root, ""})
-
-QueueLoop:
- for len(queue) != 0 {
- // pop the first item off of the queue
- tq := queue[0]
- queue = queue[1:]
-
- // ignore recursive field
- for p := tq.fi.Parent; p != nil; p = p.Parent {
- if tq.fi.Field.Type == p.Field.Type {
- continue QueueLoop
- }
- }
-
- nChildren := 0
- if tq.t.Kind() == reflect.Struct {
- nChildren = tq.t.NumField()
- }
- tq.fi.Children = make([]*FieldInfo, nChildren)
-
- // iterate through all of its fields
- for fieldPos := 0; fieldPos < nChildren; fieldPos++ {
-
- f := tq.t.Field(fieldPos)
-
- // parse the tag and the target name using the mapping options for this field
- tag, name := parseName(f, tagName, mapFunc, tagMapFunc)
-
- // if the name is "-", disabled via a tag, skip it
- if name == "-" {
- continue
- }
-
- fi := FieldInfo{
- Field: f,
- Name: name,
- Zero: reflect.New(f.Type).Elem(),
- Options: parseOptions(tag),
- }
-
- // if the path is empty this path is just the name
- if tq.pp == "" {
- fi.Path = fi.Name
- } else {
- fi.Path = tq.pp + "." + fi.Name
- }
-
- // skip unexported fields
- if len(f.PkgPath) != 0 && !f.Anonymous {
- continue
- }
-
- // bfs search of anonymous embedded structs
- if f.Anonymous {
- pp := tq.pp
- if tag != "" {
- pp = fi.Path
- }
-
- fi.Embedded = true
- fi.Index = apnd(tq.fi.Index, fieldPos)
- nChildren := 0
- ft := Deref(f.Type)
- if ft.Kind() == reflect.Struct {
- nChildren = ft.NumField()
- }
- fi.Children = make([]*FieldInfo, nChildren)
- queue = append(queue, typeQueue{Deref(f.Type), &fi, pp})
- } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) {
- fi.Index = apnd(tq.fi.Index, fieldPos)
- fi.Children = make([]*FieldInfo, Deref(f.Type).NumField())
- queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path})
- }
-
- fi.Index = apnd(tq.fi.Index, fieldPos)
- fi.Parent = tq.fi
- tq.fi.Children[fieldPos] = &fi
- m = append(m, &fi)
- }
- }
-
- flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}}
- for _, fi := range flds.Index {
- flds.Paths[fi.Path] = fi
- if fi.Name != "" && !fi.Embedded {
- flds.Names[fi.Path] = fi
- }
- }
-
- return flds
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
deleted file mode 100644
index e95f23f..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx.go
+++ /dev/null
@@ -1,1039 +0,0 @@
-package sqlx
-
-import (
- "database/sql"
- "database/sql/driver"
- "errors"
- "fmt"
-
- "io/ioutil"
- "path/filepath"
- "reflect"
- "strings"
- "sync"
-
- "github.com/jmoiron/sqlx/reflectx"
-)
-
-// Although the NameMapper is convenient, in practice it should not
-// be relied on except for application code. If you are writing a library
-// that uses sqlx, you should be aware that the name mappings you expect
-// can be overridden by your user's application.
-
-// NameMapper is used to map column names to struct field names. By default,
-// it uses strings.ToLower to lowercase struct field names. It can be set
-// to whatever you want, but it is encouraged to be set before sqlx is used
-// as name-to-field mappings are cached after first use on a type.
-var NameMapper = strings.ToLower
-var origMapper = reflect.ValueOf(NameMapper)
-
-// Rather than creating on init, this is created when necessary so that
-// importers have time to customize the NameMapper.
-var mpr *reflectx.Mapper
-
-// mprMu protects mpr.
-var mprMu sync.Mutex
-
-// mapper returns a valid mapper using the configured NameMapper func.
-func mapper() *reflectx.Mapper {
- mprMu.Lock()
- defer mprMu.Unlock()
-
- if mpr == nil {
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- } else if origMapper != reflect.ValueOf(NameMapper) {
- // if NameMapper has changed, create a new mapper
- mpr = reflectx.NewMapperFunc("db", NameMapper)
- origMapper = reflect.ValueOf(NameMapper)
- }
- return mpr
-}
-
-// isScannable takes the reflect.Type and the actual dest value and returns
-// whether or not it's Scannable. Something is scannable if:
-// * it is not a struct
-// * it implements sql.Scanner
-// * it has no exported fields
-func isScannable(t reflect.Type) bool {
- if reflect.PtrTo(t).Implements(_scannerInterface) {
- return true
- }
- if t.Kind() != reflect.Struct {
- return true
- }
-
- // it's not important that we use the right mapper for this particular object,
- // we're only concerned on how many exported fields this struct has
- m := mapper()
- if len(m.TypeMap(t).Index) == 0 {
- return true
- }
- return false
-}
-
-// ColScanner is an interface used by MapScan and SliceScan
-type ColScanner interface {
- Columns() ([]string, error)
- Scan(dest ...interface{}) error
- Err() error
-}
-
-// Queryer is an interface used by Get and Select
-type Queryer interface {
- Query(query string, args ...interface{}) (*sql.Rows, error)
- Queryx(query string, args ...interface{}) (*Rows, error)
- QueryRowx(query string, args ...interface{}) *Row
-}
-
-// Execer is an interface used by MustExec and LoadFile
-type Execer interface {
- Exec(query string, args ...interface{}) (sql.Result, error)
-}
-
-// Binder is an interface for something which can bind queries (Tx, DB)
-type binder interface {
- DriverName() string
- Rebind(string) string
- BindNamed(string, interface{}) (string, []interface{}, error)
-}
-
-// Ext is a union interface which can bind, query, and exec, used by
-// NamedQuery and NamedExec.
-type Ext interface {
- binder
- Queryer
- Execer
-}
-
-// Preparer is an interface used by Preparex.
-type Preparer interface {
- Prepare(query string) (*sql.Stmt, error)
-}
-
-// determine if any of our extensions are unsafe
-func isUnsafe(i interface{}) bool {
- switch v := i.(type) {
- case Row:
- return v.unsafe
- case *Row:
- return v.unsafe
- case Rows:
- return v.unsafe
- case *Rows:
- return v.unsafe
- case NamedStmt:
- return v.Stmt.unsafe
- case *NamedStmt:
- return v.Stmt.unsafe
- case Stmt:
- return v.unsafe
- case *Stmt:
- return v.unsafe
- case qStmt:
- return v.unsafe
- case *qStmt:
- return v.unsafe
- case DB:
- return v.unsafe
- case *DB:
- return v.unsafe
- case Tx:
- return v.unsafe
- case *Tx:
- return v.unsafe
- case sql.Rows, *sql.Rows:
- return false
- default:
- return false
- }
-}
-
-func mapperFor(i interface{}) *reflectx.Mapper {
- switch i.(type) {
- case DB:
- return i.(DB).Mapper
- case *DB:
- return i.(*DB).Mapper
- case Tx:
- return i.(Tx).Mapper
- case *Tx:
- return i.(*Tx).Mapper
- default:
- return mapper()
- }
-}
-
-var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem()
-var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// Row is a reimplementation of sql.Row in order to gain access to the underlying
-// sql.Rows.Columns() data, necessary for StructScan.
-type Row struct {
- err error
- unsafe bool
- rows *sql.Rows
- Mapper *reflectx.Mapper
-}
-
-// Scan is a fixed implementation of sql.Row.Scan, which does not discard the
-// underlying error from the internal rows object if it exists.
-func (r *Row) Scan(dest ...interface{}) error {
- if r.err != nil {
- return r.err
- }
-
- // TODO(bradfitz): for now we need to defensively clone all
- // []byte that the driver returned (not permitting
- // *RawBytes in Rows.Scan), since we're about to close
- // the Rows in our defer, when we return from this function.
- // the contract with the driver.Next(...) interface is that it
- // can return slices into read-only temporary memory that's
- // only valid until the next Scan/Close. But the TODO is that
- // for a lot of drivers, this copy will be unnecessary. We
- // should provide an optional interface for drivers to
- // implement to say, "don't worry, the []bytes that I return
- // from Next will not be modified again." (for instance, if
- // they were obtained from the network anyway) But for now we
- // don't care.
- defer r.rows.Close()
- for _, dp := range dest {
- if _, ok := dp.(*sql.RawBytes); ok {
- return errors.New("sql: RawBytes isn't allowed on Row.Scan")
- }
- }
-
- if !r.rows.Next() {
- if err := r.rows.Err(); err != nil {
- return err
- }
- return sql.ErrNoRows
- }
- err := r.rows.Scan(dest...)
- if err != nil {
- return err
- }
- // Make sure the query can be processed to completion with no errors.
- if err := r.rows.Close(); err != nil {
- return err
- }
- return nil
-}
-
-// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually
-// returned by Row.Scan()
-func (r *Row) Columns() ([]string, error) {
- if r.err != nil {
- return []string{}, r.err
- }
- return r.rows.Columns()
-}
-
-// Err returns the error encountered while scanning.
-func (r *Row) Err() error {
- return r.err
-}
-
-// DB is a wrapper around sql.DB which keeps track of the driverName upon Open,
-// used mostly to automatically bind named queries using the right bindvars.
-type DB struct {
- *sql.DB
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The
-// driverName of the original database is required for named query support.
-func NewDb(db *sql.DB, driverName string) *DB {
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}
-}
-
-// DriverName returns the driverName passed to the Open function for this DB.
-func (db *DB) DriverName() string {
- return db.driverName
-}
-
-// Open is the same as sql.Open, but returns an *sqlx.DB instead.
-func Open(driverName, dataSourceName string) (*DB, error) {
- db, err := sql.Open(driverName, dataSourceName)
- if err != nil {
- return nil, err
- }
- return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err
-}
-
-// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error.
-func MustOpen(driverName, dataSourceName string) *DB {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// MapperFunc sets a new mapper for this db using the default sqlx struct tag
-// and the provided mapper function.
-func (db *DB) MapperFunc(mf func(string) string) {
- db.Mapper = reflectx.NewMapperFunc("db", mf)
-}
-
-// Rebind transforms a query from QUESTION to the DB driver's bindvar type.
-func (db *DB) Rebind(query string) string {
- return Rebind(BindType(db.driverName), query)
-}
-
-// Unsafe returns a version of DB which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its
-// safety behavior.
-func (db *DB) Unsafe() *DB {
- return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper}
-}
-
-// BindNamed binds a query using the DB driver's bindvar type.
-func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper)
-}
-
-// NamedQuery using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(db, query, arg)
-}
-
-// NamedExec using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(db, query, arg)
-}
-
-// Select using this DB.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(db, dest, query, args...)
-}
-
-// Get using this DB.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (db *DB) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(db, dest, query, args...)
-}
-
-// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead
-// of an *sql.Tx.
-func (db *DB) MustBegin() *Tx {
- tx, err := db.Beginx()
- if err != nil {
- panic(err)
- }
- return tx
-}
-
-// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx.
-func (db *DB) Beginx() (*Tx, error) {
- tx, err := db.DB.Begin()
- if err != nil {
- return nil, err
- }
- return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// Queryx queries the database and returns an *sqlx.Rows.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := db.DB.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// QueryRowx queries the database and returns an *sqlx.Row.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := db.DB.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
-}
-
-// MustExec (panic) runs MustExec using this database.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(db, query, args...)
-}
-
-// Preparex returns an sqlx.Stmt instead of a sql.Stmt
-func (db *DB) Preparex(query string) (*Stmt, error) {
- return Preparex(db, query)
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (db *DB) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(db, query)
-}
-
-// Tx is an sqlx wrapper around sql.Tx with extra functionality
-type Tx struct {
- *sql.Tx
- driverName string
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// DriverName returns the driverName used by the DB which began this transaction.
-func (tx *Tx) DriverName() string {
- return tx.driverName
-}
-
-// Rebind a query within a transaction's bindvar type.
-func (tx *Tx) Rebind(query string) string {
- return Rebind(BindType(tx.driverName), query)
-}
-
-// Unsafe returns a version of Tx which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (tx *Tx) Unsafe() *Tx {
- return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper}
-}
-
-// BindNamed binds a query within a transaction's bindvar type.
-func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) {
- return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper)
-}
-
-// NamedQuery within a transaction.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) {
- return NamedQuery(tx, query, arg)
-}
-
-// NamedExec a named query within a transaction.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) {
- return NamedExec(tx, query, arg)
-}
-
-// Select within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error {
- return Select(tx, dest, query, args...)
-}
-
-// Queryx within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := tx.Tx.Query(query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
-}
-
-// QueryRowx within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := tx.Tx.Query(query, args...)
- return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
-}
-
-// Get within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error {
- return Get(tx, dest, query, args...)
-}
-
-// MustExec runs MustExec within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result {
- return MustExec(tx, query, args...)
-}
-
-// Preparex a statement within a transaction.
-func (tx *Tx) Preparex(query string) (*Stmt, error) {
- return Preparex(tx, query)
-}
-
-// Stmtx returns a version of the prepared statement which runs within a transaction. Provided
-// stmt can be either *sql.Stmt or *sqlx.Stmt.
-func (tx *Tx) Stmtx(stmt interface{}) *Stmt {
- var s *sql.Stmt
- switch v := stmt.(type) {
- case Stmt:
- s = v.Stmt
- case *Stmt:
- s = v.Stmt
- case sql.Stmt:
- s = &v
- case *sql.Stmt:
- s = v
- default:
- panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
- }
- return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper}
-}
-
-// NamedStmt returns a version of the prepared statement which runs within a transaction.
-func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt {
- return &NamedStmt{
- QueryString: stmt.QueryString,
- Params: stmt.Params,
- Stmt: tx.Stmtx(stmt.Stmt),
- }
-}
-
-// PrepareNamed returns an sqlx.NamedStmt
-func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) {
- return prepareNamed(tx, query)
-}
-
-// Stmt is an sqlx wrapper around sql.Stmt with extra functionality
-type Stmt struct {
- *sql.Stmt
- unsafe bool
- Mapper *reflectx.Mapper
-}
-
-// Unsafe returns a version of Stmt which will silently succeed to scan when
-// columns in the SQL result have no fields in the destination struct.
-func (s *Stmt) Unsafe() *Stmt {
- return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper}
-}
-
-// Select using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) Select(dest interface{}, args ...interface{}) error {
- return Select(&qStmt{s}, dest, "", args...)
-}
-
-// Get using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (s *Stmt) Get(dest interface{}, args ...interface{}) error {
- return Get(&qStmt{s}, dest, "", args...)
-}
-
-// MustExec (panic) using this statement. Note that the query portion of the error
-// output will be blank, as Stmt does not expose its query.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) MustExec(args ...interface{}) sql.Result {
- return MustExec(&qStmt{s}, "", args...)
-}
-
-// QueryRowx using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryRowx(args ...interface{}) *Row {
- qs := &qStmt{s}
- return qs.QueryRowx("", args...)
-}
-
-// Queryx using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) {
- qs := &qStmt{s}
- return qs.Queryx("", args...)
-}
-
-// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by
-// implementing those interfaces and ignoring the `query` argument.
-type qStmt struct{ *Stmt }
-
-func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) {
- return q.Stmt.Query(args...)
-}
-
-func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) {
- r, err := q.Stmt.Query(args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
-}
-
-func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row {
- rows, err := q.Stmt.Query(args...)
- return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
-}
-
-func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) {
- return q.Stmt.Exec(args...)
-}
-
-// Rows is a wrapper around sql.Rows which caches costly reflect operations
-// during a looped StructScan
-type Rows struct {
- *sql.Rows
- unsafe bool
- Mapper *reflectx.Mapper
- // these fields cache memory use for a rows during iteration w/ structScan
- started bool
- fields [][]int
- values []interface{}
-}
-
-// SliceScan using this Rows.
-func (r *Rows) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Rows) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct.
-// Use this and iterate over Rows manually when the memory load of Select() might be
-// prohibitive. *Rows.StructScan caches the reflect work of matching up column
-// positions to fields to avoid that overhead per scan, which means it is not safe
-// to run StructScan on the same Rows instance with different struct types.
-func (r *Rows) StructScan(dest interface{}) error {
- v := reflect.ValueOf(dest)
-
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
-
- v = reflect.Indirect(v)
-
- if !r.started {
- columns, err := r.Columns()
- if err != nil {
- return err
- }
- m := r.Mapper
-
- r.fields = m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(r.fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- r.values = make([]interface{}, len(columns))
- r.started = true
- }
-
- err := fieldsByTraversal(v, r.fields, r.values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- err = r.Scan(r.values...)
- if err != nil {
- return err
- }
- return r.Err()
-}
-
-// Connect to a database and verify with a ping.
-func Connect(driverName, dataSourceName string) (*DB, error) {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- return nil, err
- }
- err = db.Ping()
- if err != nil {
- db.Close()
- return nil, err
- }
- return db, nil
-}
-
-// MustConnect connects to a database and panics on error.
-func MustConnect(driverName, dataSourceName string) *DB {
- db, err := Connect(driverName, dataSourceName)
- if err != nil {
- panic(err)
- }
- return db
-}
-
-// Preparex prepares a statement.
-func Preparex(p Preparer, query string) (*Stmt, error) {
- s, err := p.Prepare(query)
- if err != nil {
- return nil, err
- }
- return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
-}
-
-// Select executes a query using the provided Queryer, and StructScans each row
-// into dest, which must be a slice. If the slice elements are scannable, then
-// the result set must have only one column. Otherwise, StructScan is used.
-// The *sql.Rows are closed automatically.
-// Any placeholder parameters are replaced with supplied args.
-func Select(q Queryer, dest interface{}, query string, args ...interface{}) error {
- rows, err := q.Queryx(query, args...)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// Get does a QueryRow using the provided Queryer, and scans the resulting row
-// to dest. If dest is scannable, the result must only have one column. Otherwise,
-// StructScan is used. Get will return sql.ErrNoRows like row.Scan would.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func Get(q Queryer, dest interface{}, query string, args ...interface{}) error {
- r := q.QueryRowx(query, args...)
- return r.scanAny(dest, false)
-}
-
-// LoadFile exec's every statement in a file (as a single call to Exec).
-// LoadFile may return a nil *sql.Result if errors are encountered locating or
-// reading the file at path. LoadFile reads the entire file into memory, so it
-// is not suitable for loading large data dumps, but can be useful for initializing
-// schemas or loading indexes.
-//
-// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
-// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
-// this by requiring something with DriverName() and then attempting to split the
-// queries will be difficult to get right, and its current driver-specific behavior
-// is deemed at least not complex in its incorrectness.
-func LoadFile(e Execer, path string) (*sql.Result, error) {
- realpath, err := filepath.Abs(path)
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadFile(realpath)
- if err != nil {
- return nil, err
- }
- res, err := e.Exec(string(contents))
- return &res, err
-}
-
-// MustExec execs the query using e and panics if there was an error.
-// Any placeholder parameters are replaced with supplied args.
-func MustExec(e Execer, query string, args ...interface{}) sql.Result {
- res, err := e.Exec(query, args...)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// SliceScan using this Rows.
-func (r *Row) SliceScan() ([]interface{}, error) {
- return SliceScan(r)
-}
-
-// MapScan using this Rows.
-func (r *Row) MapScan(dest map[string]interface{}) error {
- return MapScan(r, dest)
-}
-
-func (r *Row) scanAny(dest interface{}, structOnly bool) error {
- if r.err != nil {
- return r.err
- }
- if r.rows == nil {
- r.err = sql.ErrNoRows
- return r.err
- }
- defer r.rows.Close()
-
- v := reflect.ValueOf(dest)
- if v.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if v.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
-
- base := reflectx.Deref(v.Type())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- if scannable && len(columns) > 1 {
- return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns))
- }
-
- if scannable {
- return r.Scan(dest)
- }
-
- m := r.Mapper
-
- fields := m.TraversalsByName(v.Type(), columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !r.unsafe {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- values := make([]interface{}, len(columns))
-
- err = fieldsByTraversal(v, fields, values, true)
- if err != nil {
- return err
- }
- // scan into the struct field pointers and append to our results
- return r.Scan(values...)
-}
-
-// StructScan a single Row into dest.
-func (r *Row) StructScan(dest interface{}) error {
- return r.scanAny(dest, true)
-}
-
-// SliceScan a row, returning a []interface{} with values similar to MapScan.
-// This function is primarily intended for use where the number of columns
-// is not known. Because you can pass an []interface{} directly to Scan,
-// it's recommended that you do that as it will not have to allocate new
-// slices per row.
-func SliceScan(r ColScanner) ([]interface{}, error) {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return []interface{}{}, err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
-
- if err != nil {
- return values, err
- }
-
- for i := range columns {
- values[i] = *(values[i].(*interface{}))
- }
-
- return values, r.Err()
-}
-
-// MapScan scans a single Row into the dest map[string]interface{}.
-// Use this to get results for SQL that might not be under your control
-// (for instance, if you're building an interface for an SQL server that
-// executes SQL from input). Please do not use this as a primary interface!
-// This will modify the map sent to it in place, so reuse the same map with
-// care. Columns which occur more than once in the result will overwrite
-// each other!
-func MapScan(r ColScanner, dest map[string]interface{}) error {
- // ignore r.started, since we needn't use reflect for anything.
- columns, err := r.Columns()
- if err != nil {
- return err
- }
-
- values := make([]interface{}, len(columns))
- for i := range values {
- values[i] = new(interface{})
- }
-
- err = r.Scan(values...)
- if err != nil {
- return err
- }
-
- for i, column := range columns {
- dest[column] = *(values[i].(*interface{}))
- }
-
- return r.Err()
-}
-
-type rowsi interface {
- Close() error
- Columns() ([]string, error)
- Err() error
- Next() bool
- Scan(...interface{}) error
-}
-
-// structOnlyError returns an error appropriate for type when a non-scannable
-// struct is expected but something else is given
-func structOnlyError(t reflect.Type) error {
- isStruct := t.Kind() == reflect.Struct
- isScanner := reflect.PtrTo(t).Implements(_scannerInterface)
- if !isStruct {
- return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind())
- }
- if isScanner {
- return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name())
- }
- return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name())
-}
-
-// scanAll scans all rows into a destination, which must be a slice of any
-// type. If the destination slice type is a Struct, then StructScan will be
-// used on each row. If the destination is some other kind of base type, then
-// each row must only have one column which can scan into that type. This
-// allows you to do something like:
-//
-// rows, _ := db.Query("select id from people;")
-// var ids []int
-// scanAll(rows, &ids, false)
-//
-// and ids will be a list of the id results. I realize that this is a desirable
-// interface to expose to users, but for now it will only be exposed via changes
-// to `Get` and `Select`. The reason that this has been implemented like this is
-// this is the only way to not duplicate reflect work in the new API while
-// maintaining backwards compatibility.
-func scanAll(rows rowsi, dest interface{}, structOnly bool) error {
- var v, vp reflect.Value
-
- value := reflect.ValueOf(dest)
-
- // json.Unmarshal returns errors for these
- if value.Kind() != reflect.Ptr {
- return errors.New("must pass a pointer, not a value, to StructScan destination")
- }
- if value.IsNil() {
- return errors.New("nil pointer passed to StructScan destination")
- }
- direct := reflect.Indirect(value)
-
- slice, err := baseType(value.Type(), reflect.Slice)
- if err != nil {
- return err
- }
-
- isPtr := slice.Elem().Kind() == reflect.Ptr
- base := reflectx.Deref(slice.Elem())
- scannable := isScannable(base)
-
- if structOnly && scannable {
- return structOnlyError(base)
- }
-
- columns, err := rows.Columns()
- if err != nil {
- return err
- }
-
- // if it's a base type make sure it only has 1 column; if not return an error
- if scannable && len(columns) > 1 {
- return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns))
- }
-
- if !scannable {
- var values []interface{}
- var m *reflectx.Mapper
-
- switch rows.(type) {
- case *Rows:
- m = rows.(*Rows).Mapper
- default:
- m = mapper()
- }
-
- fields := m.TraversalsByName(base, columns)
- // if we are not unsafe and are missing fields, return an error
- if f, err := missingFields(fields); err != nil && !isUnsafe(rows) {
- return fmt.Errorf("missing destination name %s in %T", columns[f], dest)
- }
- values = make([]interface{}, len(columns))
-
- for rows.Next() {
- // create a new struct type (which returns PtrTo) and indirect it
- vp = reflect.New(base)
- v = reflect.Indirect(vp)
-
- err = fieldsByTraversal(v, fields, values, true)
- if err != nil {
- return err
- }
-
- // scan into the struct field pointers and append to our results
- err = rows.Scan(values...)
- if err != nil {
- return err
- }
-
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, v))
- }
- }
- } else {
- for rows.Next() {
- vp = reflect.New(base)
- err = rows.Scan(vp.Interface())
- if err != nil {
- return err
- }
- // append
- if isPtr {
- direct.Set(reflect.Append(direct, vp))
- } else {
- direct.Set(reflect.Append(direct, reflect.Indirect(vp)))
- }
- }
- }
-
- return rows.Err()
-}
-
-// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately
-// it doesn't really feel like it's named properly. There is an incongruency
-// between this and the way that StructScan (which might better be ScanStruct
-// anyway) works on a rows object.
-
-// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice.
-// StructScan will scan in the entire rows result, so if you do not want to
-// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan.
-// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default.
-func StructScan(rows rowsi, dest interface{}) error {
- return scanAll(rows, dest, true)
-
-}
-
-// reflect helpers
-
-func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) {
- t = reflectx.Deref(t)
- if t.Kind() != expected {
- return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind())
- }
- return t, nil
-}
-
-// fieldsByName fills a values interface with fields from the passed value based
-// on the traversals in int. If ptrs is true, return addresses instead of values.
-// We write this instead of using FieldsByName to save allocations and map lookups
-// when iterating over many rows. Empty traversals will get an interface pointer.
-// Because of the necessity of requesting ptrs or values, it's considered a bit too
-// specialized for inclusion in reflectx itself.
-func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error {
- v = reflect.Indirect(v)
- if v.Kind() != reflect.Struct {
- return errors.New("argument not a struct")
- }
-
- for i, traversal := range traversals {
- if len(traversal) == 0 {
- values[i] = new(interface{})
- continue
- }
- f := reflectx.FieldByIndexes(v, traversal)
- if ptrs {
- values[i] = f.Addr().Interface()
- } else {
- values[i] = f.Interface()
- }
- }
- return nil
-}
-
-func missingFields(transversals [][]int) (field int, err error) {
- for i, t := range transversals {
- if len(t) == 0 {
- return i, errors.New("missing field")
- }
- }
- return 0, nil
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
deleted file mode 100644
index 0b17145..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go
+++ /dev/null
@@ -1,335 +0,0 @@
-// +build go1.8
-
-package sqlx
-
-import (
- "context"
- "database/sql"
- "fmt"
- "io/ioutil"
- "path/filepath"
- "reflect"
-)
-
-// ConnectContext to a database and verify with a ping.
-func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
- db, err := Open(driverName, dataSourceName)
- if err != nil {
- return db, err
- }
- err = db.PingContext(ctx)
- return db, err
-}
-
-// QueryerContext is an interface used by GetContext and SelectContext
-type QueryerContext interface {
- QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
- QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
- QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
-}
-
-// PreparerContext is an interface used by PreparexContext.
-type PreparerContext interface {
- PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
-}
-
-// ExecerContext is an interface used by MustExecContext and LoadFileContext
-type ExecerContext interface {
- ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
-}
-
-// ExtContext is a union interface which can bind, query, and exec, with Context
-// used by NamedQueryContext and NamedExecContext.
-type ExtContext interface {
- binder
- QueryerContext
- ExecerContext
-}
-
-// SelectContext executes a query using the provided Queryer, and StructScans
-// each row into dest, which must be a slice. If the slice elements are
-// scannable, then the result set must have only one column. Otherwise,
-// StructScan is used. The *sql.Rows are closed automatically.
-// Any placeholder parameters are replaced with supplied args.
-func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
- rows, err := q.QueryxContext(ctx, query, args...)
- if err != nil {
- return err
- }
- // if something happens here, we want to make sure the rows are Closed
- defer rows.Close()
- return scanAll(rows, dest, false)
-}
-
-// PreparexContext prepares a statement.
-//
-// The provided context is used for the preparation of the statement, not for
-// the execution of the statement.
-func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
- s, err := p.PrepareContext(ctx, query)
- if err != nil {
- return nil, err
- }
- return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
-}
-
-// GetContext does a QueryRow using the provided Queryer, and scans the
-// resulting row to dest. If dest is scannable, the result must only have one
-// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
-// row.Scan would. Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
- r := q.QueryRowxContext(ctx, query, args...)
- return r.scanAny(dest, false)
-}
-
-// LoadFileContext exec's every statement in a file (as a single call to Exec).
-// LoadFileContext may return a nil *sql.Result if errors are encountered
-// locating or reading the file at path. LoadFile reads the entire file into
-// memory, so it is not suitable for loading large data dumps, but can be useful
-// for initializing schemas or loading indexes.
-//
-// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
-// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
-// this by requiring something with DriverName() and then attempting to split the
-// queries will be difficult to get right, and its current driver-specific behavior
-// is deemed at least not complex in its incorrectness.
-func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
- realpath, err := filepath.Abs(path)
- if err != nil {
- return nil, err
- }
- contents, err := ioutil.ReadFile(realpath)
- if err != nil {
- return nil, err
- }
- res, err := e.ExecContext(ctx, string(contents))
- return &res, err
-}
-
-// MustExecContext execs the query using e and panics if there was an error.
-// Any placeholder parameters are replaced with supplied args.
-func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
- res, err := e.ExecContext(ctx, query, args...)
- if err != nil {
- panic(err)
- }
- return res
-}
-
-// PrepareNamedContext returns an sqlx.NamedStmt
-func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
- return prepareNamedContext(ctx, db, query)
-}
-
-// NamedQueryContext using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
- return NamedQueryContext(ctx, db, query, arg)
-}
-
-// NamedExecContext using this DB.
-// Any named placeholder parameters are replaced with fields from arg.
-func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
- return NamedExecContext(ctx, db, query, arg)
-}
-
-// SelectContext using this DB.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return SelectContext(ctx, db, dest, query, args...)
-}
-
-// GetContext using this DB.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return GetContext(ctx, db, dest, query, args...)
-}
-
-// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
-//
-// The provided context is used for the preparation of the statement, not for
-// the execution of the statement.
-func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
- return PreparexContext(ctx, db, query)
-}
-
-// QueryxContext queries the database and returns an *sqlx.Rows.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := db.DB.QueryContext(ctx, query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// QueryRowxContext queries the database and returns an *sqlx.Row.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := db.DB.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
-}
-
-// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
-// of an *sql.Tx.
-//
-// The provided context is used until the transaction is committed or rolled
-// back. If the context is canceled, the sql package will roll back the
-// transaction. Tx.Commit will return an error if the context provided to
-// MustBeginContext is canceled.
-func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
- tx, err := db.BeginTxx(ctx, opts)
- if err != nil {
- panic(err)
- }
- return tx
-}
-
-// MustExecContext (panic) runs MustExec using this database.
-// Any placeholder parameters are replaced with supplied args.
-func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
- return MustExecContext(ctx, db, query, args...)
-}
-
-// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
-// *sql.Tx.
-//
-// The provided context is used until the transaction is committed or rolled
-// back. If the context is canceled, the sql package will roll back the
-// transaction. Tx.Commit will return an error if the context provided to
-// BeginxContext is canceled.
-func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
- tx, err := db.DB.BeginTx(ctx, opts)
- if err != nil {
- return nil, err
- }
- return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
-}
-
-// StmtxContext returns a version of the prepared statement which runs within a
-// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
-func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
- var s *sql.Stmt
- switch v := stmt.(type) {
- case Stmt:
- s = v.Stmt
- case *Stmt:
- s = v.Stmt
- case sql.Stmt:
- s = &v
- case *sql.Stmt:
- s = v
- default:
- panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
- }
- return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
-}
-
-// NamedStmtContext returns a version of the prepared statement which runs
-// within a transaction.
-func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
- return &NamedStmt{
- QueryString: stmt.QueryString,
- Params: stmt.Params,
- Stmt: tx.StmtxContext(ctx, stmt.Stmt),
- }
-}
-
-// MustExecContext runs MustExecContext within a transaction.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
- return MustExecContext(ctx, tx, query, args...)
-}
-
-// QueryxContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := tx.Tx.QueryContext(ctx, query, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
-}
-
-// SelectContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return SelectContext(ctx, tx, dest, query, args...)
-}
-
-// GetContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
- return GetContext(ctx, tx, dest, query, args...)
-}
-
-// QueryRowxContext within a transaction and context.
-// Any placeholder parameters are replaced with supplied args.
-func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := tx.Tx.QueryContext(ctx, query, args...)
- return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
-}
-
-// NamedExecContext using this Tx.
-// Any named placeholder parameters are replaced with fields from arg.
-func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
- return NamedExecContext(ctx, tx, query, arg)
-}
-
-// SelectContext using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
- return SelectContext(ctx, &qStmt{s}, dest, "", args...)
-}
-
-// GetContext using the prepared statement.
-// Any placeholder parameters are replaced with supplied args.
-// An error is returned if the result set is empty.
-func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
- return GetContext(ctx, &qStmt{s}, dest, "", args...)
-}
-
-// MustExecContext (panic) using this statement. Note that the query portion of
-// the error output will be blank, as Stmt does not expose its query.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
- return MustExecContext(ctx, &qStmt{s}, "", args...)
-}
-
-// QueryRowxContext using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
- qs := &qStmt{s}
- return qs.QueryRowxContext(ctx, "", args...)
-}
-
-// QueryxContext using this statement.
-// Any placeholder parameters are replaced with supplied args.
-func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
- qs := &qStmt{s}
- return qs.QueryxContext(ctx, "", args...)
-}
-
-func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
- return q.Stmt.QueryContext(ctx, args...)
-}
-
-func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
- r, err := q.Stmt.QueryContext(ctx, args...)
- if err != nil {
- return nil, err
- }
- return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
-}
-
-func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
- rows, err := q.Stmt.QueryContext(ctx, args...)
- return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
-}
-
-func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
- return q.Stmt.ExecContext(ctx, args...)
-}