aboutsummaryrefslogtreecommitdiff
path: root/vendor
diff options
context:
space:
mode:
authorFelix Hanley <felix@userspace.com.au>2018-02-21 04:21:58 +0000
committerFelix Hanley <felix@userspace.com.au>2018-02-21 04:22:32 +0000
commit734db776ce17a234825e83180a547cd3ad28f5e2 (patch)
treed5c289056fb01dfc1f3f438dd4eec21a31a23166 /vendor
parente9adf3a2bf8b81615275a6705b7957e43753f0ec (diff)
downloaddhtsearch-734db776ce17a234825e83180a547cd3ad28f5e2.tar.gz
dhtsearch-734db776ce17a234825e83180a547cd3ad28f5e2.tar.bz2
Update vendor deps
Diffstat (limited to 'vendor')
-rw-r--r--vendor/github.com/BurntSushi/toml/.gitignore5
-rw-r--r--vendor/github.com/BurntSushi/toml/.travis.yml15
-rw-r--r--vendor/github.com/BurntSushi/toml/COMPATIBLE3
-rw-r--r--vendor/github.com/BurntSushi/toml/COPYING14
-rw-r--r--vendor/github.com/BurntSushi/toml/Makefile19
-rw-r--r--vendor/github.com/BurntSushi/toml/README.md218
-rw-r--r--vendor/github.com/BurntSushi/toml/decode.go509
-rw-r--r--vendor/github.com/BurntSushi/toml/decode_meta.go121
-rw-r--r--vendor/github.com/BurntSushi/toml/decode_test.go1447
-rw-r--r--vendor/github.com/BurntSushi/toml/doc.go27
-rw-r--r--vendor/github.com/BurntSushi/toml/encode.go568
-rw-r--r--vendor/github.com/BurntSushi/toml/encode_test.go615
-rw-r--r--vendor/github.com/BurntSushi/toml/encoding_types.go19
-rw-r--r--vendor/github.com/BurntSushi/toml/encoding_types_1.1.go18
-rw-r--r--vendor/github.com/BurntSushi/toml/lex.go953
-rw-r--r--vendor/github.com/BurntSushi/toml/parse.go592
-rw-r--r--vendor/github.com/BurntSushi/toml/session.vim1
-rw-r--r--vendor/github.com/BurntSushi/toml/type_check.go91
-rw-r--r--vendor/github.com/BurntSushi/toml/type_fields.go242
-rw-r--r--vendor/github.com/felix/logger/logger_test.go114
-rw-r--r--vendor/github.com/jackc/pgx/.gitignore1
-rw-r--r--vendor/github.com/jackc/pgx/.travis.yml45
-rw-r--r--vendor/github.com/jackc/pgx/CHANGELOG.md111
-rw-r--r--vendor/github.com/jackc/pgx/README.md88
-rw-r--r--vendor/github.com/jackc/pgx/aclitem_parse_test.go126
-rw-r--r--vendor/github.com/jackc/pgx/batch.go313
-rw-r--r--vendor/github.com/jackc/pgx/bench_test.go765
-rw-r--r--vendor/github.com/jackc/pgx/chunkreader/chunkreader.go89
-rw-r--r--vendor/github.com/jackc/pgx/conn.go1562
-rw-r--r--vendor/github.com/jackc/pgx/conn_config_test.go.example3
-rw-r--r--vendor/github.com/jackc/pgx/conn_config_test.go.travis12
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool.go138
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool_private_test.go44
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool_test.go982
-rw-r--r--vendor/github.com/jackc/pgx/conn_test.go1744
-rw-r--r--vendor/github.com/jackc/pgx/copy_from.go90
-rw-r--r--vendor/github.com/jackc/pgx/copy_from_test.go428
-rw-r--r--vendor/github.com/jackc/pgx/copy_to.go222
-rw-r--r--vendor/github.com/jackc/pgx/copy_to_test.go367
-rw-r--r--vendor/github.com/jackc/pgx/doc.go82
-rw-r--r--vendor/github.com/jackc/pgx/example_custom_type_test.go104
-rw-r--r--vendor/github.com/jackc/pgx/example_json_test.go43
-rw-r--r--vendor/github.com/jackc/pgx/fastpath.go65
-rw-r--r--vendor/github.com/jackc/pgx/go_stdlib.go61
-rw-r--r--vendor/github.com/jackc/pgx/helper_test.go74
-rw-r--r--vendor/github.com/jackc/pgx/hstore.go222
-rw-r--r--vendor/github.com/jackc/pgx/hstore_test.go181
-rw-r--r--vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go237
-rw-r--r--vendor/github.com/jackc/pgx/large_objects.go38
-rw-r--r--vendor/github.com/jackc/pgx/large_objects_test.go121
-rw-r--r--vendor/github.com/jackc/pgx/logger.go41
-rw-r--r--vendor/github.com/jackc/pgx/messages.go248
-rw-r--r--vendor/github.com/jackc/pgx/msg_reader.go316
-rw-r--r--vendor/github.com/jackc/pgx/pgio/doc.go6
-rw-r--r--vendor/github.com/jackc/pgx/pgio/write.go40
-rw-r--r--vendor/github.com/jackc/pgx/pgpass_test.go57
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/authentication.go54
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/backend.go110
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go46
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/big_endian.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/bind.go171
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/bind_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/close.go59
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/close_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/command_complete.go48
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_data.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/data_row.go112
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/describe.go59
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/error_response.go197
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/execute.go60
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/flush.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/frontend.go122
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/function_call_response.go78
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/no_data.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/notice_response.go13
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/notification_response.go67
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parameter_description.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parameter_status.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parse.go83
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parse_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/password_message.go46
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/pgproto3.go42
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/query.go45
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go35
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/row_description.go100
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/startup_message.go97
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/sync.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/terminate.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/aclitem.go126
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/aclitem_array.go212
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/array.go352
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bit.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bool.go159
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bool_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/box.go162
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bpchar.go68
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bpchar_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bytea.go156
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bytea_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cid.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cidr.go31
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cidr_array.go329
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/circle.go146
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/convert.go424
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/database_sql.go42
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/date.go209
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/date_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/daterange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/decimal.go31
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/enum_array.go212
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float4.go197
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float4_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float8.go187
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float8_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/generic_binary.go39
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/generic_text.go39
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/hstore.go434
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/hstore_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/inet.go215
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/inet_array.go329
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int2.go209
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int2_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4.go213
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4range.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8.go199
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8range.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/interval.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/json.go161
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/jsonb.go70
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/line.go143
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/lseg.go161
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/macaddr.go154
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/name.go58
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numeric.go600
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numeric_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/oid.go81
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/oid_value.go55
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/path.go193
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/pgtype.go280
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/pguint32.go162
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/point.go139
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/polygon.go174
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/qchar.go146
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/range.go278
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/record.go129
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/text.go163
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/text_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tid.go144
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamp.go225
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamp_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamptz.go221
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tsrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tstzrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb304
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh24
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb252
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh7
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/unknown.go44
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/uuid.go183
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/uuid_array.go356
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varbit.go133
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varchar.go58
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varchar_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/xid.go64
-rw-r--r--vendor/github.com/jackc/pgx/query.go585
-rw-r--r--vendor/github.com/jackc/pgx/query_test.go1414
-rw-r--r--vendor/github.com/jackc/pgx/replication.go198
-rw-r--r--vendor/github.com/jackc/pgx/replication_test.go329
-rw-r--r--vendor/github.com/jackc/pgx/sql_test.go36
-rw-r--r--vendor/github.com/jackc/pgx/stdlib/sql.go513
-rw-r--r--vendor/github.com/jackc/pgx/stdlib/sql_test.go691
-rw-r--r--vendor/github.com/jackc/pgx/stress_test.go346
-rw-r--r--vendor/github.com/jackc/pgx/tx.go180
-rw-r--r--vendor/github.com/jackc/pgx/tx_test.go297
-rw-r--r--vendor/github.com/jackc/pgx/value_reader.go156
-rw-r--r--vendor/github.com/jackc/pgx/values.go3512
-rw-r--r--vendor/github.com/jackc/pgx/values_test.go1183
-rw-r--r--vendor/github.com/jmoiron/sqlx/README.md6
-rw-r--r--vendor/github.com/jmoiron/sqlx/bind.go2
-rw-r--r--vendor/github.com/jmoiron/sqlx/named.go12
-rw-r--r--vendor/github.com/jmoiron/sqlx/named_context_test.go136
-rw-r--r--vendor/github.com/jmoiron/sqlx/named_test.go227
-rw-r--r--vendor/github.com/jmoiron/sqlx/reflectx/reflect.go31
-rw-r--r--vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go905
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx.go8
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx_context_test.go1344
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx_test.go1792
-rw-r--r--vendor/github.com/pkg/errors/.gitignore24
-rw-r--r--vendor/github.com/pkg/errors/.travis.yml11
-rw-r--r--vendor/github.com/pkg/errors/LICENSE23
-rw-r--r--vendor/github.com/pkg/errors/README.md52
-rw-r--r--vendor/github.com/pkg/errors/appveyor.yml32
-rw-r--r--vendor/github.com/pkg/errors/errors.go269
-rw-r--r--vendor/github.com/pkg/errors/stack.go178
-rw-r--r--vendor/golang.org/x/net/AUTHORS3
-rw-r--r--vendor/golang.org/x/net/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/net/LICENSE27
-rw-r--r--vendor/golang.org/x/net/PATENTS22
-rw-r--r--vendor/golang.org/x/net/context/context.go56
-rw-r--r--vendor/golang.org/x/net/context/go17.go72
-rw-r--r--vendor/golang.org/x/net/context/go19.go20
-rw-r--r--vendor/golang.org/x/net/context/pre_go17.go300
-rw-r--r--vendor/golang.org/x/net/context/pre_go19.go109
-rw-r--r--vendor/golang.org/x/time/AUTHORS3
-rw-r--r--vendor/golang.org/x/time/CONTRIBUTORS3
-rw-r--r--vendor/golang.org/x/time/LICENSE27
-rw-r--r--vendor/golang.org/x/time/PATENTS22
-rw-r--r--vendor/golang.org/x/time/rate/rate.go380
-rw-r--r--vendor/golang.org/x/time/rate/rate_go16.go21
-rw-r--r--vendor/golang.org/x/time/rate/rate_go17.go21
218 files changed, 23789 insertions, 25061 deletions
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
deleted file mode 100644
index 0cd3800..0000000
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4..0000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 6efcfd0..0000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 5a8e332..0000000
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848..0000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 7c1b37e..0000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b0fd51d..0000000
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package toml
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
- return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
- undecoded interface{}
- context Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
- }
- if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
- }
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
- }
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
- // Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
- // Save the undecoded data and the key context into the primitive
- // value.
- context := make(Key, len(md.context))
- copy(context, md.context)
- rv.Set(reflect.ValueOf(Primitive{
- undecoded: data,
- context: context,
- }))
- return nil
- }
-
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
- }
- }
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
- return md.unifyText(data, v)
- }
- // BUG(burntsushi)
- // The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
-
- k := rv.Kind()
-
- // laziness
- if k >= reflect.Int && k <= reflect.Uint64 {
- return md.unifyInt(data, rv)
- }
- switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
- case reflect.Struct:
- return md.unifyStruct(data, rv)
- case reflect.Map:
- return md.unifyMap(data, rv)
- case reflect.Array:
- return md.unifyArray(data, rv)
- case reflect.Slice:
- return md.unifySlice(data, rv)
- case reflect.String:
- return md.unifyString(data, rv)
- case reflect.Bool:
- return md.unifyBool(data, rv)
- case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
- }
- return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- return md.unifyFloat64(data, rv)
- }
- return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if mapping == nil {
- return nil
- }
- return e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
- }
-
- for key, datum := range tmap {
- var f *field
- fields := cachedTypeFields(rv.Type())
- for i := range fields {
- ff := &fields[i]
- if ff.name == key {
- f = ff
- break
- }
- if f == nil && strings.EqualFold(ff.name, key) {
- f = ff
- }
- }
- if f != nil {
- subv := rv
- for _, i := range f.index {
- subv = indirect(subv.Field(i))
- }
- if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
- md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
- } else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
- }
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if tmap == nil {
- return nil
- }
- return badtype("map", mapping)
- }
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
- md.context = append(md.context, k)
-
- rvkey := indirect(reflect.New(rv.Type().Key()))
- rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
-
- rvkey.SetString(k)
- rv.SetMapIndex(rvkey, rvval)
- }
- return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- n := datav.Len()
- if rv.IsNil() || rv.Cap() < n {
- rv.Set(reflect.MakeSlice(rv.Type(), n, n))
- }
- rv.SetLen(n)
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
- return nil
- }
- return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
- if s, ok := data.(string); ok {
- rv.SetString(s)
- return nil
- }
- return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
- if num, ok := data.(float64); ok {
- switch rv.Kind() {
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- rv.SetFloat(num)
- default:
- panic("bug")
- }
- return nil
- }
- return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
- }
- return nil
- }
- return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
- if b, ok := data.(bool); ok {
- rv.SetBool(b)
- return nil
- }
- return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
- rv.Set(reflect.ValueOf(data))
- return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
- var s string
- switch sdata := data.(type) {
- case TextMarshaler:
- text, err := sdata.MarshalText()
- if err != nil {
- return err
- }
- s = string(text)
- case fmt.Stringer:
- s = sdata.String()
- case string:
- s = sdata
- case bool:
- s = fmt.Sprintf("%v", sdata)
- case int64:
- s = fmt.Sprintf("%d", sdata)
- case float64:
- s = fmt.Sprintf("%f", sdata)
- default:
- return badtype("primitive (string-like)", data)
- }
- if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
- }
- return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
- return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
- if v.Kind() != reflect.Ptr {
- if v.CanSet() {
- pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
- return pv
- }
- }
- return v
- }
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
- if rv.CanSet() {
- return true
- }
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
- return true
- }
- return false
-}
-
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index b9914a6..0000000
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_test.go b/vendor/github.com/BurntSushi/toml/decode_test.go
deleted file mode 100644
index 0c36b33..0000000
--- a/vendor/github.com/BurntSushi/toml/decode_test.go
+++ /dev/null
@@ -1,1447 +0,0 @@
-package toml
-
-import (
- "fmt"
- "log"
- "math"
- "reflect"
- "strings"
- "testing"
- "time"
-)
-
-func TestDecodeSimple(t *testing.T) {
- var testSimple = `
-age = 250
-andrew = "gallant"
-kait = "brady"
-now = 1987-07-05T05:45:00Z
-yesOrNo = true
-pi = 3.14
-colors = [
- ["red", "green", "blue"],
- ["cyan", "magenta", "yellow", "black"],
-]
-
-[My.Cats]
-plato = "cat 1"
-cauchy = "cat 2"
-`
-
- type cats struct {
- Plato string
- Cauchy string
- }
- type simple struct {
- Age int
- Colors [][]string
- Pi float64
- YesOrNo bool
- Now time.Time
- Andrew string
- Kait string
- My map[string]cats
- }
-
- var val simple
- _, err := Decode(testSimple, &val)
- if err != nil {
- t.Fatal(err)
- }
-
- now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00")
- if err != nil {
- panic(err)
- }
- var answer = simple{
- Age: 250,
- Andrew: "gallant",
- Kait: "brady",
- Now: now,
- YesOrNo: true,
- Pi: 3.14,
- Colors: [][]string{
- {"red", "green", "blue"},
- {"cyan", "magenta", "yellow", "black"},
- },
- My: map[string]cats{
- "Cats": {Plato: "cat 1", Cauchy: "cat 2"},
- },
- }
- if !reflect.DeepEqual(val, answer) {
- t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n",
- answer, val)
- }
-}
-
-func TestDecodeEmbedded(t *testing.T) {
- type Dog struct{ Name string }
- type Age int
- type cat struct{ Name string }
-
- for _, test := range []struct {
- label string
- input string
- decodeInto interface{}
- wantDecoded interface{}
- }{
- {
- label: "embedded struct",
- input: `Name = "milton"`,
- decodeInto: &struct{ Dog }{},
- wantDecoded: &struct{ Dog }{Dog{"milton"}},
- },
- {
- label: "embedded non-nil pointer to struct",
- input: `Name = "milton"`,
- decodeInto: &struct{ *Dog }{},
- wantDecoded: &struct{ *Dog }{&Dog{"milton"}},
- },
- {
- label: "embedded nil pointer to struct",
- input: ``,
- decodeInto: &struct{ *Dog }{},
- wantDecoded: &struct{ *Dog }{nil},
- },
- {
- label: "unexported embedded struct",
- input: `Name = "socks"`,
- decodeInto: &struct{ cat }{},
- wantDecoded: &struct{ cat }{cat{"socks"}},
- },
- {
- label: "embedded int",
- input: `Age = -5`,
- decodeInto: &struct{ Age }{},
- wantDecoded: &struct{ Age }{-5},
- },
- } {
- _, err := Decode(test.input, test.decodeInto)
- if err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) {
- t.Errorf("%s: want decoded == %+v, got %+v",
- test.label, test.wantDecoded, test.decodeInto)
- }
- }
-}
-
-func TestDecodeIgnoredFields(t *testing.T) {
- type simple struct {
- Number int `toml:"-"`
- }
- const input = `
-Number = 123
-- = 234
-`
- var s simple
- if _, err := Decode(input, &s); err != nil {
- t.Fatal(err)
- }
- if s.Number != 0 {
- t.Errorf("got: %d; want 0", s.Number)
- }
-}
-
-func TestTableArrays(t *testing.T) {
- var tomlTableArrays = `
-[[albums]]
-name = "Born to Run"
-
- [[albums.songs]]
- name = "Jungleland"
-
- [[albums.songs]]
- name = "Meeting Across the River"
-
-[[albums]]
-name = "Born in the USA"
-
- [[albums.songs]]
- name = "Glory Days"
-
- [[albums.songs]]
- name = "Dancing in the Dark"
-`
-
- type Song struct {
- Name string
- }
-
- type Album struct {
- Name string
- Songs []Song
- }
-
- type Music struct {
- Albums []Album
- }
-
- expected := Music{[]Album{
- {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}},
- {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}},
- }}
- var got Music
- if _, err := Decode(tomlTableArrays, &got); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, got) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
- }
-}
-
-func TestTableNesting(t *testing.T) {
- for _, tt := range []struct {
- t string
- want []string
- }{
- {"[a.b.c]", []string{"a", "b", "c"}},
- {`[a."b.c"]`, []string{"a", "b.c"}},
- {`[a.'b.c']`, []string{"a", "b.c"}},
- {`[a.' b ']`, []string{"a", " b "}},
- {"[ d.e.f ]", []string{"d", "e", "f"}},
- {"[ g . h . i ]", []string{"g", "h", "i"}},
- {`[ j . "ʞ" . 'l' ]`, []string{"j", "ʞ", "l"}},
- } {
- var m map[string]interface{}
- if _, err := Decode(tt.t, &m); err != nil {
- t.Errorf("Decode(%q): got error: %s", tt.t, err)
- continue
- }
- if keys := extractNestedKeys(m); !reflect.DeepEqual(keys, tt.want) {
- t.Errorf("Decode(%q): got nested keys %#v; want %#v",
- tt.t, keys, tt.want)
- }
- }
-}
-
-func extractNestedKeys(v map[string]interface{}) []string {
- var result []string
- for {
- if len(v) != 1 {
- return result
- }
- for k, m := range v {
- result = append(result, k)
- var ok bool
- v, ok = m.(map[string]interface{})
- if !ok {
- return result
- }
- }
-
- }
-}
-
-// Case insensitive matching tests.
-// A bit more comprehensive than needed given the current implementation,
-// but implementations change.
-// Probably still missing demonstrations of some ugly corner cases regarding
-// case insensitive matching and multiple fields.
-func TestCase(t *testing.T) {
- var caseToml = `
-tOpString = "string"
-tOpInt = 1
-tOpFloat = 1.1
-tOpBool = true
-tOpdate = 2006-01-02T15:04:05Z
-tOparray = [ "array" ]
-Match = "i should be in Match only"
-MatcH = "i should be in MatcH only"
-once = "just once"
-[nEst.eD]
-nEstedString = "another string"
-`
-
- type InsensitiveEd struct {
- NestedString string
- }
-
- type InsensitiveNest struct {
- Ed InsensitiveEd
- }
-
- type Insensitive struct {
- TopString string
- TopInt int
- TopFloat float64
- TopBool bool
- TopDate time.Time
- TopArray []string
- Match string
- MatcH string
- Once string
- OncE string
- Nest InsensitiveNest
- }
-
- tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5])
- if err != nil {
- panic(err)
- }
- expected := Insensitive{
- TopString: "string",
- TopInt: 1,
- TopFloat: 1.1,
- TopBool: true,
- TopDate: tme,
- TopArray: []string{"array"},
- MatcH: "i should be in MatcH only",
- Match: "i should be in Match only",
- Once: "just once",
- OncE: "",
- Nest: InsensitiveNest{
- Ed: InsensitiveEd{NestedString: "another string"},
- },
- }
- var got Insensitive
- if _, err := Decode(caseToml, &got); err != nil {
- t.Fatal(err)
- }
- if !reflect.DeepEqual(expected, got) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, got)
- }
-}
-
-func TestPointers(t *testing.T) {
- type Object struct {
- Type string
- Description string
- }
-
- type Dict struct {
- NamedObject map[string]*Object
- BaseObject *Object
- Strptr *string
- Strptrs []*string
- }
- s1, s2, s3 := "blah", "abc", "def"
- expected := &Dict{
- Strptr: &s1,
- Strptrs: []*string{&s2, &s3},
- NamedObject: map[string]*Object{
- "foo": {"FOO", "fooooo!!!"},
- "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"},
- },
- BaseObject: &Object{"BASE", "da base"},
- }
-
- ex1 := `
-Strptr = "blah"
-Strptrs = ["abc", "def"]
-
-[NamedObject.foo]
-Type = "FOO"
-Description = "fooooo!!!"
-
-[NamedObject.bar]
-Type = "BAR"
-Description = "ba-ba-ba-ba-barrrr!!!"
-
-[BaseObject]
-Type = "BASE"
-Description = "da base"
-`
- dict := new(Dict)
- _, err := Decode(ex1, dict)
- if err != nil {
- t.Errorf("Decode error: %v", err)
- }
- if !reflect.DeepEqual(expected, dict) {
- t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict)
- }
-}
-
-func TestDecodeDatetime(t *testing.T) {
- const noTimestamp = "2006-01-02T15:04:05"
- for _, tt := range []struct {
- s string
- t string
- format string
- }{
- {"1979-05-27T07:32:00Z", "1979-05-27T07:32:00Z", time.RFC3339},
- {"1979-05-27T00:32:00-07:00", "1979-05-27T00:32:00-07:00", time.RFC3339},
- {
- "1979-05-27T00:32:00.999999-07:00",
- "1979-05-27T00:32:00.999999-07:00",
- time.RFC3339,
- },
- {"1979-05-27T07:32:00", "1979-05-27T07:32:00", noTimestamp},
- {
- "1979-05-27T00:32:00.999999",
- "1979-05-27T00:32:00.999999",
- noTimestamp,
- },
- {"1979-05-27", "1979-05-27T00:00:00", noTimestamp},
- } {
- var x struct{ D time.Time }
- input := "d = " + tt.s
- if _, err := Decode(input, &x); err != nil {
- t.Errorf("Decode(%q): got error: %s", input, err)
- continue
- }
- want, err := time.ParseInLocation(tt.format, tt.t, time.Local)
- if err != nil {
- panic(err)
- }
- if !x.D.Equal(want) {
- t.Errorf("Decode(%q): got %s; want %s", input, x.D, want)
- }
- }
-}
-
-func TestDecodeBadDatetime(t *testing.T) {
- var x struct{ T time.Time }
- for _, s := range []string{
- "123",
- "2006-01-50T00:00:00Z",
- "2006-01-30T00:00",
- "2006-01-30T",
- } {
- input := "T = " + s
- if _, err := Decode(input, &x); err == nil {
- t.Errorf("Expected invalid DateTime error for %q", s)
- }
- }
-}
-
-func TestDecodeMultilineStrings(t *testing.T) {
- var x struct {
- S string
- }
- const s0 = `s = """
-a b \n c
-d e f
-"""`
- if _, err := Decode(s0, &x); err != nil {
- t.Fatal(err)
- }
- if want := "a b \n c\nd e f\n"; x.S != want {
- t.Errorf("got: %q; want: %q", x.S, want)
- }
- const s1 = `s = """a b c\
-"""`
- if _, err := Decode(s1, &x); err != nil {
- t.Fatal(err)
- }
- if want := "a b c"; x.S != want {
- t.Errorf("got: %q; want: %q", x.S, want)
- }
-}
-
-type sphere struct {
- Center [3]float64
- Radius float64
-}
-
-func TestDecodeSimpleArray(t *testing.T) {
- var s1 sphere
- if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestDecodeArrayWrongSize(t *testing.T) {
- var s1 sphere
- if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil {
- t.Fatal("Expected array type mismatch error")
- }
-}
-
-func TestDecodeLargeIntoSmallInt(t *testing.T) {
- type table struct {
- Value int8
- }
- var tab table
- if _, err := Decode(`value = 500`, &tab); err == nil {
- t.Fatal("Expected integer out-of-bounds error.")
- }
-}
-
-func TestDecodeSizedInts(t *testing.T) {
- type table struct {
- U8 uint8
- U16 uint16
- U32 uint32
- U64 uint64
- U uint
- I8 int8
- I16 int16
- I32 int32
- I64 int64
- I int
- }
- answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1}
- toml := `
- u8 = 1
- u16 = 1
- u32 = 1
- u64 = 1
- u = 1
- i8 = -1
- i16 = -1
- i32 = -1
- i64 = -1
- i = -1
- `
- var tab table
- if _, err := Decode(toml, &tab); err != nil {
- t.Fatal(err.Error())
- }
- if answer != tab {
- t.Fatalf("Expected %#v but got %#v", answer, tab)
- }
-}
-
-func TestDecodeInts(t *testing.T) {
- for _, tt := range []struct {
- s string
- want int64
- }{
- {"0", 0},
- {"+99", 99},
- {"-10", -10},
- {"1_234_567", 1234567},
- {"1_2_3_4", 1234},
- {"-9_223_372_036_854_775_808", math.MinInt64},
- {"9_223_372_036_854_775_807", math.MaxInt64},
- } {
- var x struct{ N int64 }
- input := "n = " + tt.s
- if _, err := Decode(input, &x); err != nil {
- t.Errorf("Decode(%q): got error: %s", input, err)
- continue
- }
- if x.N != tt.want {
- t.Errorf("Decode(%q): got %d; want %d", input, x.N, tt.want)
- }
- }
-}
-
-func TestDecodeFloats(t *testing.T) {
- for _, tt := range []struct {
- s string
- want float64
- }{
- {"+1.0", 1},
- {"3.1415", 3.1415},
- {"-0.01", -0.01},
- {"5e+22", 5e22},
- {"1e6", 1e6},
- {"-2E-2", -2e-2},
- {"6.626e-34", 6.626e-34},
- {"9_224_617.445_991_228_313", 9224617.445991228313},
- {"9_876.54_32e1_0", 9876.5432e10},
- } {
- var x struct{ N float64 }
- input := "n = " + tt.s
- if _, err := Decode(input, &x); err != nil {
- t.Errorf("Decode(%q): got error: %s", input, err)
- continue
- }
- if x.N != tt.want {
- t.Errorf("Decode(%q): got %f; want %f", input, x.N, tt.want)
- }
- }
-}
-
-func TestDecodeMalformedNumbers(t *testing.T) {
- for _, tt := range []struct {
- s string
- want string
- }{
- {"++99", "expected a digit"},
- {"0..1", "must be followed by one or more digits"},
- {"0.1.2", "Invalid float value"},
- {"1e2.3", "Invalid float value"},
- {"1e2e3", "Invalid float value"},
- {"_123", "expected value"},
- {"123_", "surrounded by digits"},
- {"1._23", "surrounded by digits"},
- {"1e__23", "surrounded by digits"},
- {"123.", "must be followed by one or more digits"},
- {"1.e2", "must be followed by one or more digits"},
- } {
- var x struct{ N interface{} }
- input := "n = " + tt.s
- _, err := Decode(input, &x)
- if err == nil {
- t.Errorf("Decode(%q): got nil, want error containing %q",
- input, tt.want)
- continue
- }
- if !strings.Contains(err.Error(), tt.want) {
- t.Errorf("Decode(%q): got %q, want error containing %q",
- input, err, tt.want)
- }
- }
-}
-
-func TestDecodeBadValues(t *testing.T) {
- for _, tt := range []struct {
- v interface{}
- want string
- }{
- {3, "non-pointer int"},
- {(*int)(nil), "nil"},
- } {
- _, err := Decode(`x = 3`, tt.v)
- if err == nil {
- t.Errorf("Decode(%v): got nil; want error containing %q",
- tt.v, tt.want)
- continue
- }
- if !strings.Contains(err.Error(), tt.want) {
- t.Errorf("Decode(%v): got %q; want error containing %q",
- tt.v, err, tt.want)
- }
- }
-}
-
-func TestUnmarshaler(t *testing.T) {
-
- var tomlBlob = `
-[dishes.hamboogie]
-name = "Hamboogie with fries"
-price = 10.99
-
-[[dishes.hamboogie.ingredients]]
-name = "Bread Bun"
-
-[[dishes.hamboogie.ingredients]]
-name = "Lettuce"
-
-[[dishes.hamboogie.ingredients]]
-name = "Real Beef Patty"
-
-[[dishes.hamboogie.ingredients]]
-name = "Tomato"
-
-[dishes.eggsalad]
-name = "Egg Salad with rice"
-price = 3.99
-
-[[dishes.eggsalad.ingredients]]
-name = "Egg"
-
-[[dishes.eggsalad.ingredients]]
-name = "Mayo"
-
-[[dishes.eggsalad.ingredients]]
-name = "Rice"
-`
- m := &menu{}
- if _, err := Decode(tomlBlob, m); err != nil {
- t.Fatal(err)
- }
-
- if len(m.Dishes) != 2 {
- t.Log("two dishes should be loaded with UnmarshalTOML()")
- t.Errorf("expected %d but got %d", 2, len(m.Dishes))
- }
-
- eggSalad := m.Dishes["eggsalad"]
- if _, ok := interface{}(eggSalad).(dish); !ok {
- t.Errorf("expected a dish")
- }
-
- if eggSalad.Name != "Egg Salad with rice" {
- t.Errorf("expected the dish to be named 'Egg Salad with rice'")
- }
-
- if len(eggSalad.Ingredients) != 3 {
- t.Log("dish should be loaded with UnmarshalTOML()")
- t.Errorf("expected %d but got %d", 3, len(eggSalad.Ingredients))
- }
-
- found := false
- for _, i := range eggSalad.Ingredients {
- if i.Name == "Rice" {
- found = true
- break
- }
- }
- if !found {
- t.Error("Rice was not loaded in UnmarshalTOML()")
- }
-
- // test on a value - must be passed as *
- o := menu{}
- if _, err := Decode(tomlBlob, &o); err != nil {
- t.Fatal(err)
- }
-
-}
-
-func TestDecodeInlineTable(t *testing.T) {
- input := `
-[CookieJar]
-Types = {Chocolate = "yummy", Oatmeal = "best ever"}
-
-[Seasons]
-Locations = {NY = {Temp = "not cold", Rating = 4}, MI = {Temp = "freezing", Rating = 9}}
-`
- type cookieJar struct {
- Types map[string]string
- }
- type properties struct {
- Temp string
- Rating int
- }
- type seasons struct {
- Locations map[string]properties
- }
- type wrapper struct {
- CookieJar cookieJar
- Seasons seasons
- }
- var got wrapper
-
- meta, err := Decode(input, &got)
- if err != nil {
- t.Fatal(err)
- }
- want := wrapper{
- CookieJar: cookieJar{
- Types: map[string]string{
- "Chocolate": "yummy",
- "Oatmeal": "best ever",
- },
- },
- Seasons: seasons{
- Locations: map[string]properties{
- "NY": {
- Temp: "not cold",
- Rating: 4,
- },
- "MI": {
- Temp: "freezing",
- Rating: 9,
- },
- },
- },
- }
- if !reflect.DeepEqual(got, want) {
- t.Fatalf("after decode, got:\n\n%#v\n\nwant:\n\n%#v", got, want)
- }
- if len(meta.keys) != 12 {
- t.Errorf("after decode, got %d meta keys; want 12", len(meta.keys))
- }
- if len(meta.types) != 12 {
- t.Errorf("after decode, got %d meta types; want 12", len(meta.types))
- }
-}
-
-func TestDecodeInlineTableArray(t *testing.T) {
- type point struct {
- X, Y, Z int
- }
- var got struct {
- Points []point
- }
- // Example inline table array from the spec.
- const in = `
-points = [ { x = 1, y = 2, z = 3 },
- { x = 7, y = 8, z = 9 },
- { x = 2, y = 4, z = 8 } ]
-
-`
- if _, err := Decode(in, &got); err != nil {
- t.Fatal(err)
- }
- want := []point{
- {X: 1, Y: 2, Z: 3},
- {X: 7, Y: 8, Z: 9},
- {X: 2, Y: 4, Z: 8},
- }
- if !reflect.DeepEqual(got.Points, want) {
- t.Errorf("got %#v; want %#v", got.Points, want)
- }
-}
-
-func TestDecodeMalformedInlineTable(t *testing.T) {
- for _, tt := range []struct {
- s string
- want string
- }{
- {"{,}", "unexpected comma"},
- {"{x = 3 y = 4}", "expected a comma or an inline table terminator"},
- {"{x=3,,y=4}", "unexpected comma"},
- {"{x=3,\ny=4}", "newlines not allowed"},
- {"{x=3\n,y=4}", "newlines not allowed"},
- } {
- var x struct{ A map[string]int }
- input := "a = " + tt.s
- _, err := Decode(input, &x)
- if err == nil {
- t.Errorf("Decode(%q): got nil, want error containing %q",
- input, tt.want)
- continue
- }
- if !strings.Contains(err.Error(), tt.want) {
- t.Errorf("Decode(%q): got %q, want error containing %q",
- input, err, tt.want)
- }
- }
-}
-
-type menu struct {
- Dishes map[string]dish
-}
-
-func (m *menu) UnmarshalTOML(p interface{}) error {
- m.Dishes = make(map[string]dish)
- data, _ := p.(map[string]interface{})
- dishes := data["dishes"].(map[string]interface{})
- for n, v := range dishes {
- if d, ok := v.(map[string]interface{}); ok {
- nd := dish{}
- nd.UnmarshalTOML(d)
- m.Dishes[n] = nd
- } else {
- return fmt.Errorf("not a dish")
- }
- }
- return nil
-}
-
-type dish struct {
- Name string
- Price float32
- Ingredients []ingredient
-}
-
-func (d *dish) UnmarshalTOML(p interface{}) error {
- data, _ := p.(map[string]interface{})
- d.Name, _ = data["name"].(string)
- d.Price, _ = data["price"].(float32)
- ingredients, _ := data["ingredients"].([]map[string]interface{})
- for _, e := range ingredients {
- n, _ := interface{}(e).(map[string]interface{})
- name, _ := n["name"].(string)
- i := ingredient{name}
- d.Ingredients = append(d.Ingredients, i)
- }
- return nil
-}
-
-type ingredient struct {
- Name string
-}
-
-func TestDecodeSlices(t *testing.T) {
- type T struct {
- S []string
- }
- for i, tt := range []struct {
- v T
- input string
- want T
- }{
- {T{}, "", T{}},
- {T{[]string{}}, "", T{[]string{}}},
- {T{[]string{"a", "b"}}, "", T{[]string{"a", "b"}}},
- {T{}, "S = []", T{[]string{}}},
- {T{[]string{}}, "S = []", T{[]string{}}},
- {T{[]string{"a", "b"}}, "S = []", T{[]string{}}},
- {T{}, `S = ["x"]`, T{[]string{"x"}}},
- {T{[]string{}}, `S = ["x"]`, T{[]string{"x"}}},
- {T{[]string{"a", "b"}}, `S = ["x"]`, T{[]string{"x"}}},
- } {
- if _, err := Decode(tt.input, &tt.v); err != nil {
- t.Errorf("[%d] %s", i, err)
- continue
- }
- if !reflect.DeepEqual(tt.v, tt.want) {
- t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want)
- }
- }
-}
-
-func TestDecodePrimitive(t *testing.T) {
- type S struct {
- P Primitive
- }
- type T struct {
- S []int
- }
- slicep := func(s []int) *[]int { return &s }
- arrayp := func(a [2]int) *[2]int { return &a }
- mapp := func(m map[string]int) *map[string]int { return &m }
- for i, tt := range []struct {
- v interface{}
- input string
- want interface{}
- }{
- // slices
- {slicep(nil), "", slicep(nil)},
- {slicep([]int{}), "", slicep([]int{})},
- {slicep([]int{1, 2, 3}), "", slicep([]int{1, 2, 3})},
- {slicep(nil), "P = [1,2]", slicep([]int{1, 2})},
- {slicep([]int{}), "P = [1,2]", slicep([]int{1, 2})},
- {slicep([]int{1, 2, 3}), "P = [1,2]", slicep([]int{1, 2})},
-
- // arrays
- {arrayp([2]int{2, 3}), "", arrayp([2]int{2, 3})},
- {arrayp([2]int{2, 3}), "P = [3,4]", arrayp([2]int{3, 4})},
-
- // maps
- {mapp(nil), "", mapp(nil)},
- {mapp(map[string]int{}), "", mapp(map[string]int{})},
- {mapp(map[string]int{"a": 1}), "", mapp(map[string]int{"a": 1})},
- {mapp(nil), "[P]\na = 2", mapp(map[string]int{"a": 2})},
- {mapp(map[string]int{}), "[P]\na = 2", mapp(map[string]int{"a": 2})},
- {mapp(map[string]int{"a": 1, "b": 3}), "[P]\na = 2", mapp(map[string]int{"a": 2, "b": 3})},
-
- // structs
- {&T{nil}, "[P]", &T{nil}},
- {&T{[]int{}}, "[P]", &T{[]int{}}},
- {&T{[]int{1, 2, 3}}, "[P]", &T{[]int{1, 2, 3}}},
- {&T{nil}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
- {&T{[]int{}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
- {&T{[]int{1, 2, 3}}, "[P]\nS = [1,2]", &T{[]int{1, 2}}},
- } {
- var s S
- md, err := Decode(tt.input, &s)
- if err != nil {
- t.Errorf("[%d] Decode error: %s", i, err)
- continue
- }
- if err := md.PrimitiveDecode(s.P, tt.v); err != nil {
- t.Errorf("[%d] PrimitiveDecode error: %s", i, err)
- continue
- }
- if !reflect.DeepEqual(tt.v, tt.want) {
- t.Errorf("[%d] got %#v; want %#v", i, tt.v, tt.want)
- }
- }
-}
-
-func TestDecodeErrors(t *testing.T) {
- for _, s := range []string{
- `x="`,
- `x='`,
- `x='''`,
-
- // Cases found by fuzzing in
- // https://github.com/BurntSushi/toml/issues/155.
- `""�`, // used to panic with index out of range
- `e="""`, // used to hang
- } {
- var x struct{}
- _, err := Decode(s, &x)
- if err == nil {
- t.Errorf("Decode(%q): got nil error", s)
- }
- }
-}
-
-// Test for https://github.com/BurntSushi/toml/pull/166.
-func TestDecodeBoolArray(t *testing.T) {
- for _, tt := range []struct {
- s string
- got interface{}
- want interface{}
- }{
- {
- "a = [true, false]",
- &struct{ A []bool }{},
- &struct{ A []bool }{[]bool{true, false}},
- },
- {
- "a = {a = true, b = false}",
- &struct{ A map[string]bool }{},
- &struct{ A map[string]bool }{map[string]bool{"a": true, "b": false}},
- },
- } {
- if _, err := Decode(tt.s, tt.got); err != nil {
- t.Errorf("Decode(%q): %s", tt.s, err)
- continue
- }
- if !reflect.DeepEqual(tt.got, tt.want) {
- t.Errorf("Decode(%q): got %#v; want %#v", tt.s, tt.got, tt.want)
- }
- }
-}
-
-func ExampleMetaData_PrimitiveDecode() {
- var md MetaData
- var err error
-
- var tomlBlob = `
-ranking = ["Springsteen", "J Geils"]
-
-[bands.Springsteen]
-started = 1973
-albums = ["Greetings", "WIESS", "Born to Run", "Darkness"]
-
-[bands."J Geils"]
-started = 1970
-albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"]
-`
-
- type band struct {
- Started int
- Albums []string
- }
- type classics struct {
- Ranking []string
- Bands map[string]Primitive
- }
-
- // Do the initial decode. Reflection is delayed on Primitive values.
- var music classics
- if md, err = Decode(tomlBlob, &music); err != nil {
- log.Fatal(err)
- }
-
- // MetaData still includes information on Primitive values.
- fmt.Printf("Is `bands.Springsteen` defined? %v\n",
- md.IsDefined("bands", "Springsteen"))
-
- // Decode primitive data into Go values.
- for _, artist := range music.Ranking {
- // A band is a primitive value, so we need to decode it to get a
- // real `band` value.
- primValue := music.Bands[artist]
-
- var aBand band
- if err = md.PrimitiveDecode(primValue, &aBand); err != nil {
- log.Fatal(err)
- }
- fmt.Printf("%s started in %d.\n", artist, aBand.Started)
- }
- // Check to see if there were any fields left undecoded.
- // Note that this won't be empty before decoding the Primitive value!
- fmt.Printf("Undecoded: %q\n", md.Undecoded())
-
- // Output:
- // Is `bands.Springsteen` defined? true
- // Springsteen started in 1973.
- // J Geils started in 1970.
- // Undecoded: []
-}
-
-func ExampleDecode() {
- var tomlBlob = `
-# Some comments.
-[alpha]
-ip = "10.0.0.1"
-
- [alpha.config]
- Ports = [ 8001, 8002 ]
- Location = "Toronto"
- Created = 1987-07-05T05:45:00Z
-
-[beta]
-ip = "10.0.0.2"
-
- [beta.config]
- Ports = [ 9001, 9002 ]
- Location = "New Jersey"
- Created = 1887-01-05T05:55:00Z
-`
-
- type serverConfig struct {
- Ports []int
- Location string
- Created time.Time
- }
-
- type server struct {
- IP string `toml:"ip,omitempty"`
- Config serverConfig `toml:"config"`
- }
-
- type servers map[string]server
-
- var config servers
- if _, err := Decode(tomlBlob, &config); err != nil {
- log.Fatal(err)
- }
-
- for _, name := range []string{"alpha", "beta"} {
- s := config[name]
- fmt.Printf("Server: %s (ip: %s) in %s created on %s\n",
- name, s.IP, s.Config.Location,
- s.Config.Created.Format("2006-01-02"))
- fmt.Printf("Ports: %v\n", s.Config.Ports)
- }
-
- // Output:
- // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05
- // Ports: [8001 8002]
- // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05
- // Ports: [9001 9002]
-}
-
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-
-// Example Unmarshaler shows how to decode TOML strings into your own
-// custom data type.
-func Example_unmarshaler() {
- blob := `
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-`
- type song struct {
- Name string
- Duration duration
- }
- type songs struct {
- Song []song
- }
- var favorites songs
- if _, err := Decode(blob, &favorites); err != nil {
- log.Fatal(err)
- }
-
- // Code to implement the TextUnmarshaler interface for `duration`:
- //
- // type duration struct {
- // time.Duration
- // }
- //
- // func (d *duration) UnmarshalText(text []byte) error {
- // var err error
- // d.Duration, err = time.ParseDuration(string(text))
- // return err
- // }
-
- for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
- }
- // Output:
- // Thunder Road (4m49s)
- // Stairway to Heaven (8m3s)
-}
-
-// Example StrictDecoding shows how to detect whether there are keys in the
-// TOML document that weren't decoded into the value given. This is useful
-// for returning an error to the user if they've included extraneous fields
-// in their configuration.
-func Example_strictDecoding() {
- var blob = `
-key1 = "value1"
-key2 = "value2"
-key3 = "value3"
-`
- type config struct {
- Key1 string
- Key3 string
- }
-
- var conf config
- md, err := Decode(blob, &conf)
- if err != nil {
- log.Fatal(err)
- }
- fmt.Printf("Undecoded keys: %q\n", md.Undecoded())
- // Output:
- // Undecoded keys: ["key2"]
-}
-
-// Example UnmarshalTOML shows how to implement a struct type that knows how to
-// unmarshal itself. The struct must take full responsibility for mapping the
-// values passed into the struct. The method may be used with interfaces in a
-// struct in cases where the actual type is not known until the data is
-// examined.
-func Example_unmarshalTOML() {
-
- var blob = `
-[[parts]]
-type = "valve"
-id = "valve-1"
-size = 1.2
-rating = 4
-
-[[parts]]
-type = "valve"
-id = "valve-2"
-size = 2.1
-rating = 5
-
-[[parts]]
-type = "pipe"
-id = "pipe-1"
-length = 2.1
-diameter = 12
-
-[[parts]]
-type = "cable"
-id = "cable-1"
-length = 12
-rating = 3.1
-`
- o := &order{}
- err := Unmarshal([]byte(blob), o)
- if err != nil {
- log.Fatal(err)
- }
-
- fmt.Println(len(o.parts))
-
- for _, part := range o.parts {
- fmt.Println(part.Name())
- }
-
- // Code to implement UmarshalJSON.
-
- // type order struct {
- // // NOTE `order.parts` is a private slice of type `part` which is an
- // // interface and may only be loaded from toml using the
- // // UnmarshalTOML() method of the Umarshaler interface.
- // parts parts
- // }
-
- // func (o *order) UnmarshalTOML(data interface{}) error {
-
- // // NOTE the example below contains detailed type casting to show how
- // // the 'data' is retrieved. In operational use, a type cast wrapper
- // // may be preferred e.g.
- // //
- // // func AsMap(v interface{}) (map[string]interface{}, error) {
- // // return v.(map[string]interface{})
- // // }
- // //
- // // resulting in:
- // // d, _ := AsMap(data)
- // //
-
- // d, _ := data.(map[string]interface{})
- // parts, _ := d["parts"].([]map[string]interface{})
-
- // for _, p := range parts {
-
- // typ, _ := p["type"].(string)
- // id, _ := p["id"].(string)
-
- // // detect the type of part and handle each case
- // switch p["type"] {
- // case "valve":
-
- // size := float32(p["size"].(float64))
- // rating := int(p["rating"].(int64))
-
- // valve := &valve{
- // Type: typ,
- // ID: id,
- // Size: size,
- // Rating: rating,
- // }
-
- // o.parts = append(o.parts, valve)
-
- // case "pipe":
-
- // length := float32(p["length"].(float64))
- // diameter := int(p["diameter"].(int64))
-
- // pipe := &pipe{
- // Type: typ,
- // ID: id,
- // Length: length,
- // Diameter: diameter,
- // }
-
- // o.parts = append(o.parts, pipe)
-
- // case "cable":
-
- // length := int(p["length"].(int64))
- // rating := float32(p["rating"].(float64))
-
- // cable := &cable{
- // Type: typ,
- // ID: id,
- // Length: length,
- // Rating: rating,
- // }
-
- // o.parts = append(o.parts, cable)
-
- // }
- // }
-
- // return nil
- // }
-
- // type parts []part
-
- // type part interface {
- // Name() string
- // }
-
- // type valve struct {
- // Type string
- // ID string
- // Size float32
- // Rating int
- // }
-
- // func (v *valve) Name() string {
- // return fmt.Sprintf("VALVE: %s", v.ID)
- // }
-
- // type pipe struct {
- // Type string
- // ID string
- // Length float32
- // Diameter int
- // }
-
- // func (p *pipe) Name() string {
- // return fmt.Sprintf("PIPE: %s", p.ID)
- // }
-
- // type cable struct {
- // Type string
- // ID string
- // Length int
- // Rating float32
- // }
-
- // func (c *cable) Name() string {
- // return fmt.Sprintf("CABLE: %s", c.ID)
- // }
-
- // Output:
- // 4
- // VALVE: valve-1
- // VALVE: valve-2
- // PIPE: pipe-1
- // CABLE: cable-1
-
-}
-
-type order struct {
- // NOTE `order.parts` is a private slice of type `part` which is an
- // interface and may only be loaded from toml using the UnmarshalTOML()
- // method of the Umarshaler interface.
- parts parts
-}
-
-func (o *order) UnmarshalTOML(data interface{}) error {
-
- // NOTE the example below contains detailed type casting to show how
- // the 'data' is retrieved. In operational use, a type cast wrapper
- // may be preferred e.g.
- //
- // func AsMap(v interface{}) (map[string]interface{}, error) {
- // return v.(map[string]interface{})
- // }
- //
- // resulting in:
- // d, _ := AsMap(data)
- //
-
- d, _ := data.(map[string]interface{})
- parts, _ := d["parts"].([]map[string]interface{})
-
- for _, p := range parts {
-
- typ, _ := p["type"].(string)
- id, _ := p["id"].(string)
-
- // detect the type of part and handle each case
- switch p["type"] {
- case "valve":
-
- size := float32(p["size"].(float64))
- rating := int(p["rating"].(int64))
-
- valve := &valve{
- Type: typ,
- ID: id,
- Size: size,
- Rating: rating,
- }
-
- o.parts = append(o.parts, valve)
-
- case "pipe":
-
- length := float32(p["length"].(float64))
- diameter := int(p["diameter"].(int64))
-
- pipe := &pipe{
- Type: typ,
- ID: id,
- Length: length,
- Diameter: diameter,
- }
-
- o.parts = append(o.parts, pipe)
-
- case "cable":
-
- length := int(p["length"].(int64))
- rating := float32(p["rating"].(float64))
-
- cable := &cable{
- Type: typ,
- ID: id,
- Length: length,
- Rating: rating,
- }
-
- o.parts = append(o.parts, cable)
-
- }
- }
-
- return nil
-}
-
-type parts []part
-
-type part interface {
- Name() string
-}
-
-type valve struct {
- Type string
- ID string
- Size float32
- Rating int
-}
-
-func (v *valve) Name() string {
- return fmt.Sprintf("VALVE: %s", v.ID)
-}
-
-type pipe struct {
- Type string
- ID string
- Length float32
- Diameter int
-}
-
-func (p *pipe) Name() string {
- return fmt.Sprintf("PIPE: %s", p.ID)
-}
-
-type cable struct {
- Type string
- ID string
- Length int
- Rating float32
-}
-
-func (c *cable) Name() string {
- return fmt.Sprintf("CABLE: %s", c.ID)
-}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
deleted file mode 100644
index b371f39..0000000
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index d905c21..0000000
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package toml
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
- "\"", "\\\"",
- "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
-
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
- rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
- return err
- }
- return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if terr, ok := r.(tomlEncodeError); ok {
- err = terr.error
- return
- }
- panic(r)
- }
- }()
- enc.encode(key, rv)
- return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
- return
- }
-
- k := rv.Kind()
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64,
- reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
- enc.eArrayOfTables(key, rv)
- } else {
- enc.keyEqElement(key, rv)
- }
- case reflect.Interface:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Map:
- if rv.IsNil() {
- return
- }
- enc.eTable(key, rv)
- case reflect.Ptr:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Struct:
- enc.eTable(key, rv)
- default:
- panic(e("unsupported type for key '%s': %s", key, k))
- }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
- switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
- return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
- encPanic(err)
- } else {
- enc.writeQuoted(string(s))
- }
- return
- }
- switch rv.Kind() {
- case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
- case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
- case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
- case reflect.Array, reflect.Slice:
- enc.eArrayOrSliceElement(rv)
- case reflect.Interface:
- enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
- default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
- }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
- }
- return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
- length := rv.Len()
- enc.wf("[")
- for i := 0; i < length; i++ {
- elem := rv.Index(i)
- enc.eElement(elem)
- if i != length-1 {
- enc.wf(", ")
- }
- }
- enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
- if isNil(trv) {
- continue
- }
- panicIfInvalidKey(key)
- enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- enc.eMapOrStruct(key, trv)
- }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
- if len(key) == 1 {
- // Output an extra newline between top-level tables.
- // (The newline isn't written if nothing else has been written though.)
- enc.newline()
- }
- if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- }
- enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
- case reflect.Map:
- enc.eMap(key, rv)
- case reflect.Struct:
- enc.eStruct(key, rv)
- default:
- panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
- }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
- rt := rv.Type()
- if rt.Key().Kind() != reflect.String {
- encPanic(errNonString)
- }
-
- // Sort keys so that we have deterministic output. And write keys directly
- // underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
- for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
- mapKeysSub = append(mapKeysSub, k)
- } else {
- mapKeysDirect = append(mapKeysDirect, k)
- }
- }
-
- var writeMapKeys = func(mapKeys []string) {
- sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
- continue
- }
- enc.encode(key.add(mapKey), mrv)
- }
- }
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
- // Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
- // table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
- addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
- for i := 0; i < rt.NumField(); i++ {
- f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
- continue
- }
- frv := rv.Field(i)
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
- }
- continue
- }
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
- }
- }
-
- if typeIsHash(tomlTypeOfGo(frv)) {
- fieldsSub = append(fieldsSub, append(start, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
- }
- }
- addFields(rt, rv, nil)
-
- var writeFields = func(fields [][]int) {
- for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
- continue
- }
-
- opts := getOptions(sft.Tag)
- if opts.skip {
- continue
- }
- keyName := sft.Name
- if opts.name != "" {
- keyName = opts.name
- }
- if opts.omitempty && isEmpty(sf) {
- continue
- }
- if opts.omitzero && isZero(sf) {
- continue
- }
-
- enc.encode(key.add(keyName), sf)
- }
- }
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() {
- return nil
- }
- switch rv.Kind() {
- case reflect.Bool:
- return tomlBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64:
- return tomlInteger
- case reflect.Float32, reflect.Float64:
- return tomlFloat
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
- return tomlArrayHash
- }
- return tomlArray
- case reflect.Ptr, reflect.Interface:
- return tomlTypeOfGo(rv.Elem())
- case reflect.String:
- return tomlString
- case reflect.Map:
- return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
- default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
- }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
-
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
- encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
- }
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
- }
- return firstType
-}
-
-type tagOptions struct {
- skip bool // "-"
- name string
- omitempty bool
- omitzero bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
- t := tag.Get("toml")
- if t == "-" {
- return tagOptions{skip: true}
- }
- var opts tagOptions
- parts := strings.Split(t, ",")
- opts.name = parts[0]
- for _, s := range parts[1:] {
- switch s {
- case "omitempty":
- opts.omitempty = true
- case "omitzero":
- opts.omitzero = true
- }
- }
- return opts
-}
-
-func isZero(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return rv.Float() == 0.0
- }
- return false
-}
-
-func isEmpty(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
- return rv.Len() == 0
- case reflect.Bool:
- return !rv.Bool()
- }
- return false
-}
-
-func (enc *Encoder) newline() {
- if enc.hasWritten {
- enc.wf("\n")
- }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key)
- enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
- enc.eElement(val)
- enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
- encPanic(err)
- }
- enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
- return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
- panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
- return v
- }
-}
-
-func isNil(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return rv.IsNil()
- default:
- return false
- }
-}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
diff --git a/vendor/github.com/BurntSushi/toml/encode_test.go b/vendor/github.com/BurntSushi/toml/encode_test.go
deleted file mode 100644
index 673b7b0..0000000
--- a/vendor/github.com/BurntSushi/toml/encode_test.go
+++ /dev/null
@@ -1,615 +0,0 @@
-package toml
-
-import (
- "bytes"
- "fmt"
- "log"
- "net"
- "testing"
- "time"
-)
-
-func TestEncodeRoundTrip(t *testing.T) {
- type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time
- Ipaddress net.IP
- }
-
- var inputs = Config{
- 13,
- []string{"one", "two", "three"},
- 3.145,
- []int{11, 2, 3, 4},
- time.Now(),
- net.ParseIP("192.168.59.254"),
- }
-
- var firstBuffer bytes.Buffer
- e := NewEncoder(&firstBuffer)
- err := e.Encode(inputs)
- if err != nil {
- t.Fatal(err)
- }
- var outputs Config
- if _, err := Decode(firstBuffer.String(), &outputs); err != nil {
- t.Logf("Could not decode:\n-----\n%s\n-----\n",
- firstBuffer.String())
- t.Fatal(err)
- }
-
- // could test each value individually, but I'm lazy
- var secondBuffer bytes.Buffer
- e2 := NewEncoder(&secondBuffer)
- err = e2.Encode(outputs)
- if err != nil {
- t.Fatal(err)
- }
- if firstBuffer.String() != secondBuffer.String() {
- t.Error(
- firstBuffer.String(),
- "\n\n is not identical to\n\n",
- secondBuffer.String())
- }
-}
-
-// XXX(burntsushi)
-// I think these tests probably should be removed. They are good, but they
-// ought to be obsolete by toml-test.
-func TestEncode(t *testing.T) {
- type Embedded struct {
- Int int `toml:"_int"`
- }
- type NonStruct int
-
- date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600))
- dateStr := "2014-05-11T19:30:40Z"
-
- tests := map[string]struct {
- input interface{}
- wantOutput string
- wantError error
- }{
- "bool field": {
- input: struct {
- BoolTrue bool
- BoolFalse bool
- }{true, false},
- wantOutput: "BoolTrue = true\nBoolFalse = false\n",
- },
- "int fields": {
- input: struct {
- Int int
- Int8 int8
- Int16 int16
- Int32 int32
- Int64 int64
- }{1, 2, 3, 4, 5},
- wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n",
- },
- "uint fields": {
- input: struct {
- Uint uint
- Uint8 uint8
- Uint16 uint16
- Uint32 uint32
- Uint64 uint64
- }{1, 2, 3, 4, 5},
- wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" +
- "\nUint64 = 5\n",
- },
- "float fields": {
- input: struct {
- Float32 float32
- Float64 float64
- }{1.5, 2.5},
- wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n",
- },
- "string field": {
- input: struct{ String string }{"foo"},
- wantOutput: "String = \"foo\"\n",
- },
- "string field and unexported field": {
- input: struct {
- String string
- unexported int
- }{"foo", 0},
- wantOutput: "String = \"foo\"\n",
- },
- "datetime field in UTC": {
- input: struct{ Date time.Time }{date},
- wantOutput: fmt.Sprintf("Date = %s\n", dateStr),
- },
- "datetime field as primitive": {
- // Using a map here to fail if isStructOrMap() returns true for
- // time.Time.
- input: map[string]interface{}{
- "Date": date,
- "Int": 1,
- },
- wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr),
- },
- "array fields": {
- input: struct {
- IntArray0 [0]int
- IntArray3 [3]int
- }{[0]int{}, [3]int{1, 2, 3}},
- wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n",
- },
- "slice fields": {
- input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{
- nil, []int{}, []int{1, 2, 3},
- },
- wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n",
- },
- "datetime slices": {
- input: struct{ DatetimeSlice []time.Time }{
- []time.Time{date, date},
- },
- wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n",
- dateStr, dateStr),
- },
- "nested arrays and slices": {
- input: struct {
- SliceOfArrays [][2]int
- ArrayOfSlices [2][]int
- SliceOfArraysOfSlices [][2][]int
- ArrayOfSlicesOfArrays [2][][2]int
- SliceOfMixedArrays [][2]interface{}
- ArrayOfMixedSlices [2][]interface{}
- }{
- [][2]int{{1, 2}, {3, 4}},
- [2][]int{{1, 2}, {3, 4}},
- [][2][]int{
- {
- {1, 2}, {3, 4},
- },
- {
- {5, 6}, {7, 8},
- },
- },
- [2][][2]int{
- {
- {1, 2}, {3, 4},
- },
- {
- {5, 6}, {7, 8},
- },
- },
- [][2]interface{}{
- {1, 2}, {"a", "b"},
- },
- [2][]interface{}{
- {1, 2}, {"a", "b"},
- },
- },
- wantOutput: `SliceOfArrays = [[1, 2], [3, 4]]
-ArrayOfSlices = [[1, 2], [3, 4]]
-SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
-SliceOfMixedArrays = [[1, 2], ["a", "b"]]
-ArrayOfMixedSlices = [[1, 2], ["a", "b"]]
-`,
- },
- "empty slice": {
- input: struct{ Empty []interface{} }{[]interface{}{}},
- wantOutput: "Empty = []\n",
- },
- "(error) slice with element type mismatch (string and integer)": {
- input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}},
- wantError: errArrayMixedElementTypes,
- },
- "(error) slice with element type mismatch (integer and float)": {
- input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}},
- wantError: errArrayMixedElementTypes,
- },
- "slice with elems of differing Go types, same TOML types": {
- input: struct {
- MixedInts []interface{}
- MixedFloats []interface{}
- }{
- []interface{}{
- int(1), int8(2), int16(3), int32(4), int64(5),
- uint(1), uint8(2), uint16(3), uint32(4), uint64(5),
- },
- []interface{}{float32(1.5), float64(2.5)},
- },
- wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" +
- "MixedFloats = [1.5, 2.5]\n",
- },
- "(error) slice w/ element type mismatch (one is nested array)": {
- input: struct{ Mixed []interface{} }{
- []interface{}{1, []interface{}{2}},
- },
- wantError: errArrayMixedElementTypes,
- },
- "(error) slice with 1 nil element": {
- input: struct{ NilElement1 []interface{} }{[]interface{}{nil}},
- wantError: errArrayNilElement,
- },
- "(error) slice with 1 nil element (and other non-nil elements)": {
- input: struct{ NilElement []interface{} }{
- []interface{}{1, nil},
- },
- wantError: errArrayNilElement,
- },
- "simple map": {
- input: map[string]int{"a": 1, "b": 2},
- wantOutput: "a = 1\nb = 2\n",
- },
- "map with interface{} value type": {
- input: map[string]interface{}{"a": 1, "b": "c"},
- wantOutput: "a = 1\nb = \"c\"\n",
- },
- "map with interface{} value type, some of which are structs": {
- input: map[string]interface{}{
- "a": struct{ Int int }{2},
- "b": 1,
- },
- wantOutput: "b = 1\n\n[a]\n Int = 2\n",
- },
- "nested map": {
- input: map[string]map[string]int{
- "a": {"b": 1},
- "c": {"d": 2},
- },
- wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n",
- },
- "nested struct": {
- input: struct{ Struct struct{ Int int } }{
- struct{ Int int }{1},
- },
- wantOutput: "[Struct]\n Int = 1\n",
- },
- "nested struct and non-struct field": {
- input: struct {
- Struct struct{ Int int }
- Bool bool
- }{struct{ Int int }{1}, true},
- wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n",
- },
- "2 nested structs": {
- input: struct{ Struct1, Struct2 struct{ Int int } }{
- struct{ Int int }{1}, struct{ Int int }{2},
- },
- wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n",
- },
- "deeply nested structs": {
- input: struct {
- Struct1, Struct2 struct{ Struct3 *struct{ Int int } }
- }{
- struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}},
- struct{ Struct3 *struct{ Int int } }{nil},
- },
- wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" +
- "\n\n[Struct2]\n",
- },
- "nested struct with nil struct elem": {
- input: struct {
- Struct struct{ Inner *struct{ Int int } }
- }{
- struct{ Inner *struct{ Int int } }{nil},
- },
- wantOutput: "[Struct]\n",
- },
- "nested struct with no fields": {
- input: struct {
- Struct struct{ Inner struct{} }
- }{
- struct{ Inner struct{} }{struct{}{}},
- },
- wantOutput: "[Struct]\n [Struct.Inner]\n",
- },
- "struct with tags": {
- input: struct {
- Struct struct {
- Int int `toml:"_int"`
- } `toml:"_struct"`
- Bool bool `toml:"_bool"`
- }{
- struct {
- Int int `toml:"_int"`
- }{1}, true,
- },
- wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n",
- },
- "embedded struct": {
- input: struct{ Embedded }{Embedded{1}},
- wantOutput: "_int = 1\n",
- },
- "embedded *struct": {
- input: struct{ *Embedded }{&Embedded{1}},
- wantOutput: "_int = 1\n",
- },
- "nested embedded struct": {
- input: struct {
- Struct struct{ Embedded } `toml:"_struct"`
- }{struct{ Embedded }{Embedded{1}}},
- wantOutput: "[_struct]\n _int = 1\n",
- },
- "nested embedded *struct": {
- input: struct {
- Struct struct{ *Embedded } `toml:"_struct"`
- }{struct{ *Embedded }{&Embedded{1}}},
- wantOutput: "[_struct]\n _int = 1\n",
- },
- "embedded non-struct": {
- input: struct{ NonStruct }{5},
- wantOutput: "NonStruct = 5\n",
- },
- "array of tables": {
- input: struct {
- Structs []*struct{ Int int } `toml:"struct"`
- }{
- []*struct{ Int int }{{1}, {3}},
- },
- wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n",
- },
- "array of tables order": {
- input: map[string]interface{}{
- "map": map[string]interface{}{
- "zero": 5,
- "arr": []map[string]int{
- {
- "friend": 5,
- },
- },
- },
- },
- wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n",
- },
- "(error) top-level slice": {
- input: []struct{ Int int }{{1}, {2}, {3}},
- wantError: errNoKey,
- },
- "(error) slice of slice": {
- input: struct {
- Slices [][]struct{ Int int }
- }{
- [][]struct{ Int int }{{{1}}, {{2}}, {{3}}},
- },
- wantError: errArrayNoTable,
- },
- "(error) map no string key": {
- input: map[int]string{1: ""},
- wantError: errNonString,
- },
- "(error) empty key name": {
- input: map[string]int{"": 1},
- wantError: errAnything,
- },
- "(error) empty map name": {
- input: map[string]interface{}{
- "": map[string]int{"v": 1},
- },
- wantError: errAnything,
- },
- }
- for label, test := range tests {
- encodeExpected(t, label, test.input, test.wantOutput, test.wantError)
- }
-}
-
-func TestEncodeNestedTableArrays(t *testing.T) {
- type song struct {
- Name string `toml:"name"`
- }
- type album struct {
- Name string `toml:"name"`
- Songs []song `toml:"songs"`
- }
- type springsteen struct {
- Albums []album `toml:"albums"`
- }
- value := springsteen{
- []album{
- {"Born to Run",
- []song{{"Jungleland"}, {"Meeting Across the River"}}},
- {"Born in the USA",
- []song{{"Glory Days"}, {"Dancing in the Dark"}}},
- },
- }
- expected := `[[albums]]
- name = "Born to Run"
-
- [[albums.songs]]
- name = "Jungleland"
-
- [[albums.songs]]
- name = "Meeting Across the River"
-
-[[albums]]
- name = "Born in the USA"
-
- [[albums.songs]]
- name = "Glory Days"
-
- [[albums.songs]]
- name = "Dancing in the Dark"
-`
- encodeExpected(t, "nested table arrays", value, expected, nil)
-}
-
-func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) {
- type Alpha struct {
- V int
- }
- type Beta struct {
- V int
- }
- type Conf struct {
- V int
- A Alpha
- B []Beta
- }
-
- val := Conf{
- V: 1,
- A: Alpha{2},
- B: []Beta{{3}},
- }
- expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n"
- encodeExpected(t, "array hash with normal hash order", val, expected, nil)
-}
-
-func TestEncodeWithOmitEmpty(t *testing.T) {
- type simple struct {
- Bool bool `toml:"bool,omitempty"`
- String string `toml:"string,omitempty"`
- Array [0]byte `toml:"array,omitempty"`
- Slice []int `toml:"slice,omitempty"`
- Map map[string]string `toml:"map,omitempty"`
- }
-
- var v simple
- encodeExpected(t, "fields with omitempty are omitted when empty", v, "", nil)
- v = simple{
- Bool: true,
- String: " ",
- Slice: []int{2, 3, 4},
- Map: map[string]string{"foo": "bar"},
- }
- expected := `bool = true
-string = " "
-slice = [2, 3, 4]
-
-[map]
- foo = "bar"
-`
- encodeExpected(t, "fields with omitempty are not omitted when non-empty",
- v, expected, nil)
-}
-
-func TestEncodeWithOmitZero(t *testing.T) {
- type simple struct {
- Number int `toml:"number,omitzero"`
- Real float64 `toml:"real,omitzero"`
- Unsigned uint `toml:"unsigned,omitzero"`
- }
-
- value := simple{0, 0.0, uint(0)}
- expected := ""
-
- encodeExpected(t, "simple with omitzero, all zero", value, expected, nil)
-
- value.Number = 10
- value.Real = 20
- value.Unsigned = 5
- expected = `number = 10
-real = 20.0
-unsigned = 5
-`
- encodeExpected(t, "simple with omitzero, non-zero", value, expected, nil)
-}
-
-func TestEncodeOmitemptyWithEmptyName(t *testing.T) {
- type simple struct {
- S []int `toml:",omitempty"`
- }
- v := simple{[]int{1, 2, 3}}
- expected := "S = [1, 2, 3]\n"
- encodeExpected(t, "simple with omitempty, no name, non-empty field",
- v, expected, nil)
-}
-
-func TestEncodeAnonymousStruct(t *testing.T) {
- type Inner struct{ N int }
- type Outer0 struct{ Inner }
- type Outer1 struct {
- Inner `toml:"inner"`
- }
-
- v0 := Outer0{Inner{3}}
- expected := "N = 3\n"
- encodeExpected(t, "embedded anonymous untagged struct", v0, expected, nil)
-
- v1 := Outer1{Inner{3}}
- expected = "[inner]\n N = 3\n"
- encodeExpected(t, "embedded anonymous tagged struct", v1, expected, nil)
-}
-
-func TestEncodeAnonymousStructPointerField(t *testing.T) {
- type Inner struct{ N int }
- type Outer0 struct{ *Inner }
- type Outer1 struct {
- *Inner `toml:"inner"`
- }
-
- v0 := Outer0{}
- expected := ""
- encodeExpected(t, "nil anonymous untagged struct pointer field", v0, expected, nil)
-
- v0 = Outer0{&Inner{3}}
- expected = "N = 3\n"
- encodeExpected(t, "non-nil anonymous untagged struct pointer field", v0, expected, nil)
-
- v1 := Outer1{}
- expected = ""
- encodeExpected(t, "nil anonymous tagged struct pointer field", v1, expected, nil)
-
- v1 = Outer1{&Inner{3}}
- expected = "[inner]\n N = 3\n"
- encodeExpected(t, "non-nil anonymous tagged struct pointer field", v1, expected, nil)
-}
-
-func TestEncodeIgnoredFields(t *testing.T) {
- type simple struct {
- Number int `toml:"-"`
- }
- value := simple{}
- expected := ""
- encodeExpected(t, "ignored field", value, expected, nil)
-}
-
-func encodeExpected(
- t *testing.T, label string, val interface{}, wantStr string, wantErr error,
-) {
- var buf bytes.Buffer
- enc := NewEncoder(&buf)
- err := enc.Encode(val)
- if err != wantErr {
- if wantErr != nil {
- if wantErr == errAnything && err != nil {
- return
- }
- t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err)
- } else {
- t.Errorf("%s: Encode failed: %s", label, err)
- }
- }
- if err != nil {
- return
- }
- if got := buf.String(); wantStr != got {
- t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n",
- label, wantStr, got)
- }
-}
-
-func ExampleEncoder_Encode() {
- date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC")
- var config = map[string]interface{}{
- "date": date,
- "counts": []int{1, 1, 2, 3, 5, 8},
- "hash": map[string]string{
- "key1": "val1",
- "key2": "val2",
- },
- }
- buf := new(bytes.Buffer)
- if err := NewEncoder(buf).Encode(config); err != nil {
- log.Fatal(err)
- }
- fmt.Println(buf.String())
-
- // Output:
- // counts = [1, 1, 2, 3, 5, 8]
- // date = 2010-03-14T18:00:00Z
- //
- // [hash]
- // key1 = "val1"
- // key2 = "val2"
-}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd..0000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d..0000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index 6dee7fc..0000000
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,953 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-type itemType int
-
-const (
- itemError itemType = iota
- itemNIL // used in the parser to indicate no type
- itemEOF
- itemText
- itemString
- itemRawString
- itemMultilineString
- itemRawMultilineString
- itemBool
- itemInteger
- itemFloat
- itemDatetime
- itemArray // the start of an array
- itemArrayEnd
- itemTableStart
- itemTableEnd
- itemArrayTableStart
- itemArrayTableEnd
- itemKeyStart
- itemCommentStart
- itemInlineTableStart
- itemInlineTableEnd
-)
-
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
-
- // Allow for backing up up to three runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
-
- // A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
- stack []stateFn
-}
-
-type item struct {
- typ itemType
- val string
- line int
-}
-
-func (lx *lexer) nextItem() item {
- for {
- select {
- case item := <-lx.items:
- return item
- default:
- lx.state = lx.state(lx)
- }
- }
-}
-
-func lex(input string) *lexer {
- lx := &lexer{
- input: input,
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- }
- return lx
-}
-
-func (lx *lexer) push(state stateFn) {
- lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
- if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop")
- }
- last := lx.stack[len(lx.stack)-1]
- lx.stack = lx.stack[0 : len(lx.stack)-1]
- return last
-}
-
-func (lx *lexer) current() string {
- return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
- if lx.atEOF {
- panic("next called after EOF")
- }
- if lx.pos >= len(lx.input) {
- lx.atEOF = true
- return eof
- }
-
- if lx.input[lx.pos] == '\n' {
- lx.line++
- }
- lx.prevWidths[2] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
- lx.nprev++
- }
- r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.prevWidths[0] = w
- lx.pos += w
- return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
- lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only twice between calls to next.
-func (lx *lexer) backup() {
- if lx.atEOF {
- lx.atEOF = false
- return
- }
- if lx.nprev < 1 {
- panic("backed up too far")
- }
- w := lx.prevWidths[0]
- lx.prevWidths[0] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[2]
- lx.nprev--
- lx.pos -= w
- if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
- lx.line--
- }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
- if lx.next() == valid {
- return true
- }
- lx.backup()
- return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
- r := lx.next()
- lx.backup()
- return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
- for {
- r := lx.next()
- if pred(r) {
- continue
- }
- lx.backup()
- lx.ignore()
- return
- }
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
- }
- return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
- r := lx.next()
- if isWhitespace(r) || isNL(r) {
- return lexSkip(lx, lexTop)
- }
- switch r {
- case commentStart:
- lx.push(lexTop)
- return lexCommentStart
- case tableStart:
- return lexTableStart
- case eof:
- if lx.pos > lx.start {
- return lx.errorf("unexpected EOF")
- }
- lx.emit(itemEOF)
- return nil
- }
-
- // At this point, the only valid item can be a key, so we back up
- // and let the key lexer do the rest.
- lx.backup()
- lx.push(lexTopEnd)
- return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == commentStart:
- // a comment will read to a newline for us.
- lx.push(lexTop)
- return lexCommentStart
- case isWhitespace(r):
- return lexTopEnd
- case isNL(r):
- lx.ignore()
- return lexTop
- case r == eof:
- lx.emit(itemEOF)
- return nil
- }
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
-}
-
-// lexTable lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
- lx.next()
- lx.emit(itemArrayTableStart)
- lx.push(lexArrayTableEnd)
- } else {
- lx.emit(itemTableStart)
- lx.push(lexTableEnd)
- }
- return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
- lx.emit(itemTableEnd)
- return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
- }
- lx.emit(itemArrayTableEnd)
- return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.peek(); {
- case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
- case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
- default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
- }
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.next(); {
- case isWhitespace(r):
- return lexTableNameEnd
- case r == tableSep:
- lx.ignore()
- return lexTableNameStart
- case r == tableEnd:
- return lx.pop()
- default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
- }
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
- switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
- default:
- lx.ignore()
- lx.emit(itemKeyStart)
- return lexBareKey
- }
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- default:
- return lx.errorf("bare keys cannot contain %q", r)
- }
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
- switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
- case isWhitespace(r):
- return lexSkip(lx, lexKeyEnd)
- default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
- }
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the next is popped and returned.
-func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT newlines.
- // In array syntax, the array states are responsible for ignoring newlines.
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexValue)
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- }
- switch r {
- case arrayStart:
- lx.ignore()
- lx.emit(itemArray)
- return lexArrayValue
- case inlineTableStart:
- lx.ignore()
- lx.emit(itemInlineTableStart)
- return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
- lx.ignore() // Ignore """
- return lexMultilineString
- }
- lx.backup()
- }
- lx.ignore() // ignore the '"'
- return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
- lx.ignore() // Ignore """
- return lexMultilineRawString
- }
- lx.backup()
- }
- lx.ignore() // ignore the "'"
- return lexRawString
- case '+', '-':
- return lexNumberStart
- case '.': // special error case, be kind to users
- return lx.errorf("floats must start with a digit, not '.'")
- }
- if unicode.IsLetter(r) {
- // Be permissive here; lexBool will give a nice error if the
- // user wrote something like
- // x = foo
- // (i.e. not 'true' or 'false' but is something else word-like.)
- lx.backup()
- return lexBool
- }
- return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValue)
- case r == commentStart:
- lx.push(lexArrayValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
- return lexArrayEnd
- }
-
- lx.backup()
- lx.push(lexArrayValueEnd)
- return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
- lx.push(lexArrayValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexArrayValue // move on to the next value
- case r == arrayEnd:
- return lexArrayEnd
- }
- return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemArrayEnd)
- return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValue)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- lx.backup()
- lx.push(lexInlineTableValueEnd)
- return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValueEnd)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexInlineTableValue
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemInlineTableEnd)
- return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == '\\':
- lx.push(lexString)
- return lexStringEscape
- case r == stringEnd:
- lx.backup()
- lx.emit(itemString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case '\\':
- return lexMultilineStringEscape
- case stringEnd:
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
- lx.backup()
- lx.emit(itemRawString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case rawStringEnd:
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemRawMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
- return lexMultilineString
- }
- lx.backup()
- lx.push(lexMultilineString)
- return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case 'b':
- fallthrough
- case 't':
- fallthrough
- case 'n':
- fallthrough
- case 'f':
- fallthrough
- case 'r':
- fallthrough
- case '"':
- fallthrough
- case '\\':
- return lx.pop()
- case 'u':
- return lexShortUnicodeEscape
- case 'U':
- return lexLongUnicodeEscape
- }
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 4; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 8; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '-':
- return lexDatetime
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexDatetime
- }
- switch r {
- case '-', 'T', ':', '.', 'Z':
- return lexDatetime
- }
-
- lx.backup()
- lx.emit(itemDatetime)
- return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
- }
- return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumber
- }
- switch r {
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexFloat
- }
- switch r {
- case '_', '.', '-', '+', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemFloat)
- return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false.
-func lexBool(lx *lexer) stateFn {
- var rs []rune
- for {
- r := lx.next()
- if !unicode.IsLetter(r) {
- lx.backup()
- break
- }
- rs = append(rs, r)
- }
- s := string(rs)
- switch s {
- case "true", "false":
- lx.emit(itemBool)
- return lx.pop()
- }
- return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemCommentStart)
- return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
- lx.emit(itemText)
- return lx.pop()
- }
- lx.next()
- return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
-}
-
-func (itype itemType) String() string {
- switch itype {
- case itemError:
- return "Error"
- case itemNIL:
- return "NIL"
- case itemEOF:
- return "EOF"
- case itemText:
- return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
- return "String"
- case itemBool:
- return "Bool"
- case itemInteger:
- return "Integer"
- case itemFloat:
- return "Float"
- case itemDatetime:
- return "DateTime"
- case itemTableStart:
- return "TableStart"
- case itemTableEnd:
- return "TableEnd"
- case itemKeyStart:
- return "KeyStart"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
- case itemCommentStart:
- return "CommentStart"
- }
- panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 50869ef..0000000
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,592 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
- defer func() {
- if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
- return
- }
- panic(r)
- }
- }()
-
- p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
- ordered: make([]Key, 0),
- implicits: make(map[string]bool),
- }
- for {
- item := p.next()
- if item.typ == itemEOF {
- break
- }
- p.topLevel(item)
- }
-
- return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
-}
-
-func (p *parser) next() item {
- it := p.lx.nextItem()
- if it.typ == itemError {
- p.panicf("%s", it.val)
- }
- return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
- panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
- it := p.next()
- p.assertEqual(typ, it.typ)
- return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
- if expected != got {
- p.bug("Expected '%s' but got '%s'.", expected, got)
- }
-}
-
-func (p *parser) topLevel(item item) {
- switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
- p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemTableEnd, kg.typ)
-
- p.establishContext(key, false)
- p.setType("", tomlHash)
- p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemArrayTableEnd, kg.typ)
-
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
- p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- p.currentKey = ""
- default:
- p.bug("Unexpected type at top level: %s", item.typ)
- }
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
- switch it.typ {
- case itemText:
- return it.val
- case itemString, itemMultilineString,
- itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
- return s.(string)
- default:
- p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
- }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
- switch it.typ {
- case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
- case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
- case itemRawString:
- return it.val, p.typeOfPrimitive(it)
- case itemRawMultilineString:
- return stripFirstNewline(it.val), p.typeOfPrimitive(it)
- case itemBool:
- switch it.val {
- case "true":
- return true, p.typeOfPrimitive(it)
- case "false":
- return false, p.typeOfPrimitive(it)
- }
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
- }
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
- }
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
- }
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
-
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
- }
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
-
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
- }
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
-
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
- }
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
- }
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
- accept := false
- for _, r := range s {
- if r == '_' {
- if !accept {
- return false
- }
- accept = false
- continue
- }
- accept = true
- }
- return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
- period := false
- for _, r := range s {
- if period && !isDigit(r) {
- return false
- }
- period = r == '.'
- }
- return !period
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
- hashContext := p.mapping
- keyContext := make(Key, 0)
-
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
- keyContext = append(keyContext, k)
-
- // No key? Make an implicit hash and move on.
- if !ok {
- p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
- }
-
- // If the hash context is actually an array of tables, then set
- // the hash context to the last element in that array.
- //
- // Otherwise, it better be a table, since this MUST be a key group (by
- // virtue of it not being the last element in a key).
- switch t := hashContext[k].(type) {
- case []map[string]interface{}:
- hashContext = t[len(t)-1]
- case map[string]interface{}:
- hashContext = t
- default:
- p.panicf("Key '%s' was already created as a hash.", keyContext)
- }
- }
-
- p.context = keyContext
- if array {
- // If this is the first element for this array, then allocate a new
- // list of tables for it.
- k := key[len(key)-1]
- if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
- }
-
- // Add a new table. But make sure the key hasn't already been used
- // for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
- } else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
- }
- } else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
- }
- p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, account for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- if tmpHash, ok = hash[k]; !ok {
- p.bug("Context for key '%s' has not been established.", keyContext)
- }
- switch t := tmpHash.(type) {
- case []map[string]interface{}:
- // The context is a table of hashes. Pick the most recent table
- // defined as the current hash.
- hash = t[len(t)-1]
- case map[string]interface{}:
- hash = t
- default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
- }
- }
- keyContext = append(keyContext, key)
-
- if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
- //
- // But we have to make sure to stop marking it as an implicit. (So that
- // another redefinition provokes an error.)
- //
- // Note that since it has already been defined (as a hash), we don't
- // want to overwrite it. So our business is done.
- if p.isImplicit(keyContext) {
- p.removeImplicit(keyContext)
- return
- }
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
- p.panicf("Key '%s' has already been defined.", keyContext)
- }
- hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
- keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
- if len(key) > 0 { // allow type setting for hashes
- keyContext = append(keyContext, key)
- }
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
- if len(p.currentKey) == 0 {
- return p.context.String()
- }
- if len(p.context) == 0 {
- return p.currentKey
- }
- return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
- }
- return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
- }
- }
- return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
- continue
- }
- r += 1
- if r >= len(s) {
- p.bug("Escape sequence at end of string.")
- return ""
- }
- switch s[r] {
- default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
- case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
- case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
- case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
- case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
- case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
- case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
- case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
- case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
- replaced = append(replaced, escaped)
- r += 5
- case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
- replaced = append(replaced, escaped)
- r += 9
- }
- }
- return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
- s := string(bs)
- hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
- if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
- }
- if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
- }
- return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164b..0000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8af..0000000
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemMultilineString:
- return tomlString
- case itemRawString:
- return tomlString
- case itemRawMultilineString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 608997c..0000000
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
- name string // the name of the field (`toml` tag included)
- tag bool // whether field has a `toml` tag
- index []int // represents the depth of an anonymous field
- typ reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" && !sf.Anonymous { // unexported
- continue
- }
- opts := getOptions(sf.Tag)
- if opts.skip {
- continue
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := opts.name != ""
- name := opts.name
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, field{name, tagged, index, ft})
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- f := field{name: ft.Name(), index: index, typ: ft}
- next = append(next, f)
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with TOML tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
diff --git a/vendor/github.com/felix/logger/logger_test.go b/vendor/github.com/felix/logger/logger_test.go
deleted file mode 100644
index 3f64430..0000000
--- a/vendor/github.com/felix/logger/logger_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package logger
-
-import (
- "bytes"
- "strings"
- "testing"
-)
-
-func TestKeyValueWriter(t *testing.T) {
- var tests = []struct {
- in []interface{}
- out string
- }{
- {
- in: []interface{}{"one"},
- out: "[INFO ] test: message=one\n",
- },
- {
- in: []interface{}{"one", "two", "2"},
- out: "[INFO ] test: message=one two=2\n",
- },
- {
- in: []interface{}{"one", "two", "2", "three", 3},
- out: "[INFO ] test: message=one two=2 three=3\n",
- },
- {
- in: []interface{}{"one", "two", "2", "three", 3, "fo ur", "# 4"},
- out: "[INFO ] test: message=one two=2 three=3 \"fo ur\"=\"# 4\"\n",
- },
- }
-
- for _, tt := range tests {
- var buf bytes.Buffer
- logger := New(&Options{
- Name: "test",
- Output: &buf,
- })
-
- logger.Info(tt.in...)
-
- str := buf.String()
-
- // Chop timestamp
- dataIdx := strings.IndexByte(str, ' ')
- rest := str[dataIdx+1:]
-
- if rest != tt.out {
- t.Errorf("Info(%q) => %q, expected %q\n", tt.in, rest, tt.out)
- }
- }
-}
-
-func TestKeyValueWriterWithFields(t *testing.T) {
- var tests = []struct {
- in []interface{}
- out string
- }{
- {
- in: []interface{}{"one"},
- out: "[INFO ] test: message=one added=this\n",
- },
- {
- in: []interface{}{"one", "two", "2"},
- out: "[INFO ] test: message=one two=2 added=this\n",
- },
- {
- in: []interface{}{"one", "two", "2", "three", 3},
- out: "[INFO ] test: message=one two=2 three=3 added=this\n",
- },
- {
- in: []interface{}{"one", "two", "2", "three", 3, "fo ur", "# 4"},
- out: "[INFO ] test: message=one two=2 three=3 \"fo ur\"=\"# 4\" added=this\n",
- },
- }
- for _, tt := range tests {
- var buf bytes.Buffer
- logger := New(&Options{
- Name: "test",
- Output: &buf,
- }).WithFields("added", "this")
-
- logger.Info(tt.in...)
-
- str := buf.String()
-
- // Chop timestamp
- dataIdx := strings.IndexByte(str, ' ')
- rest := str[dataIdx+1:]
-
- if rest != tt.out {
- t.Errorf("Info(%q) => %q, expected %q\n", tt.in, rest, tt.out)
- }
- }
-}
-
-func TestLevels(t *testing.T) {
- logger := New(&Options{
- Name: "test",
- Level: Debug,
- })
-
- if !logger.IsDebug() {
- t.Errorf("Level Debug check failed")
- }
-
- logger = New(&Options{
- Name: "test",
- Level: Error,
- })
-
- if !logger.IsError() {
- t.Errorf("Level Error check failed")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/.gitignore b/vendor/github.com/jackc/pgx/.gitignore
index cb0cd90..0ff0080 100644
--- a/vendor/github.com/jackc/pgx/.gitignore
+++ b/vendor/github.com/jackc/pgx/.gitignore
@@ -22,3 +22,4 @@ _testmain.go
*.exe
conn_config_test.go
+.envrc
diff --git a/vendor/github.com/jackc/pgx/.travis.yml b/vendor/github.com/jackc/pgx/.travis.yml
index d9ea43b..6d4b3cd 100644
--- a/vendor/github.com/jackc/pgx/.travis.yml
+++ b/vendor/github.com/jackc/pgx/.travis.yml
@@ -1,59 +1,32 @@
language: go
go:
- - 1.7.4
- - 1.6.4
+ - 1.x
- tip
# Derived from https://github.com/lib/pq/blob/master/.travis.yml
before_install:
- - sudo apt-get remove -y --purge postgresql libpq-dev libpq5 postgresql-client-common postgresql-common
- - sudo rm -rf /var/lib/postgresql
- - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
- - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list"
- - sudo apt-get update -qq
- - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION
- - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host all pgx_md5 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host all pgx_pw 127.0.0.1/32 password" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "hostssl all pgx_ssl 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host replication pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host pgx_test pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - sudo chmod 777 /etc/postgresql/$PGVERSION/main/postgresql.conf
- - "[[ $PGVERSION < 9.6 ]] || echo \"wal_level='logical'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - "[[ $PGVERSION < 9.6 ]] || echo \"max_wal_senders=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - "[[ $PGVERSION < 9.6 ]] || echo \"max_replication_slots=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - sudo /etc/init.d/postgresql restart
+ - ./travis/before_install.bash
env:
+ global:
+ - PGX_TEST_DATABASE=postgres://pgx_md5:secret@127.0.0.1/pgx_test
matrix:
+ - CRATEVERSION=2.1
+ - PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4
- PGVERSION=9.3
- - PGVERSION=9.2
-# The tricky test user, below, has to actually exist so that it can be used in a test
-# of aclitem formatting. It turns out aclitems cannot contain non-existing users/roles.
before_script:
- - mv conn_config_test.go.travis conn_config_test.go
- - psql -U postgres -c 'create database pgx_test'
- - "[[ \"${PGVERSION}\" = '9.0' ]] && psql -U postgres -f /usr/share/postgresql/9.0/contrib/hstore.sql pgx_test || psql -U postgres pgx_test -c 'create extension hstore'"
- - psql -U postgres -c "create user pgx_ssl SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_md5 SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_pw SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_replication with replication password 'secret'"
- - psql -U postgres -c "create user \" tricky, ' } \"\" \\ test user \" superuser password 'secret'"
+ - ./travis/before_script.bash
install:
- - go get -u github.com/shopspring/decimal
- - go get -u gopkg.in/inconshreveable/log15.v2
- - go get -u github.com/jackc/fake
+ - ./travis/install.bash
script:
- - go test -v -race -short ./...
+ - ./travis/script.bash
matrix:
allow_failures:
diff --git a/vendor/github.com/jackc/pgx/CHANGELOG.md b/vendor/github.com/jackc/pgx/CHANGELOG.md
index 88c663b..0bc4162 100644
--- a/vendor/github.com/jackc/pgx/CHANGELOG.md
+++ b/vendor/github.com/jackc/pgx/CHANGELOG.md
@@ -1,4 +1,113 @@
-# Unreleased
+# 3.1.0 (January 15, 2018)
+
+## Features
+
+* Add QueryEx, QueryRowEx, ExecEx, and RollbackEx to Tx
+* Add more ColumnType support (Timothée Peignier)
+* Add UUIDArray type (Kelsey Francis)
+* Add zap log adapter (Kelsey Francis)
+* Add CreateReplicationSlotEx that consistent_point and snapshot_name (Mark Fletcher)
+* Add BeginBatch to Tx (Gaspard Douady)
+* Support CrateDB (Felix Geisendörfer)
+* Allow use of logrus logger with fields configured (André Bierlein)
+* Add array of enum support
+* Add support for bit type
+* Handle timeout parameters (Timothée Peignier)
+* Allow overriding connection info (James Lawrence)
+* Add support for bpchar type (Iurii Krasnoshchok)
+* Add ConnConfig.PreferSimpleProtocol
+
+## Fixes
+
+* Fix numeric EncodeBinary bug (Wei Congrui)
+* Fix logrus updated package name (Damir Vandic)
+* Fix some invalid one round trip execs failing to return non-nil error. (Kelsey Francis)
+* Return ErrClosedPool when Acquire() with closed pool (Mike Graf)
+* Fix decoding row with same type values
+* Always return non-nil \*Rows from Query to fix QueryRow (Kelsey Francis)
+* Fix pgtype types that can Set database/sql/driver.driver.Valuer
+* Prefix types in namespaces other than pg_catalog or public (Kelsey Francis)
+* Fix incomplete selects during batch (Gaspard Douady and Jack Christensen)
+* Support nil pointers to value implementing driver.Valuer
+* Fix time logging for QueryEx
+* Fix ranges with text format where end is unbounded
+* Detect erroneous JSON(B) encoding
+* Fix missing interval mapping
+* ConnPool begin should not retry if ctx is done (Gaspard Douady)
+* Fix reading interrupted messages could break connection
+* Return error on unknown oid while decoding record instead of panic (Iurii Krasnoshchok)
+
+## Changes
+
+* Align sslmode "require" more closely to libpq (Johan Brandhorst)
+
+# 3.0.1 (August 12, 2017)
+
+## Fixes
+
+* Fix compilation on 32-bit platform
+* Fix invalid MarshalJSON of types with status Undefined
+* Fix pid logging
+
+# 3.0.0 (July 24, 2017)
+
+## Changes
+
+* Pid to PID in accordance with Go naming conventions.
+* Conn.Pid changed to accessor method Conn.PID()
+* Conn.SecretKey removed
+* Remove Conn.TxStatus
+* Logger interface reduced to single Log method.
+* Replace BeginIso with BeginEx. BeginEx adds support for read/write mode and deferrable mode.
+* Transaction isolation level constants are now typed strings instead of bare strings.
+* Conn.WaitForNotification now takes context.Context instead of time.Duration for cancellation support.
+* Conn.WaitForNotification no longer automatically pings internally every 15 seconds.
+* ReplicationConn.WaitForReplicationMessage now takes context.Context instead of time.Duration for cancellation support.
+* Reject scanning binary format values into a string (e.g. binary encoded timestamptz to string). See https://github.com/jackc/pgx/issues/219 and https://github.com/jackc/pgx/issues/228
+* No longer can read raw bytes of any value into a []byte. Use pgtype.GenericBinary if this functionality is needed.
+* Remove CopyTo (functionality is now in CopyFrom)
+* OID constants moved from pgx to pgtype package
+* Replaced Scanner, Encoder, and PgxScanner interfaces with pgtype system
+* Removed ValueReader
+* ConnPool.Close no longer waits for all acquired connections to be released. Instead, it immediately closes all available connections, and closes acquired connections when they are released in the same manner as ConnPool.Reset.
+* Removed Rows.Fatal(error)
+* Removed Rows.AfterClose()
+* Removed Rows.Conn()
+* Removed Tx.AfterClose()
+* Removed Tx.Conn()
+* Use Go casing convention for OID, UUID, JSON(B), ACLItem, CID, TID, XID, and CIDR
+* Replaced stdlib.OpenFromConnPool with DriverConfig system
+
+## Features
+
+* Entirely revamped pluggable type system that supports approximately 60 PostgreSQL types.
+* Types support database/sql interfaces and therefore can be used with other drivers
+* Added context methods supporting cancellation where appropriate
+* Added simple query protocol support
+* Added single round-trip query mode
+* Added batch query operations
+* Added OnNotice
+* github.com/pkg/errors used where possible for errors
+* Added stdlib.DriverConfig which directly allows full configuration of underlying pgx connections without needing to use a pgx.ConnPool
+* Added AcquireConn and ReleaseConn to stdlib to allow acquiring a connection from a database/sql connection.
+
+# 2.11.0 (June 5, 2017)
+
+## Fixes
+
+* Fix race with concurrent execution of stdlib.OpenFromConnPool (Terin Stock)
+
+## Features
+
+* .pgpass support (j7b)
+* Add missing CopyFrom delegators to Tx and ConnPool (Jack Christensen)
+* Add ParseConnectionString (James Lawrence)
+
+## Performance
+
+* Optimize HStore encoding (René Kroon)
+
+# 2.10.0 (March 17, 2017)
## Fixes
diff --git a/vendor/github.com/jackc/pgx/README.md b/vendor/github.com/jackc/pgx/README.md
index 5550f6b..1acaabf 100644
--- a/vendor/github.com/jackc/pgx/README.md
+++ b/vendor/github.com/jackc/pgx/README.md
@@ -1,63 +1,68 @@
[![](https://godoc.org/github.com/jackc/pgx?status.svg)](https://godoc.org/github.com/jackc/pgx)
+[![Build Status](https://travis-ci.org/jackc/pgx.svg)](https://travis-ci.org/jackc/pgx)
-# Pgx
+# pgx - PostgreSQL Driver and Toolkit
-## Master Branch
+pgx is a pure Go driver and toolkit for PostgreSQL. pgx is different from other drivers such as [pq](http://godoc.org/github.com/lib/pq) because, while it can operate as a database/sql compatible driver, pgx is also usable directly. It offers a native interface similar to database/sql that offers better performance and more features.
-This is the `master` branch which tracks the stable release of the current
-version. At the moment this is `v2`. The `v3` branch which is currently in beta.
-General release is planned for July. `v3` is considered to be stable in the
-sense of lack of known bugs, but the API is not considered stable until general
-release. No further changes are planned, but the beta process may surface
-desirable changes. If possible API changes are acceptable, then `v3` is the
-recommented branch for new development. Regardless, please lock to the `v2` or
-`v3` branch as when `v3` is released breaking changes will be applied to the
-master branch.
-Pgx is a pure Go database connection library designed specifically for
-PostgreSQL. Pgx is different from other drivers such as
-[pq](http://godoc.org/github.com/lib/pq) because, while it can operate as a
-database/sql compatible driver, pgx is primarily intended to be used directly.
-It offers a native interface similar to database/sql that offers better
-performance and more features.
+```go
+var name string
+var weight int64
+err := conn.QueryRow("select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+if err != nil {
+ return err
+}
+```
## Features
-Pgx supports many additional features beyond what is available through database/sql.
+pgx supports many additional features beyond what is available through database/sql.
-* Listen / notify
-* Transaction isolation level control
+* Support for approximately 60 different PostgreSQL types
+* Batch queries
+* Single-round trip query mode
* Full TLS connection control
* Binary format support for custom types (can be much faster)
-* Copy from protocol support for faster bulk data loads
-* Logging support
-* Configurable connection pool with after connect hooks to do arbitrary connection setup
+* Copy protocol support for faster bulk data loads
+* Extendable logging support including built-in support for log15 and logrus
+* Connection pool with after connect hook to do arbitrary connection setup
+* Listen / notify
* PostgreSQL array to Go slice mapping for integers, floats, and strings
* Hstore support
* JSON and JSONB support
* Maps inet and cidr PostgreSQL types to net.IPNet and net.IP
* Large object support
-* Null mapping to Null* struct or pointer to pointer.
+* NULL mapping to Null* struct or pointer to pointer.
* Supports database/sql.Scanner and database/sql/driver.Valuer interfaces for custom types
* Logical replication connections, including receiving WAL and sending standby status updates
+* Notice response handling (this is different than listen / notify)
## Performance
-Pgx performs roughly equivalent to [pq](http://godoc.org/github.com/lib/pq) and
-[go-pg](https://github.com/go-pg/pg) for selecting a single column from a single
-row, but it is substantially faster when selecting multiple entire rows (6893
-queries/sec for pgx vs. 3968 queries/sec for pq -- 73% faster).
+pgx performs roughly equivalent to [go-pg](https://github.com/go-pg/pg) and is almost always faster than [pq](http://godoc.org/github.com/lib/pq). When parsing large result sets the percentage difference can be significant (16483 queries/sec for pgx vs. 10106 queries/sec for pq -- 63% faster).
+
+In many use cases a significant cause of latency is network round trips between the application and the server. pgx supports query batching to bundle multiple queries into a single round trip. Even in the case of a connection with the lowest possible latency, a local Unix domain socket, batching as few as three queries together can yield an improvement of 57%. With a typical network connection the results can be even more substantial.
+
+See this [gist](https://gist.github.com/jackc/4996e8648a0c59839bff644f49d6e434) for the underlying benchmark results or checkout [go_db_bench](https://github.com/jackc/go_db_bench) to run tests for yourself.
+
+In addition to the native driver, pgx also includes a number of packages that provide additional functionality.
+
+## github.com/jackc/pgx/stdlib
-See this [gist](https://gist.github.com/jackc/d282f39e088b495fba3e) for the
-underlying benchmark results or checkout
-[go_db_bench](https://github.com/jackc/go_db_bench) to run tests for yourself.
+database/sql compatibility layer for pgx. pgx can be used as a normal database/sql driver, but at any time the native interface may be acquired for more performance or PostgreSQL specific functionality.
-## database/sql
+## github.com/jackc/pgx/pgtype
-Import the ```github.com/jackc/pgx/stdlib``` package to use pgx as a driver for
-database/sql. It is possible to retrieve a pgx connection from database/sql on
-demand. This allows using the database/sql interface in most places, but using
-pgx directly when more performance or PostgreSQL specific features are needed.
+Approximately 60 PostgreSQL types are supported including uuid, hstore, json, bytea, numeric, interval, inet, and arrays. These types support database/sql interfaces and are usable even outside of pgx. They are fully tested in pgx and pq. They also support a higher performance interface when used with the pgx driver.
+
+## github.com/jackc/pgx/pgproto3
+
+pgproto3 provides standalone encoding and decoding of the PostgreSQL v3 wire protocol. This is useful for implementing very low level PostgreSQL tooling.
+
+## github.com/jackc/pgx/pgmock
+
+pgmock offers the ability to create a server that mocks the PostgreSQL wire protocol. This is used internally to test pgx by purposely inducing unusual errors. pgproto3 and pgmock together provide most of the foundational tooling required to implement a PostgreSQL proxy or MitM (such as for a custom connection pooler).
## Documentation
@@ -74,8 +79,15 @@ skip tests for connection types that are not configured.
To setup the normal test environment, first install these dependencies:
+ go get github.com/cockroachdb/apd
+ go get github.com/hashicorp/go-version
go get github.com/jackc/fake
+ go get github.com/lib/pq
+ go get github.com/pkg/errors
+ go get github.com/satori/go.uuid
go get github.com/shopspring/decimal
+ go get github.com/sirupsen/logrus
+ go get go.uber.org/zap
go get gopkg.in/inconshreveable/log15.v2
Then run the following SQL:
@@ -132,6 +144,8 @@ Change the following settings in your postgresql.conf:
max_wal_senders=5
max_replication_slots=5
+Set `replicationConnConfig` appropriately in `conn_config_test.go`.
+
## Version Policy
-pgx follows semantic versioning for the documented public API on stable releases. Branch `v2` is the latest stable release. `master` can contain new features or behavior that will change or be removed before being merged to the stable `v2` branch (in practice, this occurs very rarely).
+pgx follows semantic versioning for the documented public API on stable releases. Branch `v3` is the latest stable release. `master` can contain new features or behavior that will change or be removed before being merged to the stable `v3` branch (in practice, this occurs very rarely). `v2` is the previous stable release.
diff --git a/vendor/github.com/jackc/pgx/aclitem_parse_test.go b/vendor/github.com/jackc/pgx/aclitem_parse_test.go
deleted file mode 100644
index 5c7c748..0000000
--- a/vendor/github.com/jackc/pgx/aclitem_parse_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package pgx
-
-import (
- "reflect"
- "testing"
-)
-
-func TestEscapeAclItem(t *testing.T) {
- tests := []struct {
- input string
- expected string
- }{
- {
- "foo",
- "foo",
- },
- {
- `foo, "\}`,
- `foo\, \"\\\}`,
- },
- }
-
- for i, tt := range tests {
- actual, err := escapeAclItem(tt.input)
-
- if err != nil {
- t.Errorf("%d. Unexpected error %v", i, err)
- }
-
- if actual != tt.expected {
- t.Errorf("%d.\nexpected: %s,\nactual: %s", i, tt.expected, actual)
- }
- }
-}
-
-func TestParseAclItemArray(t *testing.T) {
- tests := []struct {
- input string
- expected []AclItem
- errMsg string
- }{
- {
- "",
- []AclItem{},
- "",
- },
- {
- "one",
- []AclItem{"one"},
- "",
- },
- {
- `"one"`,
- []AclItem{"one"},
- "",
- },
- {
- "one,two,three",
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","two","three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one",two,"three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `one,two,"three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","two",three`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","t w o",three`,
- []AclItem{"one", "t w o", "three"},
- "",
- },
- {
- `"one","t, w o\"\}\\",three`,
- []AclItem{"one", `t, w o"}\`, "three"},
- "",
- },
- {
- `"one","two",three"`,
- []AclItem{"one", "two", `three"`},
- "",
- },
- {
- `"one","two,"three"`,
- nil,
- "unexpected rune after quoted value",
- },
- {
- `"one","two","three`,
- nil,
- "unexpected end of quoted value",
- },
- }
-
- for i, tt := range tests {
- actual, err := parseAclItemArray(tt.input)
-
- if err != nil {
- if tt.errMsg == "" {
- t.Errorf("%d. Unexpected error %v", i, err)
- } else if err.Error() != tt.errMsg {
- t.Errorf("%d. Expected error %v did not match actual error %v", i, tt.errMsg, err.Error())
- }
- } else if tt.errMsg != "" {
- t.Errorf("%d. Expected error not returned: \"%v\"", i, tt.errMsg)
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v did not match actual %v", i, tt.expected, actual)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/batch.go b/vendor/github.com/jackc/pgx/batch.go
new file mode 100644
index 0000000..0d7f14c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/batch.go
@@ -0,0 +1,313 @@
+package pgx
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
+)
+
+type batchItem struct {
+ query string
+ arguments []interface{}
+ parameterOIDs []pgtype.OID
+ resultFormatCodes []int16
+}
+
+// Batch queries are a way of bundling multiple queries together to avoid
+// unnecessary network round trips.
+type Batch struct {
+ conn *Conn
+ connPool *ConnPool
+ items []*batchItem
+ resultsRead int
+ pendingCommandComplete bool
+ ctx context.Context
+ err error
+ inTx bool
+}
+
+// BeginBatch returns a *Batch query for c.
+func (c *Conn) BeginBatch() *Batch {
+ return &Batch{conn: c}
+}
+
+// BeginBatch returns a *Batch query for tx. Since this *Batch is already part
+// of a transaction it will not automatically be wrapped in a transaction.
+func (tx *Tx) BeginBatch() *Batch {
+ return &Batch{conn: tx.conn, inTx: true}
+}
+
+// Conn returns the underlying connection that b will or was performed on.
+func (b *Batch) Conn() *Conn {
+ return b.conn
+}
+
+// Queue queues a query to batch b. parameterOIDs are required if there are
+// parameters and query is not the name of a prepared statement.
+// resultFormatCodes are required if there is a result.
+func (b *Batch) Queue(query string, arguments []interface{}, parameterOIDs []pgtype.OID, resultFormatCodes []int16) {
+ b.items = append(b.items, &batchItem{
+ query: query,
+ arguments: arguments,
+ parameterOIDs: parameterOIDs,
+ resultFormatCodes: resultFormatCodes,
+ })
+}
+
+// Send sends all queued queries to the server at once.
+// If the batch is created from a conn object then all queries are wrapped
+// in a transaction. The transaction can optionally be configured with
+// txOptions. The context is in effect until the Batch is closed.
+//
+// Warning: Send writes all queued queries before reading any results. This can
+// cause a deadlock if an excessive number of queries are queued. It is highly
+// advisable to use a timeout context to protect against this possibility.
+// Unfortunately, this excessive number can vary based on operating system,
+// connection type (TCP or Unix domain socket), and type of query. Unix domain
+// sockets seem to be much more susceptible to this issue than TCP connections.
+// However, it usually is at least several thousand.
+//
+// The deadlock occurs when the batched queries to be sent are so large that the
+// PostgreSQL server cannot receive it all at once. PostgreSQL receives some of
+// the queued queries and starts executing them. As PostgreSQL executes the
+// queries it sends responses back. pgx will not read any of these responses
+// until it has finished sending. Therefore, if all network buffers are full pgx
+// will not be able to finish sending the queries and PostgreSQL will not be
+// able to finish sending the responses.
+//
+// See https://github.com/jackc/pgx/issues/374.
+func (b *Batch) Send(ctx context.Context, txOptions *TxOptions) error {
+ if b.err != nil {
+ return b.err
+ }
+
+ b.ctx = ctx
+
+ err := b.conn.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return err
+ }
+
+ if err := b.conn.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
+ buf := b.conn.wbuf
+ if !b.inTx {
+ buf = appendQuery(buf, txOptions.beginSQL())
+ }
+
+ err = b.conn.initContext(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, bi := range b.items {
+ var psName string
+ var psParameterOIDs []pgtype.OID
+
+ if ps, ok := b.conn.preparedStatements[bi.query]; ok {
+ psName = ps.Name
+ psParameterOIDs = ps.ParameterOIDs
+ } else {
+ psParameterOIDs = bi.parameterOIDs
+ buf = appendParse(buf, "", bi.query, psParameterOIDs)
+ }
+
+ var err error
+ buf, err = appendBind(buf, "", psName, b.conn.ConnInfo, psParameterOIDs, bi.arguments, bi.resultFormatCodes)
+ if err != nil {
+ return err
+ }
+
+ buf = appendDescribe(buf, 'P', "")
+ buf = appendExecute(buf, "", 0)
+ }
+
+ buf = appendSync(buf)
+ b.conn.pendingReadyForQueryCount++
+
+ if !b.inTx {
+ buf = appendQuery(buf, "commit")
+ b.conn.pendingReadyForQueryCount++
+ }
+
+ n, err := b.conn.conn.Write(buf)
+ if err != nil {
+ if fatalWriteErr(n, err) {
+ b.conn.die(err)
+ }
+ return err
+ }
+
+ for !b.inTx {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ return nil
+ default:
+ if err := b.conn.processContextFreeMsg(msg); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ExecResults reads the results from the next query in the batch as if the
+// query has been sent with Exec.
+func (b *Batch) ExecResults() (CommandTag, error) {
+ if b.err != nil {
+ return "", b.err
+ }
+
+ select {
+ case <-b.ctx.Done():
+ b.die(b.ctx.Err())
+ return "", b.ctx.Err()
+ default:
+ }
+
+ if err := b.ensureCommandComplete(); err != nil {
+ b.die(err)
+ return "", err
+ }
+
+ b.resultsRead++
+
+ b.pendingCommandComplete = true
+
+ for {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return "", err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CommandComplete:
+ b.pendingCommandComplete = false
+ return CommandTag(msg.CommandTag), nil
+ default:
+ if err := b.conn.processContextFreeMsg(msg); err != nil {
+ return "", err
+ }
+ }
+ }
+}
+
+// QueryResults reads the results from the next query in the batch as if the
+// query has been sent with Query.
+func (b *Batch) QueryResults() (*Rows, error) {
+ rows := b.conn.getRows("batch query", nil)
+
+ if b.err != nil {
+ rows.fatal(b.err)
+ return rows, b.err
+ }
+
+ select {
+ case <-b.ctx.Done():
+ b.die(b.ctx.Err())
+ rows.fatal(b.err)
+ return rows, b.ctx.Err()
+ default:
+ }
+
+ if err := b.ensureCommandComplete(); err != nil {
+ b.die(err)
+ rows.fatal(err)
+ return rows, err
+ }
+
+ b.resultsRead++
+
+ b.pendingCommandComplete = true
+
+ fieldDescriptions, err := b.conn.readUntilRowDescription()
+ if err != nil {
+ b.die(err)
+ rows.fatal(b.err)
+ return rows, err
+ }
+
+ rows.batch = b
+ rows.fields = fieldDescriptions
+ return rows, nil
+}
+
+// QueryRowResults reads the results from the next query in the batch as if the
+// query has been sent with QueryRow.
+func (b *Batch) QueryRowResults() *Row {
+ rows, _ := b.QueryResults()
+ return (*Row)(rows)
+
+}
+
+// Close closes the batch operation. Any error that occurred during a batch
+// operation may have made it impossible to resynchronize the connection with the
+// server. In this case the underlying connection will have been closed.
+func (b *Batch) Close() (err error) {
+ if b.err != nil {
+ return b.err
+ }
+
+ defer func() {
+ err = b.conn.termContext(err)
+ if b.conn != nil && b.connPool != nil {
+ b.connPool.Release(b.conn)
+ }
+ }()
+
+ for i := b.resultsRead; i < len(b.items); i++ {
+ if _, err = b.ExecResults(); err != nil {
+ return err
+ }
+ }
+
+ if err = b.conn.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Batch) die(err error) {
+ if b.err != nil {
+ return
+ }
+
+ b.err = err
+ b.conn.die(err)
+
+ if b.conn != nil && b.connPool != nil {
+ b.connPool.Release(b.conn)
+ }
+}
+
+func (b *Batch) ensureCommandComplete() error {
+ for b.pendingCommandComplete {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CommandComplete:
+ b.pendingCommandComplete = false
+ return nil
+ default:
+ err = b.conn.processContextFreeMsg(msg)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/bench_test.go b/vendor/github.com/jackc/pgx/bench_test.go
deleted file mode 100644
index 30e31e2..0000000
--- a/vendor/github.com/jackc/pgx/bench_test.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
- log "gopkg.in/inconshreveable/log15.v2"
-)
-
-func BenchmarkConnPool(b *testing.B) {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 5}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- b.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var conn *pgx.Conn
- if conn, err = pool.Acquire(); err != nil {
- b.Fatalf("Unable to acquire connection: %v", err)
- }
- pool.Release(conn)
- }
-}
-
-func BenchmarkConnPoolQueryRow(b *testing.B) {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 5}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- b.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- num := float64(-1)
- if err := pool.QueryRow("select random()").Scan(&num); err != nil {
- b.Fatal(err)
- }
-
- if num < 0 {
- b.Fatalf("expected `select random()` to return between 0 and 1 but it was: %v", num)
- }
- }
-}
-
-func BenchmarkNullXWithNullValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', null::text, null::text, null::text, null::date, null::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email pgx.NullString
- name pgx.NullString
- sex pgx.NullString
- birthDate pgx.NullTime
- lastLoginTime pgx.NullTime
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email.Valid {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name.Valid {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex.Valid {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate.Valid {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime.Valid {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkNullXWithPresentValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email pgx.NullString
- name pgx.NullString
- sex pgx.NullString
- birthDate pgx.NullTime
- lastLoginTime pgx.NullTime
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if !record.email.Valid || record.email.String != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if !record.name.Valid || record.name.String != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if !record.sex.Valid || record.sex.String != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if !record.birthDate.Valid || record.birthDate.Time != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if !record.lastLoginTime.Valid || record.lastLoginTime.Time != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkPointerPointerWithNullValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', null::text, null::text, null::text, null::date, null::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email *string
- name *string
- sex *string
- birthDate *time.Time
- lastLoginTime *time.Time
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email != nil {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name != nil {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex != nil {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate != nil {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime != nil {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkPointerPointerWithPresentValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email *string
- name *string
- sex *string
- birthDate *time.Time
- lastLoginTime *time.Time
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email == nil || *record.email != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name == nil || *record.name != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex == nil || *record.sex != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate == nil || *record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime == nil || *record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkSelectWithoutLogging(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingTraceWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("debug")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelTrace
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingDebugWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("debug")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelDebug
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingInfoWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("info")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelInfo
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingErrorWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("error")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelError
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func benchmarkSelectWithLog(b *testing.B, conn *pgx.Conn) {
- _, err := conn.Prepare("test", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email string
- name string
- sex string
- birthDate time.Time
- lastLoginTime time.Time
- }
-
- err = conn.QueryRow("test").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkLog15Discard(b *testing.B) {
- logger := log.New()
- lvl, err := log.LvlFromString("error")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- logger.Debug("benchmark", "i", i, "b.N", b.N)
- }
-}
-
-const benchmarkWriteTableCreateSQL = `drop table if exists t;
-
-create table t(
- varchar_1 varchar not null,
- varchar_2 varchar not null,
- varchar_null_1 varchar,
- date_1 date not null,
- date_null_1 date,
- int4_1 int4 not null,
- int4_2 int4 not null,
- int4_null_1 int4,
- tstz_1 timestamptz not null,
- tstz_2 timestamptz,
- bool_1 bool not null,
- bool_2 bool not null,
- bool_3 bool not null
-);
-`
-
-const benchmarkWriteTableInsertSQL = `insert into t(
- varchar_1,
- varchar_2,
- varchar_null_1,
- date_1,
- date_null_1,
- int4_1,
- int4_2,
- int4_null_1,
- tstz_1,
- tstz_2,
- bool_1,
- bool_2,
- bool_3
-) values (
- $1::varchar,
- $2::varchar,
- $3::varchar,
- $4::date,
- $5::date,
- $6::int4,
- $7::int4,
- $8::int4,
- $9::timestamptz,
- $10::timestamptz,
- $11::bool,
- $12::bool,
- $13::bool
-)`
-
-type benchmarkWriteTableCopyFromSrc struct {
- count int
- idx int
- row []interface{}
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Next() bool {
- s.idx++
- return s.idx < s.count
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Values() ([]interface{}, error) {
- return s.row, nil
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Err() error {
- return nil
-}
-
-func newBenchmarkWriteTableCopyFromSrc(count int) pgx.CopyFromSource {
- return &benchmarkWriteTableCopyFromSrc{
- count: count,
- row: []interface{}{
- "varchar_1",
- "varchar_2",
- pgx.NullString{},
- time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
- pgx.NullTime{},
- 1,
- 2,
- pgx.NullInt32{},
- time.Date(2001, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2002, 1, 1, 0, 0, 0, 0, time.Local),
- true,
- false,
- true,
- },
- }
-}
-
-func benchmarkWriteNRowsViaInsert(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
- _, err := conn.Prepare("insert_t", benchmarkWriteTableInsertSQL)
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- tx, err := conn.Begin()
- if err != nil {
- b.Fatal(err)
- }
-
- for src.Next() {
- values, _ := src.Values()
- if _, err = tx.Exec("insert_t", values...); err != nil {
- b.Fatalf("Exec unexpectedly failed with: %v", err)
- }
- }
-
- err = tx.Commit()
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-// note this function is only used for benchmarks -- it doesn't escape tableName
-// or columnNames
-func multiInsert(conn *pgx.Conn, tableName string, columnNames []string, rowSrc pgx.CopyFromSource) (int, error) {
- maxRowsPerInsert := 65535 / len(columnNames)
- rowsThisInsert := 0
- rowCount := 0
-
- sqlBuf := &bytes.Buffer{}
- args := make(pgx.QueryArgs, 0)
-
- resetQuery := func() {
- sqlBuf.Reset()
- fmt.Fprintf(sqlBuf, "insert into %s(%s) values", tableName, strings.Join(columnNames, ", "))
-
- args = args[0:0]
-
- rowsThisInsert = 0
- }
- resetQuery()
-
- tx, err := conn.Begin()
- if err != nil {
- return 0, err
- }
- defer tx.Rollback()
-
- for rowSrc.Next() {
- if rowsThisInsert > 0 {
- sqlBuf.WriteByte(',')
- }
-
- sqlBuf.WriteByte('(')
-
- values, err := rowSrc.Values()
- if err != nil {
- return 0, err
- }
-
- for i, val := range values {
- if i > 0 {
- sqlBuf.WriteByte(',')
- }
- sqlBuf.WriteString(args.Append(val))
- }
-
- sqlBuf.WriteByte(')')
-
- rowsThisInsert++
-
- if rowsThisInsert == maxRowsPerInsert {
- _, err := tx.Exec(sqlBuf.String(), args...)
- if err != nil {
- return 0, err
- }
-
- rowCount += rowsThisInsert
- resetQuery()
- }
- }
-
- if rowsThisInsert > 0 {
- _, err := tx.Exec(sqlBuf.String(), args...)
- if err != nil {
- return 0, err
- }
-
- rowCount += rowsThisInsert
- }
-
- if err := tx.Commit(); err != nil {
- return 0, nil
- }
-
- return rowCount, nil
-
-}
-
-func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
- _, err := conn.Prepare("insert_t", benchmarkWriteTableInsertSQL)
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- _, err := multiInsert(conn, "t",
- []string{"varchar_1",
- "varchar_2",
- "varchar_null_1",
- "date_1",
- "date_null_1",
- "int4_1",
- "int4_2",
- "int4_null_1",
- "tstz_1",
- "tstz_2",
- "bool_1",
- "bool_2",
- "bool_3"},
- src)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- _, err := conn.CopyFrom(pgx.Identifier{"t"},
- []string{"varchar_1",
- "varchar_2",
- "varchar_null_1",
- "date_1",
- "date_null_1",
- "int4_1",
- "int4_2",
- "int4_null_1",
- "tstz_1",
- "tstz_2",
- "bool_1",
- "bool_2",
- "bool_3"},
- src)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkWrite5RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 5)
-}
-
-func BenchmarkWrite5RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 5)
-}
-
-func BenchmarkWrite5RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 5)
-}
-
-func BenchmarkWrite10RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 10)
-}
-
-func BenchmarkWrite10RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 10)
-}
-
-func BenchmarkWrite10RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 10)
-}
-
-func BenchmarkWrite100RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 100)
-}
-
-func BenchmarkWrite100RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 100)
-}
-
-func BenchmarkWrite100RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 100)
-}
-
-func BenchmarkWrite1000RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 1000)
-}
-
-func BenchmarkWrite1000RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 1000)
-}
-
-func BenchmarkWrite1000RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 1000)
-}
-
-func BenchmarkWrite10000RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 10000)
-}
-
-func BenchmarkWrite10000RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 10000)
-}
-
-func BenchmarkWrite10000RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 10000)
-}
diff --git a/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go b/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go
new file mode 100644
index 0000000..f8d437b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go
@@ -0,0 +1,89 @@
+package chunkreader
+
+import (
+ "io"
+)
+
+type ChunkReader struct {
+ r io.Reader
+
+ buf []byte
+ rp, wp int // buf read position and write position
+
+ options Options
+}
+
+type Options struct {
+ MinBufLen int // Minimum buffer length
+}
+
+func NewChunkReader(r io.Reader) *ChunkReader {
+ cr, err := NewChunkReaderEx(r, Options{})
+ if err != nil {
+ panic("default options can't be bad")
+ }
+
+ return cr
+}
+
+func NewChunkReaderEx(r io.Reader, options Options) (*ChunkReader, error) {
+ if options.MinBufLen == 0 {
+ options.MinBufLen = 4096
+ }
+
+ return &ChunkReader{
+ r: r,
+ buf: make([]byte, options.MinBufLen),
+ options: options,
+ }, nil
+}
+
+// Next returns buf filled with the next n bytes. If an error occurs, buf will
+// be nil.
+func (r *ChunkReader) Next(n int) (buf []byte, err error) {
+ // n bytes already in buf
+ if (r.wp - r.rp) >= n {
+ buf = r.buf[r.rp : r.rp+n]
+ r.rp += n
+ return buf, err
+ }
+
+ // available space in buf is less than n
+ if len(r.buf) < n {
+ r.copyBufContents(r.newBuf(n))
+ }
+
+ // buf is large enough, but need to shift filled area to start to make enough contiguous space
+ minReadCount := n - (r.wp - r.rp)
+ if (len(r.buf) - r.wp) < minReadCount {
+ newBuf := r.newBuf(n)
+ r.copyBufContents(newBuf)
+ }
+
+ if err := r.appendAtLeast(minReadCount); err != nil {
+ return nil, err
+ }
+
+ buf = r.buf[r.rp : r.rp+n]
+ r.rp += n
+ return buf, nil
+}
+
+func (r *ChunkReader) appendAtLeast(fillLen int) error {
+ n, err := io.ReadAtLeast(r.r, r.buf[r.wp:], fillLen)
+ r.wp += n
+ return err
+}
+
+func (r *ChunkReader) newBuf(size int) []byte {
+ if size < r.options.MinBufLen {
+ size = r.options.MinBufLen
+ }
+ return make([]byte, size)
+}
+
+func (r *ChunkReader) copyBufContents(dest []byte) {
+ r.wp = copy(dest, r.buf[r.rp:r.wp])
+ r.rp = 0
+ r.buf = dest
+}
diff --git a/vendor/github.com/jackc/pgx/conn.go b/vendor/github.com/jackc/pgx/conn.go
index a2d60e7..125d903 100644
--- a/vendor/github.com/jackc/pgx/conn.go
+++ b/vendor/github.com/jackc/pgx/conn.go
@@ -1,12 +1,11 @@
package pgx
import (
- "bufio"
+ "context"
"crypto/md5"
"crypto/tls"
"encoding/binary"
"encoding/hex"
- "errors"
"fmt"
"io"
"net"
@@ -17,9 +16,46 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
+)
+
+const (
+ connStatusUninitialized = iota
+ connStatusClosed
+ connStatusIdle
+ connStatusBusy
)
+// minimalConnInfo has just enough static type information to establish the
+// connection and retrieve the type data.
+var minimalConnInfo *pgtype.ConnInfo
+
+func init() {
+ minimalConnInfo = pgtype.NewConnInfo()
+ minimalConnInfo.InitializeDataTypes(map[string]pgtype.OID{
+ "int4": pgtype.Int4OID,
+ "name": pgtype.NameOID,
+ "oid": pgtype.OIDOID,
+ "text": pgtype.TextOID,
+ "varchar": pgtype.VarcharOID,
+ })
+}
+
+// NoticeHandler is a function that can handle notices received from the
+// PostgreSQL server. Notices can be received at any time, usually during
+// handling of a query response. The *Conn is provided so the handler is aware
+// of the origin of the notice, but it must not invoke any query method. Be
+// aware that this is distinct from LISTEN/NOTIFY notification.
+type NoticeHandler func(*Conn, *Notice)
+
// DialFunc is a function that can be used to connect to a PostgreSQL server
type DialFunc func(network, addr string) (net.Conn, error)
@@ -36,38 +72,76 @@ type ConnConfig struct {
Logger Logger
LogLevel int
Dial DialFunc
- RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+ RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+ OnNotice NoticeHandler // Callback function called when a notice response is received.
+ CustomConnInfo func(*Conn) (*pgtype.ConnInfo, error) // Callback function to implement connection strategies for different backends. crate, pgbouncer, pgpool, etc.
+
+ // PreferSimpleProtocol disables implicit prepared statement usage. By default
+ // pgx automatically uses the unnamed prepared statement for Query and
+ // QueryRow. It also uses a prepared statement when Exec has arguments. This
+ // can improve performance due to being able to use the binary format. It also
+ // does not rely on client side parameter sanitization. However, it does incur
+ // two round-trips per query and may be incompatible proxies such as
+ // PGBouncer. Setting PreferSimpleProtocol causes the simple protocol to be
+ // used by default. The same functionality can be controlled on a per query
+ // basis by setting QueryExOptions.SimpleProtocol.
+ PreferSimpleProtocol bool
+}
+
+func (cc *ConnConfig) networkAddress() (network, address string) {
+ network = "tcp"
+ address = fmt.Sprintf("%s:%d", cc.Host, cc.Port)
+ // See if host is a valid path, if yes connect with a socket
+ if _, err := os.Stat(cc.Host); err == nil {
+ // For backward compatibility accept socket file paths -- but directories are now preferred
+ network = "unix"
+ address = cc.Host
+ if !strings.Contains(address, "/.s.PGSQL.") {
+ address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(cc.Port), 10)
+ }
+ }
+
+ return network, address
}
// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage.
// Use ConnPool to manage access to multiple database connections from multiple
// goroutines.
type Conn struct {
- conn net.Conn // the underlying TCP or unix domain socket connection
- lastActivityTime time.Time // the last time the connection was used
- reader *bufio.Reader // buffered reader to improve read performance
- wbuf [1024]byte
- writeBuf WriteBuf
- Pid int32 // backend pid
- SecretKey int32 // key to use to send a cancel query message to the server
+ conn net.Conn // the underlying TCP or unix domain socket connection
+ lastActivityTime time.Time // the last time the connection was used
+ wbuf []byte
+ pid uint32 // backend pid
+ secretKey uint32 // key to use to send a cancel query message to the server
RuntimeParams map[string]string // parameters that have been reported by the server
- PgTypes map[Oid]PgType // oids to PgTypes
config ConnConfig // config used when establishing this connection
- TxStatus byte
+ txStatus byte
preparedStatements map[string]*PreparedStatement
channels map[string]struct{}
notifications []*Notification
- alive bool
- causeOfDeath error
logger Logger
logLevel int
- mr msgReader
fp *fastpath
- pgsqlAfInet *byte
- pgsqlAfInet6 *byte
- busy bool
poolResetCount int
preallocatedRows []Rows
+ onNotice NoticeHandler
+
+ mux sync.Mutex
+ status byte // One of connStatus* constants
+ causeOfDeath error
+
+ pendingReadyForQueryCount int // numer of ReadyForQuery messages expected
+ cancelQueryInProgress int32
+ cancelQueryCompleted chan struct{}
+
+ // context support
+ ctxInProgress bool
+ doneChan chan struct{}
+ closedChan chan error
+
+ ConnInfo *pgtype.ConnInfo
+
+ frontend *pgproto3.Frontend
}
// PreparedStatement is a description of a prepared statement
@@ -75,27 +149,21 @@ type PreparedStatement struct {
Name string
SQL string
FieldDescriptions []FieldDescription
- ParameterOids []Oid
+ ParameterOIDs []pgtype.OID
}
// PrepareExOptions is an option struct that can be passed to PrepareEx
type PrepareExOptions struct {
- ParameterOids []Oid
+ ParameterOIDs []pgtype.OID
}
// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system
type Notification struct {
- Pid int32 // backend pid that sent the notification
+ PID uint32 // backend pid that sent the notification
Channel string // channel from which notification was received
Payload string
}
-// PgType is information about PostgreSQL type and how to encode and decode it
-type PgType struct {
- Name string // name of type e.g. int4, text, date
- DefaultFormat int16 // default format (text or binary) this type will be requested in
-}
-
// CommandTag is the result of an Exec function
type CommandTag string
@@ -127,9 +195,6 @@ func (ident Identifier) Sanitize() string {
// ErrNoRows occurs when rows are expected but none are returned.
var ErrNoRows = errors.New("no rows in result set")
-// ErrNotificationTimeout occurs when WaitForNotification times out.
-var ErrNotificationTimeout = errors.New("notification timeout")
-
// ErrDeadConn occurs on an attempt to use a dead connection
var ErrDeadConn = errors.New("conn is dead")
@@ -138,7 +203,7 @@ var ErrDeadConn = errors.New("conn is dead")
var ErrTLSRefused = errors.New("server refused TLS connection")
// ErrConnBusy occurs when the connection is busy (for example, in the middle of
-// reading query results) and another action is attempts.
+// reading query results) and another action is attempted.
var ErrConnBusy = errors.New("conn is busy")
// ErrInvalidLogLevel occurs on attempt to set an invalid log level.
@@ -155,29 +220,18 @@ func (e ProtocolError) Error() string {
// config.Host must be specified. config.User will default to the OS user name.
// Other config fields are optional.
func Connect(config ConnConfig) (c *Conn, err error) {
- return connect(config, nil, nil, nil)
+ return connect(config, minimalConnInfo)
}
-func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsqlAfInet6 *byte) (c *Conn, err error) {
+func defaultDialer() *net.Dialer {
+ return &net.Dialer{KeepAlive: 5 * time.Minute}
+}
+
+func connect(config ConnConfig, connInfo *pgtype.ConnInfo) (c *Conn, err error) {
c = new(Conn)
c.config = config
-
- if pgTypes != nil {
- c.PgTypes = make(map[Oid]PgType, len(pgTypes))
- for k, v := range pgTypes {
- c.PgTypes[k] = v
- }
- }
-
- if pgsqlAfInet != nil {
- c.pgsqlAfInet = new(byte)
- *c.pgsqlAfInet = *pgsqlAfInet
- }
- if pgsqlAfInet6 != nil {
- c.pgsqlAfInet6 = new(byte)
- *c.pgsqlAfInet6 = *pgsqlAfInet6
- }
+ c.ConnInfo = connInfo
if c.config.LogLevel != 0 {
c.logLevel = c.config.LogLevel
@@ -186,8 +240,6 @@ func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsql
c.logLevel = LogLevelDebug
}
c.logger = c.config.Logger
- c.mr.log = c.log
- c.mr.shouldLog = c.shouldLog
if c.config.User == "" {
user, err := user.Current()
@@ -196,46 +248,39 @@ func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsql
}
c.config.User = user.Username
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Using default connection config", "User", c.config.User)
+ c.log(LogLevelDebug, "Using default connection config", map[string]interface{}{"User": c.config.User})
}
}
if c.config.Port == 0 {
c.config.Port = 5432
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Using default connection config", "Port", c.config.Port)
+ c.log(LogLevelDebug, "Using default connection config", map[string]interface{}{"Port": c.config.Port})
}
}
- network := "tcp"
- address := fmt.Sprintf("%s:%d", c.config.Host, c.config.Port)
- // See if host is a valid path, if yes connect with a socket
- if _, err := os.Stat(c.config.Host); err == nil {
- // For backward compatibility accept socket file paths -- but directories are now preferred
- network = "unix"
- address = c.config.Host
- if !strings.Contains(address, "/.s.PGSQL.") {
- address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(c.config.Port), 10)
- }
- }
+ c.onNotice = config.OnNotice
+
+ network, address := c.config.networkAddress()
if c.config.Dial == nil {
- c.config.Dial = (&net.Dialer{KeepAlive: 5 * time.Minute}).Dial
+ d := defaultDialer()
+ c.config.Dial = d.Dial
}
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, fmt.Sprintf("Dialing PostgreSQL server at %s address: %s", network, address))
+ c.log(LogLevelInfo, "Dialing PostgreSQL server", map[string]interface{}{"network": network, "address": address})
}
err = c.connect(config, network, address, config.TLSConfig)
if err != nil && config.UseFallbackTLS {
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, fmt.Sprintf("Connect with TLSConfig failed, trying FallbackTLSConfig: %v", err))
+ c.log(LogLevelInfo, "connect with TLSConfig failed, trying FallbackTLSConfig", map[string]interface{}{"err": err})
}
err = c.connect(config, network, address, config.FallbackTLSConfig)
}
if err != nil {
if c.shouldLog(LogLevelError) {
- c.log(LogLevelError, fmt.Sprintf("Connect failed: %v", err))
+ c.log(LogLevelError, "connect failed", map[string]interface{}{"err": err})
}
return nil, err
}
@@ -251,88 +296,95 @@ func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tl
defer func() {
if c != nil && err != nil {
c.conn.Close()
- c.alive = false
+ c.mux.Lock()
+ c.status = connStatusClosed
+ c.mux.Unlock()
}
}()
c.RuntimeParams = make(map[string]string)
c.preparedStatements = make(map[string]*PreparedStatement)
c.channels = make(map[string]struct{})
- c.alive = true
c.lastActivityTime = time.Now()
+ c.cancelQueryCompleted = make(chan struct{}, 1)
+ c.doneChan = make(chan struct{})
+ c.closedChan = make(chan error)
+ c.wbuf = make([]byte, 0, 1024)
+
+ c.mux.Lock()
+ c.status = connStatusIdle
+ c.mux.Unlock()
if tlsConfig != nil {
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Starting TLS handshake")
+ c.log(LogLevelDebug, "starting TLS handshake", nil)
}
if err := c.startTLS(tlsConfig); err != nil {
return err
}
}
- c.reader = bufio.NewReader(c.conn)
- c.mr.reader = c.reader
+ c.frontend, err = pgproto3.NewFrontend(c.conn, c.conn)
+ if err != nil {
+ return err
+ }
- msg := newStartupMessage()
+ startupMsg := pgproto3.StartupMessage{
+ ProtocolVersion: pgproto3.ProtocolVersionNumber,
+ Parameters: make(map[string]string),
+ }
// Default to disabling TLS renegotiation.
//
// Go does not support (https://github.com/golang/go/issues/5742)
// PostgreSQL recommends disabling (http://www.postgresql.org/docs/9.4/static/runtime-config-connection.html#GUC-SSL-RENEGOTIATION-LIMIT)
if tlsConfig != nil {
- msg.options["ssl_renegotiation_limit"] = "0"
+ startupMsg.Parameters["ssl_renegotiation_limit"] = "0"
}
// Copy default run-time params
for k, v := range config.RuntimeParams {
- msg.options[k] = v
+ startupMsg.Parameters[k] = v
}
- msg.options["user"] = c.config.User
+ startupMsg.Parameters["user"] = c.config.User
if c.config.Database != "" {
- msg.options["database"] = c.config.Database
+ startupMsg.Parameters["database"] = c.config.Database
}
- if err = c.txStartupMessage(msg); err != nil {
+ if _, err := c.conn.Write(startupMsg.Encode(nil)); err != nil {
return err
}
+ c.pendingReadyForQueryCount = 1
+
for {
- var t byte
- var r *msgReader
- t, r, err = c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case backendKeyData:
- c.rxBackendKeyData(r)
- case authenticationX:
- if err = c.rxAuthenticationX(r); err != nil {
+ switch msg := msg.(type) {
+ case *pgproto3.BackendKeyData:
+ c.rxBackendKeyData(msg)
+ case *pgproto3.Authentication:
+ if err = c.rxAuthenticationX(msg); err != nil {
return err
}
- case readyForQuery:
- c.rxReadyForQuery(r)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, "Connection established")
+ c.log(LogLevelInfo, "connection established", nil)
}
// Replication connections can't execute the queries to
// populate the c.PgTypes and c.pgsqlAfInet
- if _, ok := msg.options["replication"]; ok {
+ if _, ok := config.RuntimeParams["replication"]; ok {
return nil
}
- if c.PgTypes == nil {
- err = c.loadPgTypes()
- if err != nil {
- return err
- }
- }
-
- if c.pgsqlAfInet == nil || c.pgsqlAfInet6 == nil {
- err = c.loadInetConstants()
+ if c.ConnInfo == minimalConnInfo {
+ err = c.initConnInfo()
if err != nil {
return err
}
@@ -340,77 +392,276 @@ func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tl
return nil
default:
- if err = c.processContextFreeMsg(t, r); err != nil {
+ if err = c.processContextFreeMsg(msg); err != nil {
return err
}
}
}
}
-func (c *Conn) loadPgTypes() error {
- rows, err := c.Query(`select t.oid, t.typname
+func initPostgresql(c *Conn) (*pgtype.ConnInfo, error) {
+ const (
+ namedOIDQuery = `select t.oid,
+ case when nsp.nspname in ('pg_catalog', 'public') then t.typname
+ else nsp.nspname||'.'||t.typname
+ end
from pg_type t
left join pg_type base_type on t.typelem=base_type.oid
+left join pg_namespace nsp on t.typnamespace=nsp.oid
where (
- t.typtype='b'
- and (base_type.oid is null or base_type.typtype='b')
+ t.typtype in('b', 'p', 'r', 'e')
+ and (base_type.oid is null or base_type.typtype in('b', 'p', 'r'))
+ )`
)
- or t.typname in('record');`)
+
+ nameOIDs, err := connInfoFromRows(c.Query(namedOIDQuery))
if err != nil {
- return err
+ return nil, err
}
- c.PgTypes = make(map[Oid]PgType, 128)
+ cinfo := pgtype.NewConnInfo()
+ cinfo.InitializeDataTypes(nameOIDs)
- for rows.Next() {
- var oid Oid
- var t PgType
+ if err = c.initConnInfoEnumArray(cinfo); err != nil {
+ return nil, err
+ }
+
+ return cinfo, nil
+}
+
+func (c *Conn) initConnInfo() (err error) {
+ var (
+ connInfo *pgtype.ConnInfo
+ )
+
+ if c.config.CustomConnInfo != nil {
+ if c.ConnInfo, err = c.config.CustomConnInfo(c); err != nil {
+ return err
+ }
- rows.Scan(&oid, &t.Name)
+ return nil
+ }
- // The zero value is text format so we ignore any types without a default type format
- t.DefaultFormat, _ = DefaultTypeFormats[t.Name]
+ if connInfo, err = initPostgresql(c); err == nil {
+ c.ConnInfo = connInfo
+ return err
+ }
- c.PgTypes[oid] = t
+ // Check if CrateDB specific approach might still allow us to connect.
+ if connInfo, err = c.crateDBTypesQuery(err); err == nil {
+ c.ConnInfo = connInfo
}
- return rows.Err()
+ return err
}
-// Family is needed for binary encoding of inet/cidr. The constant is based on
-// the server's definition of AF_INET. In theory, this could differ between
-// platforms, so request an IPv4 and an IPv6 inet and get the family from that.
-func (c *Conn) loadInetConstants() error {
- var ipv4, ipv6 []byte
-
- err := c.QueryRow("select '127.0.0.1'::inet, '1::'::inet").Scan(&ipv4, &ipv6)
+// initConnInfoEnumArray introspects for arrays of enums and registers a data type for them.
+func (c *Conn) initConnInfoEnumArray(cinfo *pgtype.ConnInfo) error {
+ nameOIDs := make(map[string]pgtype.OID, 16)
+ rows, err := c.Query(`select t.oid, t.typname
+from pg_type t
+ join pg_type base_type on t.typelem=base_type.oid
+where t.typtype = 'b'
+ and base_type.typtype = 'e'`)
if err != nil {
return err
}
- c.pgsqlAfInet = &ipv4[0]
- c.pgsqlAfInet6 = &ipv6[0]
+ for rows.Next() {
+ var oid pgtype.OID
+ var name pgtype.Text
+ if err := rows.Scan(&oid, &name); err != nil {
+ return err
+ }
+
+ nameOIDs[name.String] = oid
+ }
+
+ if rows.Err() != nil {
+ return rows.Err()
+ }
+
+ for name, oid := range nameOIDs {
+ cinfo.RegisterDataType(pgtype.DataType{
+ Value: &pgtype.EnumArray{},
+ Name: name,
+ OID: oid,
+ })
+ }
return nil
}
+// crateDBTypesQuery checks if the given err is likely to be the result of
+// CrateDB not implementing the pg_types table correctly. If yes, a CrateDB
+// specific query against pg_types is executed and its results are returned. If
+// not, the original error is returned.
+func (c *Conn) crateDBTypesQuery(err error) (*pgtype.ConnInfo, error) {
+ // CrateDB 2.1.6 is a database that implements the PostgreSQL wire protocol,
+ // but not perfectly. In particular, the pg_catalog schema containing the
+ // pg_type table is not visible by default and the pg_type.typtype column is
+ // not implemented. Therefor the query above currently returns the following
+ // error:
+ //
+ // pgx.PgError{Severity:"ERROR", Code:"XX000",
+ // Message:"TableUnknownException: Table 'test.pg_type' unknown",
+ // Detail:"", Hint:"", Position:0, InternalPosition:0, InternalQuery:"",
+ // Where:"", SchemaName:"", TableName:"", ColumnName:"", DataTypeName:"",
+ // ConstraintName:"", File:"Schemas.java", Line:99, Routine:"getTableInfo"}
+ //
+ // If CrateDB was to fix the pg_type table visbility in the future, we'd
+ // still get this error until typtype column is implemented:
+ //
+ // pgx.PgError{Severity:"ERROR", Code:"XX000",
+ // Message:"ColumnUnknownException: Column typtype unknown", Detail:"",
+ // Hint:"", Position:0, InternalPosition:0, InternalQuery:"", Where:"",
+ // SchemaName:"", TableName:"", ColumnName:"", DataTypeName:"",
+ // ConstraintName:"", File:"FullQualifiedNameFieldProvider.java", Line:132,
+ //
+ // Additionally CrateDB doesn't implement Postgres error codes [2], and
+ // instead always returns "XX000" (internal_error). The code below uses all
+ // of this knowledge as a heuristic to detect CrateDB. If CrateDB is
+ // detected, a CrateDB specific pg_type query is executed instead.
+ //
+ // The heuristic is designed to still work even if CrateDB fixes [2] or
+ // renames its internal exception names. If both are changed but pg_types
+ // isn't fixed, this code will need to be changed.
+ //
+ // There is also a small chance the heuristic will yield a false positive for
+ // non-CrateDB databases (e.g. if a real Postgres instance returns a XX000
+ // error), but hopefully there will be no harm in attempting the alternative
+ // query in this case.
+ //
+ // CrateDB also uses the type varchar for the typname column which required
+ // adding varchar to the minimalConnInfo init code.
+ //
+ // Also see the discussion here [3].
+ //
+ // [1] https://crate.io/
+ // [2] https://github.com/crate/crate/issues/5027
+ // [3] https://github.com/jackc/pgx/issues/320
+
+ if pgErr, ok := err.(PgError); ok &&
+ (pgErr.Code == "XX000" ||
+ strings.Contains(pgErr.Message, "TableUnknownException") ||
+ strings.Contains(pgErr.Message, "ColumnUnknownException")) {
+ var (
+ nameOIDs map[string]pgtype.OID
+ )
+
+ if nameOIDs, err = connInfoFromRows(c.Query(`select oid, typname from pg_catalog.pg_type`)); err != nil {
+ return nil, err
+ }
+
+ cinfo := pgtype.NewConnInfo()
+ cinfo.InitializeDataTypes(nameOIDs)
+
+ return cinfo, err
+ }
+
+ return nil, err
+}
+
+// PID returns the backend PID for this connection.
+func (c *Conn) PID() uint32 {
+ return c.pid
+}
+
// Close closes a connection. It is safe to call Close on a already closed
// connection.
func (c *Conn) Close() (err error) {
- if !c.IsAlive() {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status < connStatusIdle {
return nil
}
+ c.status = connStatusClosed
- wbuf := newWriteBuf(c, 'X')
- wbuf.closeMsg()
+ defer func() {
+ c.conn.Close()
+ c.causeOfDeath = errors.New("Closed")
+ if c.shouldLog(LogLevelInfo) {
+ c.log(LogLevelInfo, "closed connection", nil)
+ }
+ }()
- _, err = c.conn.Write(wbuf.buf)
+ err = c.conn.SetDeadline(time.Time{})
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to clear deadlines to send close message", map[string]interface{}{"err": err})
+ return err
+ }
- c.die(errors.New("Closed"))
- if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, "Closed connection")
+ _, err = c.conn.Write([]byte{'X', 0, 0, 0, 4})
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to send terminate message", map[string]interface{}{"err": err})
+ return err
}
- return err
+
+ err = c.conn.SetReadDeadline(time.Now().Add(5 * time.Second))
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to set read deadline to finish closing", map[string]interface{}{"err": err})
+ return err
+ }
+
+ _, err = c.conn.Read(make([]byte, 1))
+ if err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// Merge returns a new ConnConfig with the attributes of old and other
+// combined. When an attribute is set on both, other takes precedence.
+//
+// As a security precaution, if the other TLSConfig is nil, all old TLS
+// attributes will be preserved.
+func (old ConnConfig) Merge(other ConnConfig) ConnConfig {
+ cc := old
+
+ if other.Host != "" {
+ cc.Host = other.Host
+ }
+ if other.Port != 0 {
+ cc.Port = other.Port
+ }
+ if other.Database != "" {
+ cc.Database = other.Database
+ }
+ if other.User != "" {
+ cc.User = other.User
+ }
+ if other.Password != "" {
+ cc.Password = other.Password
+ }
+
+ if other.TLSConfig != nil {
+ cc.TLSConfig = other.TLSConfig
+ cc.UseFallbackTLS = other.UseFallbackTLS
+ cc.FallbackTLSConfig = other.FallbackTLSConfig
+ }
+
+ if other.Logger != nil {
+ cc.Logger = other.Logger
+ }
+ if other.LogLevel != 0 {
+ cc.LogLevel = other.LogLevel
+ }
+
+ if other.Dial != nil {
+ cc.Dial = other.Dial
+ }
+
+ cc.RuntimeParams = make(map[string]string)
+ for k, v := range old.RuntimeParams {
+ cc.RuntimeParams[k] = v
+ }
+ for k, v := range other.RuntimeParams {
+ cc.RuntimeParams[k] = v
+ }
+
+ return cc
}
// ParseURI parses a database URI into ConnConfig
@@ -440,13 +691,24 @@ func ParseURI(uri string) (ConnConfig, error) {
}
cp.Database = strings.TrimLeft(url.Path, "/")
+ if pgtimeout := url.Query().Get("connect_timeout"); pgtimeout != "" {
+ timeout, err := strconv.ParseInt(pgtimeout, 10, 64)
+ if err != nil {
+ return cp, err
+ }
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cp.Dial = d.Dial
+ }
+
err = configSSL(url.Query().Get("sslmode"), &cp)
if err != nil {
return cp, err
}
ignoreKeys := map[string]struct{}{
- "sslmode": {},
+ "sslmode": {},
+ "connect_timeout": {},
}
cp.RuntimeParams = make(map[string]string)
@@ -504,6 +766,14 @@ func ParseDSN(s string) (ConnConfig, error) {
cp.Database = b[2]
case "sslmode":
sslmode = b[2]
+ case "connect_timeout":
+ timeout, err := strconv.ParseInt(b[2], 10, 64)
+ if err != nil {
+ return cp, err
+ }
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cp.Dial = d.Dial
default:
cp.RuntimeParams[b[1]] = b[2]
}
@@ -541,6 +811,7 @@ func ParseConnectionString(s string) (ConnConfig, error) {
// PGPASSWORD
// PGSSLMODE
// PGAPPNAME
+// PGCONNECT_TIMEOUT
//
// Important TLS Security Notes:
// ParseEnvLibpq tries to match libpq behavior with regard to PGSSLMODE. This
@@ -549,10 +820,10 @@ func ParseConnectionString(s string) (ConnConfig, error) {
// See http://www.postgresql.org/docs/9.4/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION
// for details on what level of security each sslmode provides.
//
-// "require" and "verify-ca" modes currently are treated as "verify-full". e.g.
-// They have stronger security guarantees than they would with libpq. Do not
-// rely on this behavior as it may be possible to match libpq in the future. If
-// you need full security use "verify-full".
+// "verify-ca" mode currently is treated as "verify-full". e.g. It has stronger
+// security guarantees than it would with libpq. Do not rely on this behavior as it
+// may be possible to match libpq in the future. If you need full security use
+// "verify-full".
//
// Several of the PGSSLMODE options (including the default behavior of "prefer")
// will set UseFallbackTLS to true and FallbackTLSConfig to a disabled or
@@ -576,6 +847,16 @@ func ParseEnvLibpq() (ConnConfig, error) {
cc.User = os.Getenv("PGUSER")
cc.Password = os.Getenv("PGPASSWORD")
+ if pgtimeout := os.Getenv("PGCONNECT_TIMEOUT"); pgtimeout != "" {
+ if timeout, err := strconv.ParseInt(pgtimeout, 10, 64); err == nil {
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cc.Dial = d.Dial
+ } else {
+ return cc, err
+ }
+ }
+
sslmode := os.Getenv("PGSSLMODE")
err := configSSL(sslmode, &cc)
@@ -608,7 +889,9 @@ func configSSL(sslmode string, cc *ConnConfig) error {
cc.TLSConfig = &tls.Config{InsecureSkipVerify: true}
cc.UseFallbackTLS = true
cc.FallbackTLSConfig = nil
- case "require", "verify-ca", "verify-full":
+ case "require":
+ cc.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+ case "verify-ca", "verify-full":
cc.TLSConfig = &tls.Config{
ServerName: cc.Host,
}
@@ -626,7 +909,7 @@ func configSSL(sslmode string, cc *ConnConfig) error {
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
// concern for if the statement has already been prepared.
func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) {
- return c.PrepareEx(name, sql, nil)
+ return c.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx creates a prepared statement with name and sql. sql can contain placeholders
@@ -636,83 +919,95 @@ func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) {
// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same
// name and sql arguments. This allows a code path to PrepareEx and Query/Exec without
// concern for if the statement has already been prepared.
-func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
+func (c *Conn) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ ps, err = c.prepareEx(name, sql, opts)
+ err = c.termContext(err)
+ return ps, err
+}
+
+func (c *Conn) prepareEx(name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
if name != "" {
if ps, ok := c.preparedStatements[name]; ok && ps.SQL == sql {
return ps, nil
}
}
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
+ }
+
if c.shouldLog(LogLevelError) {
defer func() {
if err != nil {
- c.log(LogLevelError, fmt.Sprintf("Prepare `%s` as `%s` failed: %v", name, sql, err))
+ c.log(LogLevelError, "prepareEx failed", map[string]interface{}{"err": err, "name": name, "sql": sql})
}
}()
}
- // parse
- wbuf := newWriteBuf(c, 'P')
- wbuf.WriteCString(name)
- wbuf.WriteCString(sql)
-
- if opts != nil {
- if len(opts.ParameterOids) > 65535 {
- return nil, fmt.Errorf("Number of PrepareExOptions ParameterOids must be between 0 and 65535, received %d", len(opts.ParameterOids))
- }
- wbuf.WriteInt16(int16(len(opts.ParameterOids)))
- for _, oid := range opts.ParameterOids {
- wbuf.WriteInt32(int32(oid))
- }
- } else {
- wbuf.WriteInt16(0)
+ if opts == nil {
+ opts = &PrepareExOptions{}
}
- // describe
- wbuf.startMsg('D')
- wbuf.WriteByte('S')
- wbuf.WriteCString(name)
+ if len(opts.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of PrepareExOptions ParameterOIDs must be between 0 and 65535, received %d", len(opts.ParameterOIDs))
+ }
- // sync
- wbuf.startMsg('S')
- wbuf.closeMsg()
+ buf := appendParse(c.wbuf, name, sql, opts.ParameterOIDs)
+ buf = appendDescribe(buf, 'S', name)
+ buf = appendSync(buf)
- _, err = c.conn.Write(wbuf.buf)
+ n, err := c.conn.Write(buf)
if err != nil {
- c.die(err)
+ if fatalWriteErr(n, err) {
+ c.die(err)
+ }
return nil, err
}
+ c.pendingReadyForQueryCount++
ps = &PreparedStatement{Name: name, SQL: sql}
var softErr error
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case parseComplete:
- case parameterDescription:
- ps.ParameterOids = c.rxParameterDescription(r)
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ ps.ParameterOIDs = c.rxParameterDescription(msg)
- if len(ps.ParameterOids) > 65535 && softErr == nil {
- softErr = fmt.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOids))
+ if len(ps.ParameterOIDs) > 65535 && softErr == nil {
+ softErr = errors.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOIDs))
}
- case rowDescription:
- ps.FieldDescriptions = c.rxRowDescription(r)
+ case *pgproto3.RowDescription:
+ ps.FieldDescriptions = c.rxRowDescription(msg)
for i := range ps.FieldDescriptions {
- t, _ := c.PgTypes[ps.FieldDescriptions[i].DataType]
- ps.FieldDescriptions[i].DataTypeName = t.Name
- ps.FieldDescriptions[i].FormatCode = t.DefaultFormat
+ if dt, ok := c.ConnInfo.DataTypeForOID(ps.FieldDescriptions[i].DataType); ok {
+ ps.FieldDescriptions[i].DataTypeName = dt.Name
+ if _, ok := dt.Value.(pgtype.BinaryDecoder); ok {
+ ps.FieldDescriptions[i].FormatCode = BinaryFormatCode
+ } else {
+ ps.FieldDescriptions[i].FormatCode = TextFormatCode
+ }
+ } else {
+ return nil, errors.Errorf("unknown oid: %d", ps.FieldDescriptions[i].DataType)
+ }
}
- case noData:
- case readyForQuery:
- c.rxReadyForQuery(r)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
if softErr == nil {
c.preparedStatements[name] = ps
@@ -720,7 +1015,7 @@ func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *Prepared
return ps, softErr
default:
- if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
+ if e := c.processContextFreeMsg(msg); e != nil && softErr == nil {
softErr = e
}
}
@@ -728,37 +1023,62 @@ func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *Prepared
}
// Deallocate released a prepared statement
-func (c *Conn) Deallocate(name string) (err error) {
+func (c *Conn) Deallocate(name string) error {
+ return c.deallocateContext(context.Background(), name)
+}
+
+// TODO - consider making this public
+func (c *Conn) deallocateContext(ctx context.Context, name string) (err error) {
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = c.initContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
+
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
delete(c.preparedStatements, name)
// close
- wbuf := newWriteBuf(c, 'C')
- wbuf.WriteByte('S')
- wbuf.WriteCString(name)
+ buf := c.wbuf
+ buf = append(buf, 'C')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, 'S')
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
// flush
- wbuf.startMsg('H')
- wbuf.closeMsg()
+ buf = append(buf, 'H')
+ buf = pgio.AppendInt32(buf, 4)
- _, err = c.conn.Write(wbuf.buf)
+ _, err = c.conn.Write(buf)
if err != nil {
c.die(err)
return err
}
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case closeComplete:
+ switch msg.(type) {
+ case *pgproto3.CloseComplete:
return nil
default:
- err = c.processContextFreeMsg(t, r)
+ err = c.processContextFreeMsg(msg)
if err != nil {
return err
}
@@ -789,9 +1109,8 @@ func (c *Conn) Unlisten(channel string) error {
return nil
}
-// WaitForNotification waits for a PostgreSQL notification for up to timeout.
-// If the timeout occurs it returns pgx.ErrNotificationTimeout
-func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error) {
+// WaitForNotification waits for a PostgreSQL notification.
+func (c *Conn) WaitForNotification(ctx context.Context) (notification *Notification, err error) {
// Return already received notification immediately
if len(c.notifications) > 0 {
notification := c.notifications[0]
@@ -799,86 +1118,40 @@ func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error)
return notification, nil
}
- stopTime := time.Now().Add(timeout)
-
- for {
- now := time.Now()
-
- if now.After(stopTime) {
- return nil, ErrNotificationTimeout
- }
-
- // If there has been no activity on this connection for a while send a nop message just to ensure
- // the connection is alive
- nextEnsureAliveTime := c.lastActivityTime.Add(15 * time.Second)
- if nextEnsureAliveTime.Before(now) {
- // If the server can't respond to a nop in 15 seconds, assume it's dead
- err := c.conn.SetReadDeadline(now.Add(15 * time.Second))
- if err != nil {
- return nil, err
- }
-
- _, err = c.Exec("--;")
- if err != nil {
- return nil, err
- }
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return nil, err
+ }
- c.lastActivityTime = now
- }
+ err = c.initContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
- var deadline time.Time
- if stopTime.Before(nextEnsureAliveTime) {
- deadline = stopTime
- } else {
- deadline = nextEnsureAliveTime
+ if err = c.lock(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if unlockErr := c.unlock(); unlockErr != nil && err == nil {
+ err = unlockErr
}
+ }()
- notification, err := c.waitForNotification(deadline)
- if err != ErrNotificationTimeout {
- return notification, err
- }
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
}
-}
-
-func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) {
- var zeroTime time.Time
for {
- // Use SetReadDeadline to implement the timeout. SetReadDeadline will
- // cause operations to fail with a *net.OpError that has a Timeout()
- // of true. Because the normal pgx rxMsg path considers any error to
- // have potentially corrupted the state of the connection, it dies
- // on any errors. So to avoid timeout errors in rxMsg we set the
- // deadline and peek into the reader. If a timeout error occurs there
- // we don't break the pgx connection. If the Peek returns that data
- // is available then we turn off the read deadline before the rxMsg.
- err := c.conn.SetReadDeadline(deadline)
+ msg, err := c.rxMsg()
if err != nil {
return nil, err
}
- // Wait until there is a byte available before continuing onto the normal msg reading path
- _, err = c.reader.Peek(1)
+ err = c.processContextFreeMsg(msg)
if err != nil {
- c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possiple error from SetReadDeadline
- if err, ok := err.(*net.OpError); ok && err.Timeout() {
- return nil, ErrNotificationTimeout
- }
- return nil, err
- }
-
- err = c.conn.SetReadDeadline(zeroTime)
- if err != nil {
- return nil, err
- }
-
- var t byte
- var r *msgReader
- if t, r, err = c.rxMsg(); err == nil {
- if err = c.processContextFreeMsg(t, r); err != nil {
- return nil, err
- }
- } else {
return nil, err
}
@@ -891,10 +1164,14 @@ func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) {
}
func (c *Conn) IsAlive() bool {
- return c.alive
+ c.mux.Lock()
+ defer c.mux.Unlock()
+ return c.status >= connStatusIdle
}
func (c *Conn) CauseOfDeath() error {
+ c.mux.Lock()
+ defer c.mux.Unlock()
return c.causeOfDeath
}
@@ -906,17 +1183,19 @@ func (c *Conn) sendQuery(sql string, arguments ...interface{}) (err error) {
}
func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error {
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
if len(args) == 0 {
- wbuf := newWriteBuf(c, 'Q')
- wbuf.WriteCString(sql)
- wbuf.closeMsg()
+ buf := appendQuery(c.wbuf, sql)
- _, err := c.conn.Write(wbuf.buf)
+ _, err := c.conn.Write(buf)
if err != nil {
c.die(err)
return err
}
+ c.pendingReadyForQueryCount++
return nil
}
@@ -930,168 +1209,105 @@ func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error {
}
func (c *Conn) sendPreparedQuery(ps *PreparedStatement, arguments ...interface{}) (err error) {
- if len(ps.ParameterOids) != len(arguments) {
- return fmt.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOids), len(arguments))
- }
-
- // bind
- wbuf := newWriteBuf(c, 'B')
- wbuf.WriteByte(0)
- wbuf.WriteCString(ps.Name)
-
- wbuf.WriteInt16(int16(len(ps.ParameterOids)))
- for i, oid := range ps.ParameterOids {
- switch arg := arguments[i].(type) {
- case Encoder:
- wbuf.WriteInt16(arg.FormatCode())
- case string, *string:
- wbuf.WriteInt16(TextFormatCode)
- default:
- switch oid {
- case BoolOid, ByteaOid, Int2Oid, Int4Oid, Int8Oid, Float4Oid, Float8Oid, TimestampTzOid, TimestampTzArrayOid, TimestampOid, TimestampArrayOid, DateOid, BoolArrayOid, ByteaArrayOid, Int2ArrayOid, Int4ArrayOid, Int8ArrayOid, Float4ArrayOid, Float8ArrayOid, TextArrayOid, VarcharArrayOid, OidOid, InetOid, CidrOid, InetArrayOid, CidrArrayOid, RecordOid, JsonOid, JsonbOid:
- wbuf.WriteInt16(BinaryFormatCode)
- default:
- wbuf.WriteInt16(TextFormatCode)
- }
- }
+ if len(ps.ParameterOIDs) != len(arguments) {
+ return errors.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOIDs), len(arguments))
}
- wbuf.WriteInt16(int16(len(arguments)))
- for i, oid := range ps.ParameterOids {
- if err := Encode(wbuf, oid, arguments[i]); err != nil {
- return err
- }
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
}
- wbuf.WriteInt16(int16(len(ps.FieldDescriptions)))
- for _, fd := range ps.FieldDescriptions {
- wbuf.WriteInt16(fd.FormatCode)
+ resultFormatCodes := make([]int16, len(ps.FieldDescriptions))
+ for i, fd := range ps.FieldDescriptions {
+ resultFormatCodes[i] = fd.FormatCode
}
-
- // execute
- wbuf.startMsg('E')
- wbuf.WriteByte(0)
- wbuf.WriteInt32(0)
-
- // sync
- wbuf.startMsg('S')
- wbuf.closeMsg()
-
- _, err = c.conn.Write(wbuf.buf)
+ buf, err := appendBind(c.wbuf, "", ps.Name, c.ConnInfo, ps.ParameterOIDs, arguments, resultFormatCodes)
if err != nil {
- c.die(err)
- }
-
- return err
-}
-
-// Exec executes sql. sql can be either a prepared statement name or an SQL string.
-// arguments should be referenced positionally from the sql string as $1, $2, etc.
-func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
- if err = c.lock(); err != nil {
- return commandTag, err
+ return err
}
- startTime := time.Now()
- c.lastActivityTime = startTime
+ buf = appendExecute(buf, "", 0)
+ buf = appendSync(buf)
- defer func() {
- if err == nil {
- if c.shouldLog(LogLevelInfo) {
- endTime := time.Now()
- c.log(LogLevelInfo, "Exec", "sql", sql, "args", logQueryArgs(arguments), "time", endTime.Sub(startTime), "commandTag", commandTag)
- }
- } else {
- if c.shouldLog(LogLevelError) {
- c.log(LogLevelError, "Exec", "sql", sql, "args", logQueryArgs(arguments), "error", err)
- }
+ n, err := c.conn.Write(buf)
+ if err != nil {
+ if fatalWriteErr(n, err) {
+ c.die(err)
}
+ return err
+ }
+ c.pendingReadyForQueryCount++
- if unlockErr := c.unlock(); unlockErr != nil && err == nil {
- err = unlockErr
- }
- }()
+ return nil
+}
- if err = c.sendQuery(sql, arguments...); err != nil {
- return
+// fatalWriteError takes the response of a net.Conn.Write and determines if it is fatal
+func fatalWriteErr(bytesWritten int, err error) bool {
+ // Partial writes break the connection
+ if bytesWritten > 0 {
+ return true
}
- var softErr error
-
- for {
- var t byte
- var r *msgReader
- t, r, err = c.rxMsg()
- if err != nil {
- return commandTag, err
- }
+ netErr, is := err.(net.Error)
+ return !(is && netErr.Timeout())
+}
- switch t {
- case readyForQuery:
- c.rxReadyForQuery(r)
- return commandTag, softErr
- case rowDescription:
- case dataRow:
- case bindComplete:
- case commandComplete:
- commandTag = CommandTag(r.readCString())
- default:
- if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
- softErr = e
- }
- }
- }
+// Exec executes sql. sql can be either a prepared statement name or an SQL string.
+// arguments should be referenced positionally from the sql string as $1, $2, etc.
+func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
+ return c.ExecEx(context.Background(), sql, nil, arguments...)
}
// Processes messages that are not exclusive to one context such as
-// authentication or query response. The response to these messages
-// is the same regardless of when they occur.
-func (c *Conn) processContextFreeMsg(t byte, r *msgReader) (err error) {
- switch t {
- case 'S':
- c.rxParameterStatus(r)
- return nil
- case errorResponse:
- return c.rxErrorResponse(r)
- case noticeResponse:
- return nil
- case emptyQueryResponse:
- return nil
- case notificationResponse:
- c.rxNotificationResponse(r)
- return nil
- default:
- return fmt.Errorf("Received unknown message type: %c", t)
+// authentication or query response. The response to these messages is the same
+// regardless of when they occur. It also ignores messages that are only
+// meaningful in a given context. These messages can occur due to a context
+// deadline interrupting message processing. For example, an interrupted query
+// may have left DataRow messages on the wire.
+func (c *Conn) processContextFreeMsg(msg pgproto3.BackendMessage) (err error) {
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ return c.rxErrorResponse(msg)
+ case *pgproto3.NoticeResponse:
+ c.rxNoticeResponse(msg)
+ case *pgproto3.NotificationResponse:
+ c.rxNotificationResponse(msg)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
+ case *pgproto3.ParameterStatus:
+ c.rxParameterStatus(msg)
}
+
+ return nil
}
-func (c *Conn) rxMsg() (t byte, r *msgReader, err error) {
- if !c.alive {
- return 0, nil, ErrDeadConn
+func (c *Conn) rxMsg() (pgproto3.BackendMessage, error) {
+ if !c.IsAlive() {
+ return nil, ErrDeadConn
}
- t, err = c.mr.rxMsg()
+ msg, err := c.frontend.Receive()
if err != nil {
- c.die(err)
+ if netErr, ok := err.(net.Error); !(ok && netErr.Timeout()) {
+ c.die(err)
+ }
+ return nil, err
}
c.lastActivityTime = time.Now()
- if c.shouldLog(LogLevelTrace) {
- c.log(LogLevelTrace, "rxMsg", "type", string(t), "msgBytesRemaining", c.mr.msgBytesRemaining)
- }
+ // fmt.Printf("rxMsg: %#v\n", msg)
- return t, &c.mr, err
+ return msg, nil
}
-func (c *Conn) rxAuthenticationX(r *msgReader) (err error) {
- switch r.readInt32() {
- case 0: // AuthenticationOk
- case 3: // AuthenticationCleartextPassword
+func (c *Conn) rxAuthenticationX(msg *pgproto3.Authentication) (err error) {
+ switch msg.Type {
+ case pgproto3.AuthTypeOk:
+ case pgproto3.AuthTypeCleartextPassword:
err = c.txPasswordMessage(c.config.Password)
- case 5: // AuthenticationMD5Password
- salt := r.readString(4)
- digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+salt)
+ case pgproto3.AuthTypeMD5Password:
+ digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+string(msg.Salt[:]))
err = c.txPasswordMessage(digestedPassword)
default:
err = errors.New("Received unknown authentication message")
@@ -1106,114 +1322,103 @@ func hexMD5(s string) string {
return hex.EncodeToString(hash.Sum(nil))
}
-func (c *Conn) rxParameterStatus(r *msgReader) {
- key := r.readCString()
- value := r.readCString()
- c.RuntimeParams[key] = value
+func (c *Conn) rxParameterStatus(msg *pgproto3.ParameterStatus) {
+ c.RuntimeParams[msg.Name] = msg.Value
}
-func (c *Conn) rxErrorResponse(r *msgReader) (err PgError) {
- for {
- switch r.readByte() {
- case 'S':
- err.Severity = r.readCString()
- case 'C':
- err.Code = r.readCString()
- case 'M':
- err.Message = r.readCString()
- case 'D':
- err.Detail = r.readCString()
- case 'H':
- err.Hint = r.readCString()
- case 'P':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.Position = int32(n)
- case 'p':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.InternalPosition = int32(n)
- case 'q':
- err.InternalQuery = r.readCString()
- case 'W':
- err.Where = r.readCString()
- case 's':
- err.SchemaName = r.readCString()
- case 't':
- err.TableName = r.readCString()
- case 'c':
- err.ColumnName = r.readCString()
- case 'd':
- err.DataTypeName = r.readCString()
- case 'n':
- err.ConstraintName = r.readCString()
- case 'F':
- err.File = r.readCString()
- case 'L':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.Line = int32(n)
- case 'R':
- err.Routine = r.readCString()
-
- case 0: // End of error message
- if err.Severity == "FATAL" {
- c.die(err)
- }
- return
- default: // Ignore other error fields
- r.readCString()
- }
+func (c *Conn) rxErrorResponse(msg *pgproto3.ErrorResponse) PgError {
+ err := PgError{
+ Severity: msg.Severity,
+ Code: msg.Code,
+ Message: msg.Message,
+ Detail: msg.Detail,
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: msg.InternalQuery,
+ Where: msg.Where,
+ SchemaName: msg.SchemaName,
+ TableName: msg.TableName,
+ ColumnName: msg.ColumnName,
+ DataTypeName: msg.DataTypeName,
+ ConstraintName: msg.ConstraintName,
+ File: msg.File,
+ Line: msg.Line,
+ Routine: msg.Routine,
+ }
+
+ if err.Severity == "FATAL" {
+ c.die(err)
}
-}
-func (c *Conn) rxBackendKeyData(r *msgReader) {
- c.Pid = r.readInt32()
- c.SecretKey = r.readInt32()
+ return err
}
-func (c *Conn) rxReadyForQuery(r *msgReader) {
- c.TxStatus = r.readByte()
+func (c *Conn) rxNoticeResponse(msg *pgproto3.NoticeResponse) {
+ if c.onNotice == nil {
+ return
+ }
+
+ notice := &Notice{
+ Severity: msg.Severity,
+ Code: msg.Code,
+ Message: msg.Message,
+ Detail: msg.Detail,
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: msg.InternalQuery,
+ Where: msg.Where,
+ SchemaName: msg.SchemaName,
+ TableName: msg.TableName,
+ ColumnName: msg.ColumnName,
+ DataTypeName: msg.DataTypeName,
+ ConstraintName: msg.ConstraintName,
+ File: msg.File,
+ Line: msg.Line,
+ Routine: msg.Routine,
+ }
+
+ c.onNotice(c, notice)
}
-func (c *Conn) rxRowDescription(r *msgReader) (fields []FieldDescription) {
- fieldCount := r.readInt16()
- fields = make([]FieldDescription, fieldCount)
- for i := int16(0); i < fieldCount; i++ {
- f := &fields[i]
- f.Name = r.readCString()
- f.Table = r.readOid()
- f.AttributeNumber = r.readInt16()
- f.DataType = r.readOid()
- f.DataTypeSize = r.readInt16()
- f.Modifier = r.readInt32()
- f.FormatCode = r.readInt16()
- }
- return
+func (c *Conn) rxBackendKeyData(msg *pgproto3.BackendKeyData) {
+ c.pid = msg.ProcessID
+ c.secretKey = msg.SecretKey
}
-func (c *Conn) rxParameterDescription(r *msgReader) (parameters []Oid) {
- // Internally, PostgreSQL supports greater than 64k parameters to a prepared
- // statement. But the parameter description uses a 16-bit integer for the
- // count of parameters. If there are more than 64K parameters, this count is
- // wrong. So read the count, ignore it, and compute the proper value from
- // the size of the message.
- r.readInt16()
- parameterCount := r.msgBytesRemaining / 4
+func (c *Conn) rxReadyForQuery(msg *pgproto3.ReadyForQuery) {
+ c.pendingReadyForQueryCount--
+ c.txStatus = msg.TxStatus
+}
- parameters = make([]Oid, 0, parameterCount)
+func (c *Conn) rxRowDescription(msg *pgproto3.RowDescription) []FieldDescription {
+ fields := make([]FieldDescription, len(msg.Fields))
+ for i := 0; i < len(fields); i++ {
+ fields[i].Name = msg.Fields[i].Name
+ fields[i].Table = pgtype.OID(msg.Fields[i].TableOID)
+ fields[i].AttributeNumber = msg.Fields[i].TableAttributeNumber
+ fields[i].DataType = pgtype.OID(msg.Fields[i].DataTypeOID)
+ fields[i].DataTypeSize = msg.Fields[i].DataTypeSize
+ fields[i].Modifier = msg.Fields[i].TypeModifier
+ fields[i].FormatCode = msg.Fields[i].Format
+ }
+ return fields
+}
- for i := int32(0); i < parameterCount; i++ {
- parameters = append(parameters, r.readOid())
+func (c *Conn) rxParameterDescription(msg *pgproto3.ParameterDescription) []pgtype.OID {
+ parameters := make([]pgtype.OID, len(msg.ParameterOIDs))
+ for i := 0; i < len(parameters); i++ {
+ parameters[i] = pgtype.OID(msg.ParameterOIDs[i])
}
- return
+ return parameters
}
-func (c *Conn) rxNotificationResponse(r *msgReader) {
+func (c *Conn) rxNotificationResponse(msg *pgproto3.NotificationResponse) {
n := new(Notification)
- n.Pid = r.readInt32()
- n.Channel = r.readCString()
- n.Payload = r.readCString()
+ n.PID = msg.PID
+ n.Channel = msg.Channel
+ n.Payload = msg.Payload
c.notifications = append(c.notifications, n)
}
@@ -1237,40 +1442,54 @@ func (c *Conn) startTLS(tlsConfig *tls.Config) (err error) {
return nil
}
-func (c *Conn) txStartupMessage(msg *startupMessage) error {
- _, err := c.conn.Write(msg.Bytes())
- return err
-}
-
func (c *Conn) txPasswordMessage(password string) (err error) {
- wbuf := newWriteBuf(c, 'p')
- wbuf.WriteCString(password)
- wbuf.closeMsg()
+ buf := c.wbuf
+ buf = append(buf, 'p')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, password...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- _, err = c.conn.Write(wbuf.buf)
+ _, err = c.conn.Write(buf)
return err
}
func (c *Conn) die(err error) {
- c.alive = false
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status == connStatusClosed {
+ return
+ }
+
+ c.status = connStatusClosed
c.causeOfDeath = err
c.conn.Close()
}
func (c *Conn) lock() error {
- if c.busy {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status != connStatusIdle {
return ErrConnBusy
}
- c.busy = true
+
+ c.status = connStatusBusy
return nil
}
func (c *Conn) unlock() error {
- if !c.busy {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status != connStatusBusy {
return errors.New("unlock conn that is not busy")
}
- c.busy = false
+
+ c.status = connStatusIdle
return nil
}
@@ -1278,23 +1497,15 @@ func (c *Conn) shouldLog(lvl int) bool {
return c.logger != nil && c.logLevel >= lvl
}
-func (c *Conn) log(lvl int, msg string, ctx ...interface{}) {
- if c.Pid != 0 {
- ctx = append(ctx, "pid", c.Pid)
+func (c *Conn) log(lvl LogLevel, msg string, data map[string]interface{}) {
+ if data == nil {
+ data = map[string]interface{}{}
}
-
- switch lvl {
- case LogLevelTrace:
- c.logger.Debug(msg, ctx...)
- case LogLevelDebug:
- c.logger.Debug(msg, ctx...)
- case LogLevelInfo:
- c.logger.Info(msg, ctx...)
- case LogLevelWarn:
- c.logger.Warn(msg, ctx...)
- case LogLevelError:
- c.logger.Error(msg, ctx...)
+ if c.pid != 0 {
+ data["pid"] = c.pid
}
+
+ c.logger.Log(lvl, msg, data)
}
// SetLogger replaces the current logger and returns the previous logger.
@@ -1320,3 +1531,306 @@ func (c *Conn) SetLogLevel(lvl int) (int, error) {
func quoteIdentifier(s string) string {
return `"` + strings.Replace(s, `"`, `""`, -1) + `"`
}
+
+// cancelQuery sends a cancel request to the PostgreSQL server. It returns an
+// error if unable to deliver the cancel request, but lack of an error does not
+// ensure that the query was canceled. As specified in the documentation, there
+// is no way to be sure a query was canceled. See
+// https://www.postgresql.org/docs/current/static/protocol-flow.html#AEN112861
+func (c *Conn) cancelQuery() {
+ if !atomic.CompareAndSwapInt32(&c.cancelQueryInProgress, 0, 1) {
+ panic("cancelQuery when cancelQueryInProgress")
+ }
+
+ if err := c.conn.SetDeadline(time.Now()); err != nil {
+ c.Close() // Close connection if unable to set deadline
+ return
+ }
+
+ doCancel := func() error {
+ network, address := c.config.networkAddress()
+ cancelConn, err := c.config.Dial(network, address)
+ if err != nil {
+ return err
+ }
+ defer cancelConn.Close()
+
+ // If server doesn't process cancellation request in bounded time then abort.
+ err = cancelConn.SetDeadline(time.Now().Add(15 * time.Second))
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint32(buf[0:4], 16)
+ binary.BigEndian.PutUint32(buf[4:8], 80877102)
+ binary.BigEndian.PutUint32(buf[8:12], uint32(c.pid))
+ binary.BigEndian.PutUint32(buf[12:16], uint32(c.secretKey))
+ _, err = cancelConn.Write(buf)
+ if err != nil {
+ return err
+ }
+
+ _, err = cancelConn.Read(buf)
+ if err != io.EOF {
+ return errors.Errorf("Server failed to close connection after cancel query request: %v %v", err, buf)
+ }
+
+ return nil
+ }
+
+ go func() {
+ err := doCancel()
+ if err != nil {
+ c.Close() // Something is very wrong. Terminate the connection.
+ }
+ c.cancelQueryCompleted <- struct{}{}
+ }()
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ _, err := c.ExecEx(ctx, ";", nil)
+ return err
+}
+
+func (c *Conn) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (CommandTag, error) {
+ err := c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ if err := c.lock(); err != nil {
+ return "", err
+ }
+ defer c.unlock()
+
+ startTime := time.Now()
+ c.lastActivityTime = startTime
+
+ commandTag, err := c.execEx(ctx, sql, options, arguments...)
+ if err != nil {
+ if c.shouldLog(LogLevelError) {
+ c.log(LogLevelError, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "err": err})
+ }
+ return commandTag, err
+ }
+
+ if c.shouldLog(LogLevelInfo) {
+ endTime := time.Now()
+ c.log(LogLevelInfo, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "time": endTime.Sub(startTime), "commandTag": commandTag})
+ }
+
+ return commandTag, err
+}
+
+func (c *Conn) execEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
+ err = c.initContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
+
+ if (options == nil && c.config.PreferSimpleProtocol) || (options != nil && options.SimpleProtocol) {
+ err = c.sanitizeAndSendSimpleQuery(sql, arguments...)
+ if err != nil {
+ return "", err
+ }
+ } else if options != nil && len(options.ParameterOIDs) > 0 {
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return "", err
+ }
+
+ buf, err := c.buildOneRoundTripExec(c.wbuf, sql, options, arguments)
+ if err != nil {
+ return "", err
+ }
+
+ buf = appendSync(buf)
+
+ n, err := c.conn.Write(buf)
+ if err != nil && fatalWriteErr(n, err) {
+ c.die(err)
+ return "", err
+ }
+ c.pendingReadyForQueryCount++
+ } else {
+ if len(arguments) > 0 {
+ ps, ok := c.preparedStatements[sql]
+ if !ok {
+ var err error
+ ps, err = c.prepareEx("", sql, nil)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ err = c.sendPreparedQuery(ps, arguments...)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ if err = c.sendQuery(sql, arguments...); err != nil {
+ return
+ }
+ }
+ }
+
+ var softErr error
+
+ for {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return commandTag, err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
+ return commandTag, softErr
+ case *pgproto3.CommandComplete:
+ commandTag = CommandTag(msg.CommandTag)
+ default:
+ if e := c.processContextFreeMsg(msg); e != nil && softErr == nil {
+ softErr = e
+ }
+ }
+ }
+}
+
+func (c *Conn) buildOneRoundTripExec(buf []byte, sql string, options *QueryExOptions, arguments []interface{}) ([]byte, error) {
+ if len(arguments) != len(options.ParameterOIDs) {
+ return nil, errors.Errorf("mismatched number of arguments (%d) and options.ParameterOIDs (%d)", len(arguments), len(options.ParameterOIDs))
+ }
+
+ if len(options.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of QueryExOptions ParameterOIDs must be between 0 and 65535, received %d", len(options.ParameterOIDs))
+ }
+
+ buf = appendParse(buf, "", sql, options.ParameterOIDs)
+ buf, err := appendBind(buf, "", "", c.ConnInfo, options.ParameterOIDs, arguments, nil)
+ if err != nil {
+ return nil, err
+ }
+ buf = appendExecute(buf, "", 0)
+
+ return buf, nil
+}
+
+func (c *Conn) initContext(ctx context.Context) error {
+ if c.ctxInProgress {
+ return errors.New("ctx already in progress")
+ }
+
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c.ctxInProgress = true
+
+ go c.contextHandler(ctx)
+
+ return nil
+}
+
+func (c *Conn) termContext(opErr error) error {
+ if !c.ctxInProgress {
+ return opErr
+ }
+
+ var err error
+
+ select {
+ case err = <-c.closedChan:
+ if opErr == nil {
+ err = nil
+ }
+ case c.doneChan <- struct{}{}:
+ err = opErr
+ }
+
+ c.ctxInProgress = false
+ return err
+}
+
+func (c *Conn) contextHandler(ctx context.Context) {
+ select {
+ case <-ctx.Done():
+ c.cancelQuery()
+ c.closedChan <- ctx.Err()
+ case <-c.doneChan:
+ }
+}
+
+func (c *Conn) waitForPreviousCancelQuery(ctx context.Context) error {
+ if atomic.LoadInt32(&c.cancelQueryInProgress) == 0 {
+ return nil
+ }
+
+ select {
+ case <-c.cancelQueryCompleted:
+ atomic.StoreInt32(&c.cancelQueryInProgress, 0)
+ if err := c.conn.SetDeadline(time.Time{}); err != nil {
+ c.Close() // Close connection if unable to disable deadline
+ return err
+ }
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (c *Conn) ensureConnectionReadyForQuery() error {
+ for c.pendingReadyForQueryCount > 0 {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ pgErr := c.rxErrorResponse(msg)
+ if pgErr.Severity == "FATAL" {
+ return pgErr
+ }
+ default:
+ err = c.processContextFreeMsg(msg)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func connInfoFromRows(rows *Rows, err error) (map[string]pgtype.OID, error) {
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ nameOIDs := make(map[string]pgtype.OID, 256)
+ for rows.Next() {
+ var oid pgtype.OID
+ var name pgtype.Text
+ if err = rows.Scan(&oid, &name); err != nil {
+ return nil, err
+ }
+
+ nameOIDs[name.String] = oid
+ }
+
+ if err = rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return nameOIDs, err
+}
diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.example b/vendor/github.com/jackc/pgx/conn_config_test.go.example
index cac798b..463c084 100644
--- a/vendor/github.com/jackc/pgx/conn_config_test.go.example
+++ b/vendor/github.com/jackc/pgx/conn_config_test.go.example
@@ -15,6 +15,7 @@ var invalidUserConnConfig *pgx.ConnConfig = nil
var tlsConnConfig *pgx.ConnConfig = nil
var customDialerConnConfig *pgx.ConnConfig = nil
var replicationConnConfig *pgx.ConnConfig = nil
+var cratedbConnConfig *pgx.ConnConfig = nil
// var tcpConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
// var unixSocketConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "/private/tmp", User: "pgx_none", Database: "pgx_test"}
@@ -23,3 +24,5 @@ var replicationConnConfig *pgx.ConnConfig = nil
// var invalidUserConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid", Database: "pgx_test"}
// var tlsConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}}
// var customDialerConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
+// var replicationConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_replication", Password: "secret", Database: "pgx_test"}
+
diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.travis b/vendor/github.com/jackc/pgx/conn_config_test.go.travis
index 75714bf..cf29a74 100644
--- a/vendor/github.com/jackc/pgx/conn_config_test.go.travis
+++ b/vendor/github.com/jackc/pgx/conn_config_test.go.travis
@@ -16,15 +16,21 @@ var invalidUserConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid",
var tlsConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_ssl", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}}
var customDialerConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
var replicationConnConfig *pgx.ConnConfig = nil
+var cratedbConnConfig *pgx.ConnConfig = nil
func init() {
- version := os.Getenv("PGVERSION")
+ pgVersion := os.Getenv("PGVERSION")
- if len(version) > 0 {
- v, err := strconv.ParseFloat(version,64)
+ if len(pgVersion) > 0 {
+ v, err := strconv.ParseFloat(pgVersion, 64)
if err == nil && v >= 9.6 {
replicationConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_replication", Password: "secret", Database: "pgx_test"}
}
}
+
+ crateVersion := os.Getenv("CRATEVERSION")
+ if crateVersion != "" {
+ cratedbConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", Port: 6543, User: "pgx", Password: "", Database: "pgx_test"}
+ }
}
diff --git a/vendor/github.com/jackc/pgx/conn_pool.go b/vendor/github.com/jackc/pgx/conn_pool.go
index 1913699..6ca0ee0 100644
--- a/vendor/github.com/jackc/pgx/conn_pool.go
+++ b/vendor/github.com/jackc/pgx/conn_pool.go
@@ -1,9 +1,13 @@
package pgx
import (
- "errors"
+ "context"
"sync"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgtype"
)
type ConnPoolConfig struct {
@@ -27,11 +31,7 @@ type ConnPool struct {
closed bool
preparedStatements map[string]*PreparedStatement
acquireTimeout time.Duration
- pgTypes map[Oid]PgType
- pgsqlAfInet *byte
- pgsqlAfInet6 *byte
- txAfterClose func(tx *Tx)
- rowsAfterClose func(rows *Rows)
+ connInfo *pgtype.ConnInfo
}
type ConnPoolStat struct {
@@ -43,11 +43,15 @@ type ConnPoolStat struct {
// ErrAcquireTimeout occurs when an attempt to acquire a connection times out.
var ErrAcquireTimeout = errors.New("timeout acquiring connection from pool")
+// ErrClosedPool occurs on an attempt to acquire a connection from a closed pool.
+var ErrClosedPool = errors.New("cannot acquire from closed pool")
+
// NewConnPool creates a new ConnPool. config.ConnConfig is passed through to
// Connect directly.
func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
p = new(ConnPool)
p.config = config.ConnConfig
+ p.connInfo = minimalConnInfo
p.maxConnections = config.MaxConnections
if p.maxConnections == 0 {
p.maxConnections = 5
@@ -73,14 +77,6 @@ func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
p.logLevel = LogLevelNone
}
- p.txAfterClose = func(tx *Tx) {
- p.Release(tx.Conn())
- }
-
- p.rowsAfterClose = func(rows *Rows) {
- p.Release(rows.Conn())
- }
-
p.allConnections = make([]*Conn, 0, p.maxConnections)
p.availableConnections = make([]*Conn, 0, p.maxConnections)
p.preparedStatements = make(map[string]*PreparedStatement)
@@ -94,6 +90,7 @@ func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
}
p.allConnections = append(p.allConnections, c)
p.availableConnections = append(p.availableConnections, c)
+ p.connInfo = c.ConnInfo.DeepCopy()
return
}
@@ -114,7 +111,7 @@ func (p *ConnPool) deadlinePassed(deadline *time.Time) bool {
// acquire performs acquision assuming pool is already locked
func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
if p.closed {
- return nil, errors.New("cannot acquire from closed pool")
+ return nil, ErrClosedPool
}
// A connection is available
@@ -161,7 +158,7 @@ func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
}
// All connections are in use and we cannot create more
if p.logLevel >= LogLevelWarn {
- p.logger.Warn("All connections in pool are busy - waiting...")
+ p.logger.Log(LogLevelWarn, "waiting for available connection", nil)
}
// Wait until there is an available connection OR room to create a new connection
@@ -181,7 +178,11 @@ func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
// Release gives up use of a connection.
func (p *ConnPool) Release(conn *Conn) {
- if conn.TxStatus != 'I' {
+ if conn.ctxInProgress {
+ panic("should never release when context is in progress")
+ }
+
+ if conn.txStatus != 'I' {
conn.Exec("rollback")
}
@@ -223,25 +224,21 @@ func (p *ConnPool) removeFromAllConnections(conn *Conn) bool {
return false
}
-// Close ends the use of a connection pool. It prevents any new connections
-// from being acquired, waits until all acquired connections are released,
-// then closes all underlying connections.
+// Close ends the use of a connection pool. It prevents any new connections from
+// being acquired and closes available underlying connections. Any acquired
+// connections will be closed when they are released.
func (p *ConnPool) Close() {
p.cond.L.Lock()
defer p.cond.L.Unlock()
p.closed = true
- // Wait until all connections are released
- if len(p.availableConnections) != len(p.allConnections) {
- for len(p.availableConnections) != len(p.allConnections) {
- p.cond.Wait()
- }
- }
-
- for _, c := range p.allConnections {
+ for _, c := range p.availableConnections {
_ = c.Close()
}
+
+ // This will cause any checked out connections to be closed on release
+ p.resetCount++
}
// Reset closes all open connections, but leaves the pool open. It is intended
@@ -289,7 +286,7 @@ func (p *ConnPool) Stat() (s ConnPoolStat) {
}
func (p *ConnPool) createConnection() (*Conn, error) {
- c, err := connect(p.config, p.pgTypes, p.pgsqlAfInet, p.pgsqlAfInet6)
+ c, err := connect(p.config, p.connInfo)
if err != nil {
return nil, err
}
@@ -324,10 +321,6 @@ func (p *ConnPool) createConnectionUnlocked() (*Conn, error) {
// afterConnectionCreated executes (if it is) afterConnect() callback and prepares
// all the known statements for the new connection.
func (p *ConnPool) afterConnectionCreated(c *Conn) (*Conn, error) {
- p.pgTypes = c.PgTypes
- p.pgsqlAfInet = c.pgsqlAfInet
- p.pgsqlAfInet6 = c.pgsqlAfInet6
-
if p.afterConnect != nil {
err := p.afterConnect(c)
if err != nil {
@@ -357,6 +350,16 @@ func (p *ConnPool) Exec(sql string, arguments ...interface{}) (commandTag Comman
return c.Exec(sql, arguments...)
}
+func (p *ConnPool) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
+ var c *Conn
+ if c, err = p.Acquire(); err != nil {
+ return
+ }
+ defer p.Release(c)
+
+ return c.ExecEx(ctx, sql, options, arguments...)
+}
+
// Query acquires a connection and delegates the call to that connection. When
// *Rows are closed, the connection is released automatically.
func (p *ConnPool) Query(sql string, args ...interface{}) (*Rows, error) {
@@ -372,7 +375,25 @@ func (p *ConnPool) Query(sql string, args ...interface{}) (*Rows, error) {
return rows, err
}
- rows.AfterClose(p.rowsAfterClose)
+ rows.connPool = p
+
+ return rows, nil
+}
+
+func (p *ConnPool) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (*Rows, error) {
+ c, err := p.Acquire()
+ if err != nil {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ return &Rows{closed: true, err: err}, err
+ }
+
+ rows, err := c.QueryEx(ctx, sql, options, args...)
+ if err != nil {
+ p.Release(c)
+ return rows, err
+ }
+
+ rows.connPool = p
return rows, nil
}
@@ -385,10 +406,15 @@ func (p *ConnPool) QueryRow(sql string, args ...interface{}) *Row {
return (*Row)(rows)
}
+func (p *ConnPool) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := p.QueryEx(ctx, sql, options, args...)
+ return (*Row)(rows)
+}
+
// Begin acquires a connection and begins a transaction on it. When the
// transaction is closed the connection will be automatically released.
func (p *ConnPool) Begin() (*Tx, error) {
- return p.BeginIso("")
+ return p.BeginEx(context.Background(), nil)
}
// Prepare creates a prepared statement on a connection in the pool to test the
@@ -403,7 +429,7 @@ func (p *ConnPool) Begin() (*Tx, error) {
// the same name and sql arguments. This allows a code path to Prepare and
// Query/Exec/PrepareEx without concern for if the statement has already been prepared.
func (p *ConnPool) Prepare(name, sql string) (*PreparedStatement, error) {
- return p.PrepareEx(name, sql, nil)
+ return p.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx creates a prepared statement on a connection in the pool to test the
@@ -417,7 +443,7 @@ func (p *ConnPool) Prepare(name, sql string) (*PreparedStatement, error) {
// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same
// name and sql arguments. This allows a code path to PrepareEx and Query/Exec/Prepare without
// concern for if the statement has already been prepared.
-func (p *ConnPool) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
+func (p *ConnPool) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
p.cond.L.Lock()
defer p.cond.L.Unlock()
@@ -439,13 +465,13 @@ func (p *ConnPool) PrepareEx(name, sql string, opts *PrepareExOptions) (*Prepare
return ps, nil
}
- ps, err := c.PrepareEx(name, sql, opts)
+ ps, err := c.PrepareEx(ctx, name, sql, opts)
if err != nil {
return nil, err
}
for _, c := range p.availableConnections {
- _, err := c.PrepareEx(name, sql, opts)
+ _, err := c.PrepareEx(ctx, name, sql, opts)
if err != nil {
return nil, err
}
@@ -474,17 +500,17 @@ func (p *ConnPool) Deallocate(name string) (err error) {
return nil
}
-// BeginIso acquires a connection and begins a transaction in isolation mode iso
-// on it. When the transaction is closed the connection will be automatically
-// released.
-func (p *ConnPool) BeginIso(iso string) (*Tx, error) {
+// BeginEx acquires a connection and starts a transaction with txOptions
+// determining the transaction mode. When the transaction is closed the
+// connection will be automatically released.
+func (p *ConnPool) BeginEx(ctx context.Context, txOptions *TxOptions) (*Tx, error) {
for {
c, err := p.Acquire()
if err != nil {
return nil, err
}
- tx, err := c.BeginIso(iso)
+ tx, err := c.BeginEx(ctx, txOptions)
if err != nil {
alive := c.IsAlive()
p.Release(c)
@@ -493,37 +519,31 @@ func (p *ConnPool) BeginIso(iso string) (*Tx, error) {
// again on a new connection would fix, so just return the error. But
// if the connection is dead try to acquire a new connection and try
// again.
- if alive {
+ if alive || ctx.Err() != nil {
return nil, err
}
continue
}
- tx.AfterClose(p.txAfterClose)
+ tx.connPool = p
return tx, nil
}
}
-// Deprecated. Use CopyFrom instead. CopyTo acquires a connection, delegates the
-// call to that connection, and releases the connection.
-func (p *ConnPool) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
+// CopyFrom acquires a connection, delegates the call to that connection, and releases the connection
+func (p *ConnPool) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) {
c, err := p.Acquire()
if err != nil {
return 0, err
}
defer p.Release(c)
- return c.CopyTo(tableName, columnNames, rowSrc)
+ return c.CopyFrom(tableName, columnNames, rowSrc)
}
-// CopyFrom acquires a connection, delegates the call to that connection, and
-// releases the connection.
-func (p *ConnPool) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) {
+// BeginBatch acquires a connection and begins a batch on that connection. When
+// *Batch is finished, the connection is released automatically.
+func (p *ConnPool) BeginBatch() *Batch {
c, err := p.Acquire()
- if err != nil {
- return 0, err
- }
- defer p.Release(c)
-
- return c.CopyFrom(tableName, columnNames, rowSrc)
+ return &Batch{conn: c, connPool: p, err: err}
}
diff --git a/vendor/github.com/jackc/pgx/conn_pool_private_test.go b/vendor/github.com/jackc/pgx/conn_pool_private_test.go
deleted file mode 100644
index ef0ec1d..0000000
--- a/vendor/github.com/jackc/pgx/conn_pool_private_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package pgx
-
-import (
- "testing"
-)
-
-func compareConnSlices(slice1, slice2 []*Conn) bool {
- if len(slice1) != len(slice2) {
- return false
- }
- for i, c := range slice1 {
- if c != slice2[i] {
- return false
- }
- }
- return true
-}
-
-func TestConnPoolRemoveFromAllConnections(t *testing.T) {
- t.Parallel()
- pool := ConnPool{}
- conn1 := &Conn{}
- conn2 := &Conn{}
- conn3 := &Conn{}
-
- // First element
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn1)
- if !compareConnSlices(pool.allConnections, []*Conn{conn2, conn3}) {
- t.Fatal("First element test failed")
- }
- // Element somewhere in the middle
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn2)
- if !compareConnSlices(pool.allConnections, []*Conn{conn1, conn3}) {
- t.Fatal("Middle element test failed")
- }
- // Last element
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn3)
- if !compareConnSlices(pool.allConnections, []*Conn{conn1, conn2}) {
- t.Fatal("Last element test failed")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/conn_pool_test.go b/vendor/github.com/jackc/pgx/conn_pool_test.go
deleted file mode 100644
index ab76bfb..0000000
--- a/vendor/github.com/jackc/pgx/conn_pool_test.go
+++ /dev/null
@@ -1,982 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "net"
- "sync"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func createConnPool(t *testing.T, maxConnections int) *pgx.ConnPool {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: maxConnections}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- return pool
-}
-
-func acquireAllConnections(t *testing.T, pool *pgx.ConnPool, maxConnections int) []*pgx.Conn {
- connections := make([]*pgx.Conn, maxConnections)
- for i := 0; i < maxConnections; i++ {
- var err error
- if connections[i], err = pool.Acquire(); err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- }
- return connections
-}
-
-func releaseAllConnections(pool *pgx.ConnPool, connections []*pgx.Conn) {
- for _, c := range connections {
- pool.Release(c)
- }
-}
-
-func acquireWithTimeTaken(pool *pgx.ConnPool) (*pgx.Conn, time.Duration, error) {
- startTime := time.Now()
- c, err := pool.Acquire()
- return c, time.Since(startTime), err
-}
-
-func TestNewConnPool(t *testing.T) {
- t.Parallel()
-
- var numCallbacks int
- afterConnect := func(c *pgx.Conn) error {
- numCallbacks++
- return nil
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 2, AfterConnect: afterConnect}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatal("Unable to establish connection pool")
- }
- defer pool.Close()
-
- // It initially connects once
- stat := pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Errorf("Expected 1 connection to be established immediately, but %v were", numCallbacks)
- }
-
- // Pool creation returns an error if any AfterConnect callback does
- errAfterConnect := errors.New("Some error")
- afterConnect = func(c *pgx.Conn) error {
- return errAfterConnect
- }
-
- config = pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 2, AfterConnect: afterConnect}
- pool, err = pgx.NewConnPool(config)
- if err != errAfterConnect {
- t.Errorf("Expected errAfterConnect but received unexpected: %v", err)
- }
-}
-
-func TestNewConnPoolDefaultsTo5MaxConnections(t *testing.T) {
- t.Parallel()
-
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatal("Unable to establish connection pool")
- }
- defer pool.Close()
-
- if n := pool.Stat().MaxConnections; n != 5 {
- t.Fatalf("Expected pool to default to 5 max connections, but it was %d", n)
- }
-}
-
-func TestPoolAcquireAndReleaseCycle(t *testing.T) {
- t.Parallel()
-
- maxConnections := 2
- incrementCount := int32(100)
- completeSync := make(chan int)
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- allConnections := acquireAllConnections(t, pool, maxConnections)
-
- for _, c := range allConnections {
- mustExec(t, c, "create temporary table t(counter integer not null)")
- mustExec(t, c, "insert into t(counter) values(0);")
- }
-
- releaseAllConnections(pool, allConnections)
-
- f := func() {
- conn, err := pool.Acquire()
- if err != nil {
- t.Fatal("Unable to acquire connection")
- }
- defer pool.Release(conn)
-
- // Increment counter...
- mustExec(t, conn, "update t set counter = counter + 1")
- completeSync <- 0
- }
-
- for i := int32(0); i < incrementCount; i++ {
- go f()
- }
-
- // Wait for all f() to complete
- for i := int32(0); i < incrementCount; i++ {
- <-completeSync
- }
-
- // Check that temp table in each connection has been incremented some number of times
- actualCount := int32(0)
- allConnections = acquireAllConnections(t, pool, maxConnections)
-
- for _, c := range allConnections {
- var n int32
- c.QueryRow("select counter from t").Scan(&n)
- if n == 0 {
- t.Error("A connection was never used")
- }
-
- actualCount += n
- }
-
- if actualCount != incrementCount {
- fmt.Println(actualCount)
- t.Error("Wrong number of increments")
- }
-
- releaseAllConnections(pool, allConnections)
-}
-
-func TestPoolNonBlockingConnections(t *testing.T) {
- t.Parallel()
-
- var dialCountLock sync.Mutex
- dialCount := 0
- openTimeout := 1 * time.Second
- testDialer := func(network, address string) (net.Conn, error) {
- var firstDial bool
- dialCountLock.Lock()
- dialCount++
- firstDial = dialCount == 1
- dialCountLock.Unlock()
-
- if firstDial {
- return net.Dial(network, address)
- } else {
- time.Sleep(openTimeout)
- return nil, &net.OpError{Op: "dial", Net: "tcp"}
- }
- }
-
- maxConnections := 3
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: maxConnections,
- }
- config.ConnConfig.Dial = testDialer
-
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool not to fail, instead it failed with: %v", err)
- }
- defer pool.Close()
-
- // NewConnPool establishes an initial connection
- // so we need to close that for the rest of the test
- if conn, err := pool.Acquire(); err == nil {
- conn.Close()
- pool.Release(conn)
- } else {
- t.Fatalf("pool.Acquire unexpectedly failed: %v", err)
- }
-
- var wg sync.WaitGroup
- wg.Add(maxConnections)
-
- startedAt := time.Now()
- for i := 0; i < maxConnections; i++ {
- go func() {
- _, err := pool.Acquire()
- wg.Done()
- if err == nil {
- t.Fatal("Acquire() expected to fail but it did not")
- }
- }()
- }
- wg.Wait()
-
- // Prior to createConnectionUnlocked() use the test took
- // maxConnections * openTimeout seconds to complete.
- // With createConnectionUnlocked() it takes ~ 1 * openTimeout seconds.
- timeTaken := time.Since(startedAt)
- if timeTaken > openTimeout+1*time.Second {
- t.Fatalf("Expected all Acquire() to run in parallel and take about %v, instead it took '%v'", openTimeout, timeTaken)
- }
-
-}
-
-func TestAcquireTimeoutSanity(t *testing.T) {
- t.Parallel()
-
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: 1,
- }
-
- // case 1: default 0 value
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool with default config.AcquireTimeout not to fail, instead it failed with '%v'", err)
- }
- pool.Close()
-
- // case 2: negative value
- config.AcquireTimeout = -1 * time.Second
- _, err = pgx.NewConnPool(config)
- if err == nil {
- t.Fatal("Expected NewConnPool with negative config.AcquireTimeout to fail, instead it did not")
- }
-
- // case 3: positive value
- config.AcquireTimeout = 1 * time.Second
- pool, err = pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool with positive config.AcquireTimeout not to fail, instead it failed with '%v'", err)
- }
- defer pool.Close()
-}
-
-func TestPoolWithAcquireTimeoutSet(t *testing.T) {
- t.Parallel()
-
- connAllocTimeout := 2 * time.Second
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: 1,
- AcquireTimeout: connAllocTimeout,
- }
-
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- // Consume all connections ...
- allConnections := acquireAllConnections(t, pool, config.MaxConnections)
- defer releaseAllConnections(pool, allConnections)
-
- // ... then try to consume 1 more. It should fail after a short timeout.
- _, timeTaken, err := acquireWithTimeTaken(pool)
-
- if err == nil || err != pgx.ErrAcquireTimeout {
- t.Fatalf("Expected error to be pgx.ErrAcquireTimeout, instead it was '%v'", err)
- }
- if timeTaken < connAllocTimeout {
- t.Fatalf("Expected connection allocation time to be at least %v, instead it was '%v'", connAllocTimeout, timeTaken)
- }
-}
-
-func TestPoolWithoutAcquireTimeoutSet(t *testing.T) {
- t.Parallel()
-
- maxConnections := 1
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- // Consume all connections ...
- allConnections := acquireAllConnections(t, pool, maxConnections)
-
- // ... then try to consume 1 more. It should hang forever.
- // To unblock it we release the previously taken connection in a goroutine.
- stopDeadWaitTimeout := 5 * time.Second
- timer := time.AfterFunc(stopDeadWaitTimeout, func() {
- releaseAllConnections(pool, allConnections)
- })
- defer timer.Stop()
-
- conn, timeTaken, err := acquireWithTimeTaken(pool)
- if err == nil {
- pool.Release(conn)
- } else {
- t.Fatalf("Expected error to be nil, instead it was '%v'", err)
- }
- if timeTaken < stopDeadWaitTimeout {
- t.Fatalf("Expected connection allocation time to be at least %v, instead it was '%v'", stopDeadWaitTimeout, timeTaken)
- }
-}
-
-func TestPoolReleaseWithTransactions(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- conn, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- mustExec(t, conn, "begin")
- if _, err = conn.Exec("selct"); err == nil {
- t.Fatal("Did not receive expected error")
- }
-
- if conn.TxStatus != 'E' {
- t.Fatalf("Expected TxStatus to be 'E', instead it was '%c'", conn.TxStatus)
- }
-
- pool.Release(conn)
-
- if conn.TxStatus != 'I' {
- t.Fatalf("Expected release to rollback errored transaction, but it did not: '%c'", conn.TxStatus)
- }
-
- conn, err = pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- mustExec(t, conn, "begin")
- if conn.TxStatus != 'T' {
- t.Fatalf("Expected txStatus to be 'T', instead it was '%c'", conn.TxStatus)
- }
-
- pool.Release(conn)
-
- if conn.TxStatus != 'I' {
- t.Fatalf("Expected release to rollback uncommitted transaction, but it did not: '%c'", conn.TxStatus)
- }
-}
-
-func TestPoolAcquireAndReleaseCycleAutoConnect(t *testing.T) {
- t.Parallel()
-
- maxConnections := 3
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- doSomething := func() {
- c, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to Acquire: %v", err)
- }
- rows, _ := c.Query("select 1, pg_sleep(0.02)")
- rows.Close()
- pool.Release(c)
- }
-
- for i := 0; i < 10; i++ {
- doSomething()
- }
-
- stat := pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Fatalf("Pool shouldn't have established more connections when no contention: %v", stat.CurrentConnections)
- }
-
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- doSomething()
- }()
- }
- wg.Wait()
-
- stat = pool.Stat()
- if stat.CurrentConnections != stat.MaxConnections {
- t.Fatalf("Pool should have used all possible connections: %v", stat.CurrentConnections)
- }
-}
-
-func TestPoolReleaseDiscardsDeadConnections(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- maxConnections := 3
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- var c1, c2 *pgx.Conn
- var err error
- var stat pgx.ConnPoolStat
-
- if c1, err = pool.Acquire(); err != nil {
- t.Fatalf("Unexpected error acquiring connection: %v", err)
- }
- defer func() {
- if c1 != nil {
- pool.Release(c1)
- }
- }()
-
- if c2, err = pool.Acquire(); err != nil {
- t.Fatalf("Unexpected error acquiring connection: %v", err)
- }
- defer func() {
- if c2 != nil {
- pool.Release(c2)
- }
- }()
-
- if _, err = c2.Exec("select pg_terminate_backend($1)", c1.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- // do something with the connection so it knows it's dead
- rows, _ := c1.Query("select 1")
- rows.Close()
- if rows.Err() == nil {
- t.Fatal("Expected error but none occurred")
- }
-
- if c1.IsAlive() {
- t.Fatal("Expected connection to be dead but it wasn't")
- }
-
- stat = pool.Stat()
- if stat.CurrentConnections != 2 {
- t.Fatalf("Unexpected CurrentConnections: %v", stat.CurrentConnections)
- }
- if stat.AvailableConnections != 0 {
- t.Fatalf("Unexpected AvailableConnections: %v", stat.CurrentConnections)
- }
-
- pool.Release(c1)
- c1 = nil // so it doesn't get released again by the defer
-
- stat = pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Fatalf("Unexpected CurrentConnections: %v", stat.CurrentConnections)
- }
- if stat.AvailableConnections != 0 {
- t.Fatalf("Unexpected AvailableConnections: %v", stat.CurrentConnections)
- }
- }()
- }
-}
-
-func TestConnPoolResetClosesCheckedOutConnectionsOnRelease(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- inProgressRows := []*pgx.Rows{}
- var inProgressPIDs []int32
-
- // Start some queries and reset pool while they are in progress
- for i := 0; i < 10; i++ {
- rows, err := pool.Query("select pg_backend_pid() union all select 1 union all select 2")
- if err != nil {
- t.Fatal(err)
- }
-
- rows.Next()
- var pid int32
- rows.Scan(&pid)
- inProgressPIDs = append(inProgressPIDs, pid)
-
- inProgressRows = append(inProgressRows, rows)
- pool.Reset()
- }
-
- // Check that the queries are completed
- for _, rows := range inProgressRows {
- var expectedN int32
-
- for rows.Next() {
- expectedN++
- var n int32
- err := rows.Scan(&n)
- if err != nil {
- t.Fatal(err)
- }
- if expectedN != n {
- t.Fatalf("Expected n to be %d, but it was %d", expectedN, n)
- }
- }
-
- if err := rows.Err(); err != nil {
- t.Fatal(err)
- }
- }
-
- // pool should be in fresh state due to previous reset
- stats := pool.Stat()
- if stats.CurrentConnections != 0 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- var connCount int
- err := pool.QueryRow("select count(*) from pg_stat_activity where pid = any($1::int4[])", inProgressPIDs).Scan(&connCount)
- if err != nil {
- t.Fatal(err)
- }
- if connCount != 0 {
- t.Fatalf("%d connections not closed", connCount)
- }
-}
-
-func TestConnPoolResetClosesCheckedInConnections(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- inProgressRows := []*pgx.Rows{}
- var inProgressPIDs []int32
-
- // Start some queries and reset pool while they are in progress
- for i := 0; i < 5; i++ {
- rows, err := pool.Query("select pg_backend_pid()")
- if err != nil {
- t.Fatal(err)
- }
-
- inProgressRows = append(inProgressRows, rows)
- }
-
- // Check that the queries are completed
- for _, rows := range inProgressRows {
- for rows.Next() {
- var pid int32
- err := rows.Scan(&pid)
- if err != nil {
- t.Fatal(err)
- }
- inProgressPIDs = append(inProgressPIDs, pid)
-
- }
-
- if err := rows.Err(); err != nil {
- t.Fatal(err)
- }
- }
-
- // Ensure pool is fully connected and available
- stats := pool.Stat()
- if stats.CurrentConnections != 5 || stats.AvailableConnections != 5 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- pool.Reset()
-
- // Pool should be empty after reset
- stats = pool.Stat()
- if stats.CurrentConnections != 0 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- var connCount int
- err := pool.QueryRow("select count(*) from pg_stat_activity where pid = any($1::int4[])", inProgressPIDs).Scan(&connCount)
- if err != nil {
- t.Fatal(err)
- }
- if connCount != 0 {
- t.Fatalf("%d connections not closed", connCount)
- }
-}
-
-func TestConnPoolTransaction(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- tx, err := pool.Begin()
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var n int32
- err = tx.QueryRow("select 40+$1", 2).Scan(&n)
- if err != nil {
- t.Fatalf("tx.QueryRow Scan failed: %v", err)
- }
- if n != 42 {
- t.Errorf("Expected 42, got %d", n)
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolTransactionIso(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- tx, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var level string
- err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&level)
- if err != nil {
- t.Fatalf("tx.QueryRow failed: %v", level)
- }
-
- if level != "serializable" {
- t.Errorf("Expected to be in isolation level %v but was %v", "serializable", level)
- }
-}
-
-func TestConnPoolBeginRetry(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- killerConn, err := pool.Acquire()
- if err != nil {
- t.Fatal(err)
- }
- defer pool.Release(killerConn)
-
- victimConn, err := pool.Acquire()
- if err != nil {
- t.Fatal(err)
- }
- pool.Release(victimConn)
-
- // Terminate connection that was released to pool
- if _, err = killerConn.Exec("select pg_terminate_backend($1)", victimConn.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- // Since victimConn is the only available connection in the pool, pool.Begin should
- // try to use it, fail, and allocate another connection
- tx, err := pool.Begin()
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var txPid int32
- err = tx.QueryRow("select pg_backend_pid()").Scan(&txPid)
- if err != nil {
- t.Fatalf("tx.QueryRow Scan failed: %v", err)
- }
- if txPid == victimConn.Pid {
- t.Error("Expected txPid to defer from killed conn pid, but it didn't")
- }
- }()
- }
-}
-
-func TestConnPoolQuery(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- var sum, rowCount int32
-
- rows, err := pool.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("pool.Query failed: %v", err)
- }
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolQueryConcurrentLoad(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 10)
- defer pool.Close()
-
- n := 100
- done := make(chan bool)
-
- for i := 0; i < n; i++ {
- go func() {
- defer func() { done <- true }()
- var rowCount int32
-
- rows, err := pool.Query("select generate_series(1,$1)", 1000)
- if err != nil {
- t.Fatalf("pool.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- err = rows.Scan(&n)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- if n != rowCount+1 {
- t.Fatalf("Expected n to be %d, but it was %d", rowCount+1, n)
- }
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", rows.Err())
- }
-
- if rowCount != 1000 {
- t.Error("Select called onDataRow wrong number of times")
- }
-
- _, err = pool.Exec("--;")
- if err != nil {
- t.Fatalf("pool.Exec failed: %v", err)
- }
- }()
- }
-
- for i := 0; i < n; i++ {
- <-done
- }
-}
-
-func TestConnPoolQueryRow(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- var n int32
- err := pool.QueryRow("select 40+$1", 2).Scan(&n)
- if err != nil {
- t.Fatalf("pool.QueryRow Scan failed: %v", err)
- }
-
- if n != 42 {
- t.Errorf("Expected 42, got %d", n)
- }
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolExec(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- results, err := pool.Exec("create temporary table foo(id integer primary key);")
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "CREATE TABLE" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- results, err = pool.Exec("insert into foo(id) values($1)", 1)
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- results, err = pool.Exec("drop table foo;")
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "DROP TABLE" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestConnPoolPrepare(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- _, err := pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = pool.Deallocate("test")
- if err != nil {
- t.Errorf("Deallocate failed: %v", err)
- }
-
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err, ok := err.(pgx.PgError); !(ok && err.Code == "42601") {
- t.Errorf("Expected error calling deallocated prepared statement, but got: %v", err)
- }
-}
-
-func TestConnPoolPrepareDeallocatePrepare(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- _, err := pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
- err = pool.Deallocate("test")
- if err != nil {
- t.Fatalf("Unable to deallocate statement: %v", err)
- }
- _, err = pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Fatalf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-}
-
-func TestConnPoolPrepareWhenConnIsAlreadyAcquired(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- testPreparedStatement := func(db queryRower, desc string) {
- var s string
- err := db.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Fatalf("%s. Executing prepared statement failed: %v", desc, err)
- }
-
- if s != "hello" {
- t.Fatalf("%s. Prepared statement did not return expected value: %v", desc, s)
- }
- }
-
- newReleaseOnce := func(c *pgx.Conn) func() {
- var once sync.Once
- return func() {
- once.Do(func() { pool.Release(c) })
- }
- }
-
- c1, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c1Release := newReleaseOnce(c1)
- defer c1Release()
-
- _, err = pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- testPreparedStatement(pool, "pool")
-
- c1Release()
-
- c2, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c2Release := newReleaseOnce(c2)
- defer c2Release()
-
- // This conn will not be available and will be connection at this point
- c3, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c3Release := newReleaseOnce(c3)
- defer c3Release()
-
- testPreparedStatement(c2, "c2")
- testPreparedStatement(c3, "c3")
-
- c2Release()
- c3Release()
-
- err = pool.Deallocate("test")
- if err != nil {
- t.Errorf("Deallocate failed: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err, ok := err.(pgx.PgError); !(ok && err.Code == "42601") {
- t.Errorf("Expected error calling deallocated prepared statement, but got: %v", err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/conn_test.go b/vendor/github.com/jackc/pgx/conn_test.go
deleted file mode 100644
index cfb9956..0000000
--- a/vendor/github.com/jackc/pgx/conn_test.go
+++ /dev/null
@@ -1,1744 +0,0 @@
-package pgx_test
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "os"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnect(t *testing.T) {
- t.Parallel()
-
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- if _, present := conn.RuntimeParams["server_version"]; !present {
- t.Error("Runtime parameters not stored")
- }
-
- if conn.Pid == 0 {
- t.Error("Backend PID not stored")
- }
-
- if conn.SecretKey == 0 {
- t.Error("Backend secret key not stored")
- }
-
- var currentDB string
- err = conn.QueryRow("select current_database()").Scan(&currentDB)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if currentDB != defaultConnConfig.Database {
- t.Errorf("Did not connect to specified database (%v)", defaultConnConfig.Database)
- }
-
- var user string
- err = conn.QueryRow("select current_user").Scan(&user)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if user != defaultConnConfig.User {
- t.Errorf("Did not connect as specified user (%v)", defaultConnConfig.User)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithUnixSocketDirectory(t *testing.T) {
- t.Parallel()
-
- // /.s.PGSQL.5432
- if unixSocketConnConfig == nil {
- t.Skip("Skipping due to undefined unixSocketConnConfig")
- }
-
- conn, err := pgx.Connect(*unixSocketConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithUnixSocketFile(t *testing.T) {
- t.Parallel()
-
- if unixSocketConnConfig == nil {
- t.Skip("Skipping due to undefined unixSocketConnConfig")
- }
-
- connParams := *unixSocketConnConfig
- connParams.Host = connParams.Host + "/.s.PGSQL.5432"
- conn, err := pgx.Connect(connParams)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTcp(t *testing.T) {
- t.Parallel()
-
- if tcpConnConfig == nil {
- t.Skip("Skipping due to undefined tcpConnConfig")
- }
-
- conn, err := pgx.Connect(*tcpConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTLS(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- conn, err := pgx.Connect(*tlsConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithInvalidUser(t *testing.T) {
- t.Parallel()
-
- if invalidUserConnConfig == nil {
- t.Skip("Skipping due to undefined invalidUserConnConfig")
- }
-
- _, err := pgx.Connect(*invalidUserConnConfig)
- pgErr, ok := err.(pgx.PgError)
- if !ok {
- t.Fatalf("Expected to receive a PgError with code 28000, instead received: %v", err)
- }
- if pgErr.Code != "28000" && pgErr.Code != "28P01" {
- t.Fatalf("Expected to receive a PgError with code 28000 or 28P01, instead received: %v", pgErr)
- }
-}
-
-func TestConnectWithPlainTextPassword(t *testing.T) {
- t.Parallel()
-
- if plainPasswordConnConfig == nil {
- t.Skip("Skipping due to undefined plainPasswordConnConfig")
- }
-
- conn, err := pgx.Connect(*plainPasswordConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithMD5Password(t *testing.T) {
- t.Parallel()
-
- if md5ConnConfig == nil {
- t.Skip("Skipping due to undefined md5ConnConfig")
- }
-
- conn, err := pgx.Connect(*md5ConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTLSFallback(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- connConfig := *tlsConnConfig
- connConfig.TLSConfig = &tls.Config{ServerName: "bogus.local"} // bogus ServerName should ensure certificate validation failure
-
- conn, err := pgx.Connect(connConfig)
- if err == nil {
- t.Fatal("Expected failed connection, but succeeded")
- }
-
- connConfig.UseFallbackTLS = true
- connConfig.FallbackTLSConfig = &tls.Config{InsecureSkipVerify: true}
-
- conn, err = pgx.Connect(connConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithConnectionRefused(t *testing.T) {
- t.Parallel()
-
- // Presumably nothing is listening on 127.0.0.1:1
- bad := *defaultConnConfig
- bad.Host = "127.0.0.1"
- bad.Port = 1
-
- _, err := pgx.Connect(bad)
- if err == nil {
- t.Fatal("Expected error establishing connection to bad port")
- }
-}
-
-func TestConnectCustomDialer(t *testing.T) {
- t.Parallel()
-
- if customDialerConnConfig == nil {
- t.Skip("Skipping due to undefined customDialerConnConfig")
- }
-
- dialled := false
- conf := *customDialerConnConfig
- conf.Dial = func(network, address string) (net.Conn, error) {
- dialled = true
- return net.Dial(network, address)
- }
-
- conn, err := pgx.Connect(conf)
- if err != nil {
- t.Fatalf("Unable to establish connection: %s", err)
- }
- if !dialled {
- t.Fatal("Connect did not use custom dialer")
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithRuntimeParams(t *testing.T) {
- t.Parallel()
-
- connConfig := *defaultConnConfig
- connConfig.RuntimeParams = map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- }
-
- conn, err := pgx.Connect(connConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer conn.Close()
-
- var s string
- err = conn.QueryRow("show application_name").Scan(&s)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if s != "pgxtest" {
- t.Errorf("Expected application_name to be %s, but it was %s", "pgxtest", s)
- }
-
- err = conn.QueryRow("show search_path").Scan(&s)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if s != "myschema" {
- t.Errorf("Expected search_path to be %s, but it was %s", "myschema", s)
- }
-}
-
-func TestParseURI(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: nil,
- UseFallbackTLS: false,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgresql://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb?application_name=pgxtest&search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseURI(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseURL(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseDSN(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb application_name=pgxtest search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseDSN(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseDSN(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseConnectionString(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: nil,
- UseFallbackTLS: false,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgresql://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb?application_name=pgxtest&search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb application_name=pgxtest search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseConnectionString(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseDSN(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseEnvLibpq(t *testing.T) {
- pgEnvvars := []string{"PGHOST", "PGPORT", "PGDATABASE", "PGUSER", "PGPASSWORD", "PGAPPNAME"}
-
- savedEnv := make(map[string]string)
- for _, n := range pgEnvvars {
- savedEnv[n] = os.Getenv(n)
- }
- defer func() {
- for k, v := range savedEnv {
- err := os.Setenv(k, v)
- if err != nil {
- t.Fatalf("Unable to restore environment: %v", err)
- }
- }
- }()
-
- tests := []struct {
- name string
- envvars map[string]string
- config pgx.ConnConfig
- }{
- {
- name: "No environment",
- envvars: map[string]string{},
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "Normal PG vars",
- envvars: map[string]string{
- "PGHOST": "123.123.123.123",
- "PGPORT": "7777",
- "PGDATABASE": "foo",
- "PGUSER": "bar",
- "PGPASSWORD": "baz",
- },
- config: pgx.ConnConfig{
- Host: "123.123.123.123",
- Port: 7777,
- Database: "foo",
- User: "bar",
- Password: "baz",
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "application_name",
- envvars: map[string]string{
- "PGAPPNAME": "pgxtest",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{"application_name": "pgxtest"},
- },
- },
- {
- name: "sslmode=disable",
- envvars: map[string]string{
- "PGSSLMODE": "disable",
- },
- config: pgx.ConnConfig{
- TLSConfig: nil,
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=allow",
- envvars: map[string]string{
- "PGSSLMODE": "allow",
- },
- config: pgx.ConnConfig{
- TLSConfig: nil,
- UseFallbackTLS: true,
- FallbackTLSConfig: &tls.Config{InsecureSkipVerify: true},
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=prefer",
- envvars: map[string]string{
- "PGSSLMODE": "prefer",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=require",
- envvars: map[string]string{
- "PGSSLMODE": "require",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-ca",
- envvars: map[string]string{
- "PGSSLMODE": "verify-ca",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-full",
- envvars: map[string]string{
- "PGSSLMODE": "verify-full",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-full with host",
- envvars: map[string]string{
- "PGHOST": "pgx.example",
- "PGSSLMODE": "verify-full",
- },
- config: pgx.ConnConfig{
- Host: "pgx.example",
- TLSConfig: &tls.Config{
- ServerName: "pgx.example",
- },
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- }
-
- for _, tt := range tests {
- for _, n := range pgEnvvars {
- err := os.Unsetenv(n)
- if err != nil {
- t.Fatalf("%s: Unable to clear environment: %v", tt.name, err)
- }
- }
-
- for k, v := range tt.envvars {
- err := os.Setenv(k, v)
- if err != nil {
- t.Fatalf("%s: Unable to set environment: %v", tt.name, err)
- }
- }
-
- config, err := pgx.ParseEnvLibpq()
- if err != nil {
- t.Errorf("%s: Unexpected error from pgx.ParseLibpq() => %v", tt.name, err)
- continue
- }
-
- if config.Host != tt.config.Host {
- t.Errorf("%s: expected Host to be %v got %v", tt.name, tt.config.Host, config.Host)
- }
- if config.Port != tt.config.Port {
- t.Errorf("%s: expected Port to be %v got %v", tt.name, tt.config.Port, config.Port)
- }
- if config.Port != tt.config.Port {
- t.Errorf("%s: expected Port to be %v got %v", tt.name, tt.config.Port, config.Port)
- }
- if config.User != tt.config.User {
- t.Errorf("%s: expected User to be %v got %v", tt.name, tt.config.User, config.User)
- }
- if config.Password != tt.config.Password {
- t.Errorf("%s: expected Password to be %v got %v", tt.name, tt.config.Password, config.Password)
- }
-
- if !reflect.DeepEqual(config.RuntimeParams, tt.config.RuntimeParams) {
- t.Errorf("%s: expected RuntimeParams to be %#v got %#v", tt.name, tt.config.RuntimeParams, config.RuntimeParams)
- }
-
- tlsTests := []struct {
- name string
- expected *tls.Config
- actual *tls.Config
- }{
- {
- name: "TLSConfig",
- expected: tt.config.TLSConfig,
- actual: config.TLSConfig,
- },
- {
- name: "FallbackTLSConfig",
- expected: tt.config.FallbackTLSConfig,
- actual: config.FallbackTLSConfig,
- },
- }
- for _, tlsTest := range tlsTests {
- name := tlsTest.name
- expected := tlsTest.expected
- actual := tlsTest.actual
-
- if expected == nil && actual != nil {
- t.Errorf("%s / %s: expected nil, but it was set", tt.name, name)
- } else if expected != nil && actual == nil {
- t.Errorf("%s / %s: expected to be set, but got nil", tt.name, name)
- } else if expected != nil && actual != nil {
- if actual.InsecureSkipVerify != expected.InsecureSkipVerify {
- t.Errorf("%s / %s: expected InsecureSkipVerify to be %v got %v", tt.name, name, expected.InsecureSkipVerify, actual.InsecureSkipVerify)
- }
-
- if actual.ServerName != expected.ServerName {
- t.Errorf("%s / %s: expected ServerName to be %v got %v", tt.name, name, expected.ServerName, actual.ServerName)
- }
- }
- }
-
- if config.UseFallbackTLS != tt.config.UseFallbackTLS {
- t.Errorf("%s: expected UseFallbackTLS to be %v got %v", tt.name, tt.config.UseFallbackTLS, config.UseFallbackTLS)
- }
- }
-}
-
-func TestExec(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(id integer primary key);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(id) values($1)", 1); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- if results := mustExec(t, conn, "drop table foo;"); results != "DROP TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Multiple statements can be executed -- last command tag is returned
- if results := mustExec(t, conn, "create temporary table foo(id serial primary key); drop table foo;"); results != "DROP TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Can execute longer SQL strings than sharedBufferSize
- if results := mustExec(t, conn, strings.Repeat("select 42; ", 1000)); results != "SELECT 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- // Exec no-op which does not return a command tag
- if results := mustExec(t, conn, "--;"); results != "" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestExecFailure(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if _, err := conn.Exec("selct;"); err == nil {
- t.Fatal("Expected SQL syntax error")
- }
-
- rows, _ := conn.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Exec failure appears to have broken connection: %v", rows.Err())
- }
-}
-
-func TestPrepare(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- _, err := conn.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var s string
- err = conn.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-
- // Create another prepared statement to ensure Deallocate left the connection
- // in a working state and that we can reuse the prepared statement name.
-
- _, err = conn.Prepare("test", "select $1::integer")
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var n int32
- err = conn.QueryRow("test", int32(1)).Scan(&n)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if n != 1 {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-}
-
-func TestPrepareBadSQLFailure(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if _, err := conn.Prepare("badSQL", "select foo"); err == nil {
- t.Fatal("Prepare should have failed with syntax error")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestPrepareQueryManyParameters(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- count int
- succeed bool
- }{
- {
- count: 65534,
- succeed: true,
- },
- {
- count: 65535,
- succeed: true,
- },
- {
- count: 65536,
- succeed: false,
- },
- {
- count: 65537,
- succeed: false,
- },
- }
-
- for i, tt := range tests {
- params := make([]string, 0, tt.count)
- args := make([]interface{}, 0, tt.count)
- for j := 0; j < tt.count; j++ {
- params = append(params, fmt.Sprintf("($%d::text)", j+1))
- args = append(args, strconv.Itoa(j))
- }
-
- sql := "values" + strings.Join(params, ", ")
-
- psName := fmt.Sprintf("manyParams%d", i)
- _, err := conn.Prepare(psName, sql)
- if err != nil {
- if tt.succeed {
- t.Errorf("%d. %v", i, err)
- }
- continue
- }
- if !tt.succeed {
- t.Errorf("%d. Expected error but succeeded", i)
- continue
- }
-
- rows, err := conn.Query(psName, args...)
- if err != nil {
- t.Errorf("conn.Query failed: %v", err)
- continue
- }
-
- for rows.Next() {
- var s string
- rows.Scan(&s)
- }
-
- if rows.Err() != nil {
- t.Errorf("Reading query result failed: %v", err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestPrepareIdempotency(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for i := 0; i < 2; i++ {
- _, err := conn.Prepare("test", "select 42::integer")
- if err != nil {
- t.Fatalf("%d. Unable to prepare statement: %v", i, err)
- }
-
- var n int32
- err = conn.QueryRow("test").Scan(&n)
- if err != nil {
- t.Errorf("%d. Executing prepared statement failed: %v", i, err)
- }
-
- if n != int32(42) {
- t.Errorf("%d. Prepared statement did not return expected value: %v", i, n)
- }
- }
-
- _, err := conn.Prepare("test", "select 'fail'::varchar")
- if err == nil {
- t.Fatalf("Prepare statement with same name but different SQL should have failed but it didn't")
- return
- }
-}
-
-func TestPrepareEx(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- _, err := conn.PrepareEx("test", "select $1", &pgx.PrepareExOptions{ParameterOids: []pgx.Oid{pgx.TextOid}})
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var s string
- err = conn.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-}
-
-func TestListenNotify(t *testing.T) {
- t.Parallel()
-
- listener := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, listener)
-
- if err := listener.Listen("chat"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- notifier := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, notifier)
-
- mustExec(t, notifier, "notify chat")
-
- // when notification is waiting on the socket to be read
- notification, err := listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // when notification has already been read during previous query
- mustExec(t, notifier, "notify chat")
- rows, _ := listener.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
- notification, err = listener.WaitForNotification(0)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // when timeout occurs
- notification, err = listener.WaitForNotification(time.Millisecond)
- if err != pgx.ErrNotificationTimeout {
- t.Errorf("WaitForNotification returned the wrong kind of error: %v", err)
- }
- if notification != nil {
- t.Errorf("WaitForNotification returned an unexpected notification: %v", notification)
- }
-
- // listener can listen again after a timeout
- mustExec(t, notifier, "notify chat")
- notification, err = listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-}
-
-func TestUnlistenSpecificChannel(t *testing.T) {
- t.Parallel()
-
- listener := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, listener)
-
- if err := listener.Listen("unlisten_test"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- notifier := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, notifier)
-
- mustExec(t, notifier, "notify unlisten_test")
-
- // when notification is waiting on the socket to be read
- notification, err := listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "unlisten_test" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- err = listener.Unlisten("unlisten_test")
- if err != nil {
- t.Fatalf("Unexpected error on Unlisten: %v", err)
- }
-
- // when notification has already been read during previous query
- mustExec(t, notifier, "notify unlisten_test")
- rows, _ := listener.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
- notification, err = listener.WaitForNotification(100 * time.Millisecond)
- if err != pgx.ErrNotificationTimeout {
- t.Errorf("WaitForNotification returned the wrong kind of error: %v", err)
- }
-}
-
-func TestListenNotifyWhileBusyIsSafe(t *testing.T) {
- t.Parallel()
-
- listenerDone := make(chan bool)
- go func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
- defer func() {
- listenerDone <- true
- }()
-
- if err := conn.Listen("busysafe"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- for i := 0; i < 5000; i++ {
- var sum int32
- var rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 100)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if sum != 5050 {
- t.Fatalf("Wrong rows sum: %v", sum)
- }
-
- if rowCount != 100 {
- t.Fatalf("Wrong number of rows: %v", rowCount)
- }
-
- time.Sleep(1 * time.Microsecond)
- }
- }()
-
- notifierDone := make(chan bool)
- go func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
- defer func() {
- notifierDone <- true
- }()
-
- for i := 0; i < 100000; i++ {
- mustExec(t, conn, "notify busysafe, 'hello'")
- time.Sleep(1 * time.Microsecond)
- }
- }()
-
- <-listenerDone
-}
-
-func TestListenNotifySelfNotification(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if err := conn.Listen("self"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- // Notify self and WaitForNotification immediately
- mustExec(t, conn, "notify self")
-
- notification, err := conn.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "self" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // Notify self and do something else before WaitForNotification
- mustExec(t, conn, "notify self")
-
- rows, _ := conn.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
-
- notification, err = conn.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "self" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-}
-
-func TestListenUnlistenSpecialCharacters(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- chanName := "special characters !@#{$%^&*()}"
- if err := conn.Listen(chanName); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- if err := conn.Unlisten(chanName); err != nil {
- t.Fatalf("Unable to stop listening: %v", err)
- }
-}
-
-func TestFatalRxError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- var n int32
- var s string
- err := conn.QueryRow("select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s)
- if err == pgx.ErrDeadConn {
- } else if pgErr, ok := err.(pgx.PgError); ok && pgErr.Severity == "FATAL" {
- } else {
- t.Fatalf("Expected QueryRow Scan to return fatal PgError or ErrDeadConn, but instead received %v", err)
- }
- }()
-
- otherConn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer otherConn.Close()
-
- if _, err := otherConn.Exec("select pg_terminate_backend($1)", conn.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- wg.Wait()
-
- if conn.IsAlive() {
- t.Fatal("Connection should not be live but was")
- }
-}
-
-func TestFatalTxError(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- otherConn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer otherConn.Close()
-
- _, err = otherConn.Exec("select pg_terminate_backend($1)", conn.Pid)
- if err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- _, err = conn.Query("select 1")
- if err == nil {
- t.Fatal("Expected error but none occurred")
- }
-
- if conn.IsAlive() {
- t.Fatalf("Connection should not be live but was. Previous Query err: %v", err)
- }
- }()
- }
-}
-
-func TestCommandTag(t *testing.T) {
- t.Parallel()
-
- var tests = []struct {
- commandTag pgx.CommandTag
- rowsAffected int64
- }{
- {commandTag: "INSERT 0 5", rowsAffected: 5},
- {commandTag: "UPDATE 0", rowsAffected: 0},
- {commandTag: "UPDATE 1", rowsAffected: 1},
- {commandTag: "DELETE 0", rowsAffected: 0},
- {commandTag: "DELETE 1", rowsAffected: 1},
- {commandTag: "CREATE TABLE", rowsAffected: 0},
- {commandTag: "ALTER TABLE", rowsAffected: 0},
- {commandTag: "DROP TABLE", rowsAffected: 0},
- }
-
- for i, tt := range tests {
- actual := tt.commandTag.RowsAffected()
- if tt.rowsAffected != actual {
- t.Errorf(`%d. "%s" should have affected %d rows but it was %d`, i, tt.commandTag, tt.rowsAffected, actual)
- }
- }
-}
-
-func TestInsertBoolArray(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(spice bool[]);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(spice) values($1)", []bool{true, false, true}); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestInsertTimestampArray(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(spice timestamp[]);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(spice) values($1)", []time.Time{time.Unix(1419143667, 0), time.Unix(1419143672, 0)}); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestCatchSimultaneousConnectionQueries(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows1, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows1.Close()
-
- _, err = conn.Query("select generate_series(1,$1)", 10)
- if err != pgx.ErrConnBusy {
- t.Fatalf("conn.Query should have failed with pgx.ErrConnBusy, but it was %v", err)
- }
-}
-
-func TestCatchSimultaneousConnectionQueryAndExec(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- _, err = conn.Exec("create temporary table foo(spice timestamp[])")
- if err != pgx.ErrConnBusy {
- t.Fatalf("conn.Exec should have failed with pgx.ErrConnBusy, but it was %v", err)
- }
-}
-
-type testLog struct {
- lvl int
- msg string
- ctx []interface{}
-}
-
-type testLogger struct {
- logs []testLog
-}
-
-func (l *testLogger) Debug(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelDebug, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Info(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelInfo, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Warn(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelWarn, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Error(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelError, msg: msg, ctx: ctx})
-}
-
-func TestSetLogger(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- l1 := &testLogger{}
- oldLogger := conn.SetLogger(l1)
- if oldLogger != nil {
- t.Fatalf("Expected conn.SetLogger to return %v, but it was %v", nil, oldLogger)
- }
-
- if err := conn.Listen("foo"); err != nil {
- t.Fatal(err)
- }
-
- if len(l1.logs) == 0 {
- t.Fatal("Expected new logger l1 to be called, but it wasn't")
- }
-
- l2 := &testLogger{}
- oldLogger = conn.SetLogger(l2)
- if oldLogger != l1 {
- t.Fatalf("Expected conn.SetLogger to return %v, but it was %v", l1, oldLogger)
- }
-
- if err := conn.Listen("bar"); err != nil {
- t.Fatal(err)
- }
-
- if len(l2.logs) == 0 {
- t.Fatal("Expected new logger l2 to be called, but it wasn't")
- }
-}
-
-func TestSetLogLevel(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- logger := &testLogger{}
- conn.SetLogger(logger)
-
- if _, err := conn.SetLogLevel(0); err != pgx.ErrInvalidLogLevel {
- t.Fatal("SetLogLevel with invalid level did not return error")
- }
-
- if _, err := conn.SetLogLevel(pgx.LogLevelNone); err != nil {
- t.Fatal(err)
- }
-
- if err := conn.Listen("foo"); err != nil {
- t.Fatal(err)
- }
-
- if len(logger.logs) != 0 {
- t.Fatalf("Expected logger not to be called, but it was: %v", logger.logs)
- }
-
- if _, err := conn.SetLogLevel(pgx.LogLevelTrace); err != nil {
- t.Fatal(err)
- }
-
- if err := conn.Listen("bar"); err != nil {
- t.Fatal(err)
- }
-
- if len(logger.logs) == 0 {
- t.Fatal("Expected logger to be called, but it wasn't")
- }
-}
-
-func TestIdentifierSanitize(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- ident pgx.Identifier
- expected string
- }{
- {
- ident: pgx.Identifier{`foo`},
- expected: `"foo"`,
- },
- {
- ident: pgx.Identifier{`select`},
- expected: `"select"`,
- },
- {
- ident: pgx.Identifier{`foo`, `bar`},
- expected: `"foo"."bar"`,
- },
- {
- ident: pgx.Identifier{`you should " not do this`},
- expected: `"you should "" not do this"`,
- },
- {
- ident: pgx.Identifier{`you should " not do this`, `please don't`},
- expected: `"you should "" not do this"."please don't"`,
- },
- }
-
- for i, tt := range tests {
- qval := tt.ident.Sanitize()
- if qval != tt.expected {
- t.Errorf("%d. Expected Sanitize %v to return %v but it was %v", i, tt.ident, tt.expected, qval)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/copy_from.go b/vendor/github.com/jackc/pgx/copy_from.go
index 1f8a230..8b7c3d5 100644
--- a/vendor/github.com/jackc/pgx/copy_from.go
+++ b/vendor/github.com/jackc/pgx/copy_from.go
@@ -3,6 +3,10 @@ package pgx
import (
"bytes"
"fmt"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/pkg/errors"
)
// CopyFromRows returns a CopyFromSource interface over the provided rows slice
@@ -54,25 +58,25 @@ type copyFrom struct {
func (ct *copyFrom) readUntilReadyForQuery() {
for {
- t, r, err := ct.conn.rxMsg()
+ msg, err := ct.conn.rxMsg()
if err != nil {
ct.readerErrChan <- err
close(ct.readerErrChan)
return
}
- switch t {
- case readyForQuery:
- ct.conn.rxReadyForQuery(r)
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ ct.conn.rxReadyForQuery(msg)
close(ct.readerErrChan)
return
- case commandComplete:
- case errorResponse:
- ct.readerErrChan <- ct.conn.rxErrorResponse(r)
+ case *pgproto3.CommandComplete:
+ case *pgproto3.ErrorResponse:
+ ct.readerErrChan <- ct.conn.rxErrorResponse(msg)
default:
- err = ct.conn.processContextFreeMsg(t, r)
+ err = ct.conn.processContextFreeMsg(msg)
if err != nil {
- ct.readerErrChan <- ct.conn.processContextFreeMsg(t, r)
+ ct.readerErrChan <- ct.conn.processContextFreeMsg(msg)
}
}
}
@@ -87,14 +91,14 @@ func (ct *copyFrom) waitForReaderDone() error {
func (ct *copyFrom) run() (int, error) {
quotedTableName := ct.tableName.Sanitize()
- buf := &bytes.Buffer{}
+ cbuf := &bytes.Buffer{}
for i, cn := range ct.columnNames {
if i != 0 {
- buf.WriteString(", ")
+ cbuf.WriteString(", ")
}
- buf.WriteString(quoteIdentifier(cn))
+ cbuf.WriteString(quoteIdentifier(cn))
}
- quotedColumnNames := buf.String()
+ quotedColumnNames := cbuf.String()
ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
if err != nil {
@@ -114,11 +118,14 @@ func (ct *copyFrom) run() (int, error) {
go ct.readUntilReadyForQuery()
defer ct.waitForReaderDone()
- wbuf := newWriteBuf(ct.conn, copyData)
+ buf := ct.conn.wbuf
+ buf = append(buf, copyData)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
- wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000"))
- wbuf.WriteInt32(0)
- wbuf.WriteInt32(0)
+ buf = append(buf, "PGCOPY\n\377\r\n\000"...)
+ buf = pgio.AppendInt32(buf, 0)
+ buf = pgio.AppendInt32(buf, 0)
var sentCount int
@@ -129,18 +136,16 @@ func (ct *copyFrom) run() (int, error) {
default:
}
- if len(wbuf.buf) > 65536 {
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
+ if len(buf) > 65536 {
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+ _, err = ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return 0, err
}
// Directly manipulate wbuf to reset to reuse the same buffer
- wbuf.buf = wbuf.buf[0:5]
- wbuf.buf[0] = copyData
- wbuf.sizeIdx = 1
+ buf = buf[0:5]
}
sentCount++
@@ -152,12 +157,12 @@ func (ct *copyFrom) run() (int, error) {
}
if len(values) != len(ct.columnNames) {
ct.cancelCopyIn()
- return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
+ return 0, errors.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
}
- wbuf.WriteInt16(int16(len(ct.columnNames)))
+ buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
for i, val := range values {
- err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val)
+ buf, err = encodePreparedStatementArgument(ct.conn.ConnInfo, buf, ps.FieldDescriptions[i].DataType, val)
if err != nil {
ct.cancelCopyIn()
return 0, err
@@ -171,11 +176,13 @@ func (ct *copyFrom) run() (int, error) {
return 0, ct.rowSrc.Err()
}
- wbuf.WriteInt16(-1) // terminate the copy stream
+ buf = pgio.AppendInt16(buf, -1) // terminate the copy stream
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ buf = append(buf, copyDone)
+ buf = pgio.AppendInt32(buf, 4)
- wbuf.startMsg(copyDone)
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
+ _, err = ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return 0, err
@@ -190,18 +197,16 @@ func (ct *copyFrom) run() (int, error) {
func (c *Conn) readUntilCopyInResponse() error {
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case copyInResponse:
+ switch msg := msg.(type) {
+ case *pgproto3.CopyInResponse:
return nil
default:
- err = c.processContextFreeMsg(t, r)
+ err = c.processContextFreeMsg(msg)
if err != nil {
return err
}
@@ -210,10 +215,15 @@ func (c *Conn) readUntilCopyInResponse() error {
}
func (ct *copyFrom) cancelCopyIn() error {
- wbuf := newWriteBuf(ct.conn, copyFail)
- wbuf.WriteCString("client error: abort")
- wbuf.closeMsg()
- _, err := ct.conn.conn.Write(wbuf.buf)
+ buf := ct.conn.wbuf
+ buf = append(buf, copyFail)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, "client error: abort"...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ _, err := ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return err
diff --git a/vendor/github.com/jackc/pgx/copy_from_test.go b/vendor/github.com/jackc/pgx/copy_from_test.go
deleted file mode 100644
index 54da698..0000000
--- a/vendor/github.com/jackc/pgx/copy_from_test.go
+++ /dev/null
@@ -1,428 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnCopyFromSmall(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz
- )`)
-
- inputRows := [][]interface{}{
- {int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)},
- {nil, nil, nil, nil, nil, nil, nil},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromLarge(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz,
- h bytea
- )`)
-
- inputRows := [][]interface{}{}
-
- for i := 0; i < 10000; i++ {
- inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local), []byte{111, 111, 111, 111}})
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromJSON(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
- }
-
- mustExec(t, conn, `create temporary table foo(
- a json,
- b jsonb
- )`)
-
- inputRows := [][]interface{}{
- {map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
- {nil, nil},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromFailServerSideMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int4,
- b varchar not null
- )`)
-
- inputRows := [][]interface{}{
- {int32(1), "abc"},
- {int32(2), nil}, // this row should trigger a failure
- {int32(3), "def"},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyFrom return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type failSource struct {
- count int
-}
-
-func (fs *failSource) Next() bool {
- time.Sleep(time.Millisecond * 100)
- fs.count++
- return fs.count < 100
-}
-
-func (fs *failSource) Values() ([]interface{}, error) {
- if fs.count == 3 {
- return []interface{}{nil}, nil
- }
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (fs *failSource) Err() error {
- return nil
-}
-
-func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- startTime := time.Now()
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyFrom return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- endTime := time.Now()
- copyTime := endTime.Sub(startTime)
- if copyTime > time.Second {
- t.Errorf("Failing CopyFrom shouldn't have taken so long: %v", copyTime)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type clientFailSource struct {
- count int
- err error
-}
-
-func (cfs *clientFailSource) Next() bool {
- cfs.count++
- return cfs.count < 100
-}
-
-func (cfs *clientFailSource) Values() ([]interface{}, error) {
- if cfs.count == 3 {
- cfs.err = fmt.Errorf("client error")
- return nil, cfs.err
- }
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (cfs *clientFailSource) Err() error {
- return cfs.err
-}
-
-func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type clientFinalErrSource struct {
- count int
-}
-
-func (cfs *clientFinalErrSource) Next() bool {
- cfs.count++
- return cfs.count < 5
-}
-
-func (cfs *clientFinalErrSource) Values() ([]interface{}, error) {
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (cfs *clientFinalErrSource) Err() error {
- return fmt.Errorf("final error")
-}
-
-func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/copy_to.go b/vendor/github.com/jackc/pgx/copy_to.go
deleted file mode 100644
index 229e9a4..0000000
--- a/vendor/github.com/jackc/pgx/copy_to.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package pgx
-
-import (
- "bytes"
- "fmt"
-)
-
-// Deprecated. Use CopyFromRows instead. CopyToRows returns a CopyToSource
-// interface over the provided rows slice making it usable by *Conn.CopyTo.
-func CopyToRows(rows [][]interface{}) CopyToSource {
- return &copyToRows{rows: rows, idx: -1}
-}
-
-type copyToRows struct {
- rows [][]interface{}
- idx int
-}
-
-func (ctr *copyToRows) Next() bool {
- ctr.idx++
- return ctr.idx < len(ctr.rows)
-}
-
-func (ctr *copyToRows) Values() ([]interface{}, error) {
- return ctr.rows[ctr.idx], nil
-}
-
-func (ctr *copyToRows) Err() error {
- return nil
-}
-
-// Deprecated. Use CopyFromSource instead. CopyToSource is the interface used by
-// *Conn.CopyTo as the source for copy data.
-type CopyToSource interface {
- // Next returns true if there is another row and makes the next row data
- // available to Values(). When there are no more rows available or an error
- // has occurred it returns false.
- Next() bool
-
- // Values returns the values for the current row.
- Values() ([]interface{}, error)
-
- // Err returns any error that has been encountered by the CopyToSource. If
- // this is not nil *Conn.CopyTo will abort the copy.
- Err() error
-}
-
-type copyTo struct {
- conn *Conn
- tableName string
- columnNames []string
- rowSrc CopyToSource
- readerErrChan chan error
-}
-
-func (ct *copyTo) readUntilReadyForQuery() {
- for {
- t, r, err := ct.conn.rxMsg()
- if err != nil {
- ct.readerErrChan <- err
- close(ct.readerErrChan)
- return
- }
-
- switch t {
- case readyForQuery:
- ct.conn.rxReadyForQuery(r)
- close(ct.readerErrChan)
- return
- case commandComplete:
- case errorResponse:
- ct.readerErrChan <- ct.conn.rxErrorResponse(r)
- default:
- err = ct.conn.processContextFreeMsg(t, r)
- if err != nil {
- ct.readerErrChan <- ct.conn.processContextFreeMsg(t, r)
- }
- }
- }
-}
-
-func (ct *copyTo) waitForReaderDone() error {
- var err error
- for err = range ct.readerErrChan {
- }
- return err
-}
-
-func (ct *copyTo) run() (int, error) {
- quotedTableName := quoteIdentifier(ct.tableName)
- buf := &bytes.Buffer{}
- for i, cn := range ct.columnNames {
- if i != 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(quoteIdentifier(cn))
- }
- quotedColumnNames := buf.String()
-
- ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
- if err != nil {
- return 0, err
- }
-
- err = ct.conn.sendSimpleQuery(fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames))
- if err != nil {
- return 0, err
- }
-
- err = ct.conn.readUntilCopyInResponse()
- if err != nil {
- return 0, err
- }
-
- go ct.readUntilReadyForQuery()
- defer ct.waitForReaderDone()
-
- wbuf := newWriteBuf(ct.conn, copyData)
-
- wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000"))
- wbuf.WriteInt32(0)
- wbuf.WriteInt32(0)
-
- var sentCount int
-
- for ct.rowSrc.Next() {
- select {
- case err = <-ct.readerErrChan:
- return 0, err
- default:
- }
-
- if len(wbuf.buf) > 65536 {
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return 0, err
- }
-
- // Directly manipulate wbuf to reset to reuse the same buffer
- wbuf.buf = wbuf.buf[0:5]
- wbuf.buf[0] = copyData
- wbuf.sizeIdx = 1
- }
-
- sentCount++
-
- values, err := ct.rowSrc.Values()
- if err != nil {
- ct.cancelCopyIn()
- return 0, err
- }
- if len(values) != len(ct.columnNames) {
- ct.cancelCopyIn()
- return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
- }
-
- wbuf.WriteInt16(int16(len(ct.columnNames)))
- for i, val := range values {
- err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val)
- if err != nil {
- ct.cancelCopyIn()
- return 0, err
- }
-
- }
- }
-
- if ct.rowSrc.Err() != nil {
- ct.cancelCopyIn()
- return 0, ct.rowSrc.Err()
- }
-
- wbuf.WriteInt16(-1) // terminate the copy stream
-
- wbuf.startMsg(copyDone)
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return 0, err
- }
-
- err = ct.waitForReaderDone()
- if err != nil {
- return 0, err
- }
- return sentCount, nil
-}
-
-func (ct *copyTo) cancelCopyIn() error {
- wbuf := newWriteBuf(ct.conn, copyFail)
- wbuf.WriteCString("client error: abort")
- wbuf.closeMsg()
- _, err := ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return err
- }
-
- return nil
-}
-
-// Deprecated. Use CopyFrom instead. CopyTo uses the PostgreSQL copy protocol to
-// perform bulk data insertion. It returns the number of rows copied and an
-// error.
-//
-// CopyTo requires all values use the binary format. Almost all types
-// implemented by pgx use the binary format by default. Types implementing
-// Encoder can only be used if they encode to the binary format.
-func (c *Conn) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
- ct := &copyTo{
- conn: c,
- tableName: tableName,
- columnNames: columnNames,
- rowSrc: rowSrc,
- readerErrChan: make(chan error),
- }
-
- return ct.run()
-}
diff --git a/vendor/github.com/jackc/pgx/copy_to_test.go b/vendor/github.com/jackc/pgx/copy_to_test.go
deleted file mode 100644
index ac27042..0000000
--- a/vendor/github.com/jackc/pgx/copy_to_test.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package pgx_test
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnCopyToSmall(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz
- )`)
-
- inputRows := [][]interface{}{
- {int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)},
- {nil, nil, nil, nil, nil, nil, nil},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToLarge(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz,
- h bytea
- )`)
-
- inputRows := [][]interface{}{}
-
- for i := 0; i < 10000; i++ {
- inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local), []byte{111, 111, 111, 111}})
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToJSON(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
- }
-
- mustExec(t, conn, `create temporary table foo(
- a json,
- b jsonb
- )`)
-
- inputRows := [][]interface{}{
- {map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
- {nil, nil},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToFailServerSideMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int4,
- b varchar not null
- )`)
-
- inputRows := [][]interface{}{
- {int32(1), "abc"},
- {int32(2), nil}, // this row should trigger a failure
- {int32(3), "def"},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b"}, pgx.CopyToRows(inputRows))
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyTo return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- startTime := time.Now()
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &failSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyTo return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- endTime := time.Now()
- copyTime := endTime.Sub(startTime)
- if copyTime > time.Second {
- t.Errorf("Failing CopyTo shouldn't have taken so long: %v", copyTime)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToCopyToSourceErrorMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &clientFailSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToCopyToSourceErrorEnd(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &clientFinalErrSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/doc.go b/vendor/github.com/jackc/pgx/doc.go
index f527d11..51f1edc 100644
--- a/vendor/github.com/jackc/pgx/doc.go
+++ b/vendor/github.com/jackc/pgx/doc.go
@@ -1,9 +1,9 @@
// Package pgx is a PostgreSQL database driver.
/*
-pgx provides lower level access to PostgreSQL than the standard database/sql
+pgx provides lower level access to PostgreSQL than the standard database/sql.
It remains as similar to the database/sql interface as possible while
providing better speed and access to PostgreSQL specific features. Import
-github.com/jack/pgx/stdlib to use pgx as a database/sql compatible driver.
+github.com/jackc/pgx/stdlib to use pgx as a database/sql compatible driver.
Query Interface
@@ -62,17 +62,15 @@ Use Exec to execute a query that does not return a result set.
Connection Pool
-Connection pool usage is explicit and configurable. In pgx, a connection can
-be created and managed directly, or a connection pool with a configurable
-maximum connections can be used. Also, the connection pool offers an after
-connect hook that allows every connection to be automatically setup before
-being made available in the connection pool. This is especially useful to
-ensure all connections have the same prepared statements available or to
-change any other connection settings.
+Connection pool usage is explicit and configurable. In pgx, a connection can be
+created and managed directly, or a connection pool with a configurable maximum
+connections can be used. The connection pool offers an after connect hook that
+allows every connection to be automatically setup before being made available in
+the connection pool.
-It delegates Query, QueryRow, Exec, and Begin functions to an automatically
-checked out and released connection so you can avoid manually acquiring and
-releasing connections when you do not need that level of control.
+It delegates methods such as QueryRow to an automatically checked out and
+released connection so you can avoid manually acquiring and releasing
+connections when you do not need that level of control.
var name string
var weight int64
@@ -117,11 +115,11 @@ particular:
Null Mapping
-pgx can map nulls in two ways. The first is Null* types that have a data field
-and a valid field. They work in a similar fashion to database/sql. The second
-is to use a pointer to a pointer.
+pgx can map nulls in two ways. The first is package pgtype provides types that
+have a data field and a status field. They work in a similar fashion to
+database/sql. The second is to use a pointer to a pointer.
- var foo pgx.NullString
+ var foo pgtype.Varchar
var bar *string
err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&a, &b)
if err != nil {
@@ -133,20 +131,15 @@ Array Mapping
pgx maps between int16, int32, int64, float32, float64, and string Go slices
and the equivalent PostgreSQL array type. Go slices of native types do not
support nulls, so if a PostgreSQL array that contains a null is read into a
-native Go slice an error will occur.
-
-Hstore Mapping
-
-pgx includes an Hstore type and a NullHstore type. Hstore is simply a
-map[string]string and is preferred when the hstore contains no nulls. NullHstore
-follows the Null* pattern and supports null values.
+native Go slice an error will occur. The pgtype package includes many more
+array types for PostgreSQL types that do not directly map to native Go types.
JSON and JSONB Mapping
pgx includes built-in support to marshal and unmarshal between Go types and
the PostgreSQL JSON and JSONB.
-Inet and Cidr Mapping
+Inet and CIDR Mapping
pgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In
addition, as a convenience pgx will encode from a net.IP; it will assume a /32
@@ -155,25 +148,10 @@ netmask for IPv4 and a /128 for IPv6.
Custom Type Support
pgx includes support for the common data types like integers, floats, strings,
-dates, and times that have direct mappings between Go and SQL. Support can be
-added for additional types like point, hstore, numeric, etc. that do not have
-direct mappings in Go by the types implementing ScannerPgx and Encoder.
-
-Custom types can support text or binary formats. Binary format can provide a
-large performance increase. The natural place for deciding the format for a
-value would be in ScannerPgx as it is responsible for decoding the returned
-data. However, that is impossible as the query has already been sent by the time
-the ScannerPgx is invoked. The solution to this is the global
-DefaultTypeFormats. If a custom type prefers binary format it should register it
-there.
-
- pgx.DefaultTypeFormats["point"] = pgx.BinaryFormatCode
-
-Note that the type is referred to by name, not by OID. This is because custom
-PostgreSQL types like hstore will have different OIDs on different servers. When
-pgx establishes a connection it queries the pg_type table for all types. It then
-matches the names in DefaultTypeFormats with the returned OIDs and stores it in
-Conn.PgTypes.
+dates, and times that have direct mappings between Go and SQL. In addition,
+pgx uses the github.com/jackc/pgx/pgtype library to support more types. See
+documention for that library for instructions on how to implement custom
+types.
See example_custom_type_test.go for an example of a custom type for the
PostgreSQL point type.
@@ -184,15 +162,12 @@ and database/sql/driver.Valuer interfaces.
Raw Bytes Mapping
[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified
-to PostgreSQL. In like manner, a *[]byte passed to Scan will be filled with
-the raw bytes returned by PostgreSQL. This can be especially useful for reading
-varchar, text, json, and jsonb values directly into a []byte and avoiding the
-type conversion from string.
+to PostgreSQL.
Transactions
-Transactions are started by calling Begin or BeginIso. The BeginIso variant
-creates a transaction with a specified isolation level.
+Transactions are started by calling Begin or BeginEx. The BeginEx variant
+can create a transaction with a specified isolation level.
tx, err := conn.Begin()
if err != nil {
@@ -225,7 +200,7 @@ implement CopyFromSource to avoid buffering the entire data set in memory.
}
copyCount, err := conn.CopyFrom(
- "people",
+ pgx.Identifier{"people"},
[]string{"first_name", "last_name", "age"},
pgx.CopyFromRows(rows),
)
@@ -257,9 +232,8 @@ connection.
Logging
pgx defines a simple logger interface. Connections optionally accept a logger
-that satisfies this interface. The log15 package
-(http://gopkg.in/inconshreveable/log15.v2) satisfies this interface and it is
-simple to define adapters for other loggers. Set LogLevel to control logging
-verbosity.
+that satisfies this interface. Set LogLevel to control logging verbosity.
+Adapters for github.com/inconshreveable/log15, github.com/sirupsen/logrus, and
+the testing log are provided in the log directory.
*/
package pgx
diff --git a/vendor/github.com/jackc/pgx/example_custom_type_test.go b/vendor/github.com/jackc/pgx/example_custom_type_test.go
deleted file mode 100644
index 34cc316..0000000
--- a/vendor/github.com/jackc/pgx/example_custom_type_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "github.com/jackc/pgx"
- "regexp"
- "strconv"
-)
-
-var pointRegexp *regexp.Regexp = regexp.MustCompile(`^\((.*),(.*)\)$`)
-
-// NullPoint represents a point that may be null.
-//
-// If Valid is false then the value is NULL.
-type NullPoint struct {
- X, Y float64 // Coordinates of point
- Valid bool // Valid is true if not NULL
-}
-
-func (p *NullPoint) ScanPgx(vr *pgx.ValueReader) error {
- if vr.Type().DataTypeName != "point" {
- return pgx.SerializationError(fmt.Sprintf("NullPoint.Scan cannot decode %s (OID %d)", vr.Type().DataTypeName, vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- p.X, p.Y, p.Valid = 0, 0, false
- return nil
- }
-
- switch vr.Type().FormatCode {
- case pgx.TextFormatCode:
- s := vr.ReadString(vr.Len())
- match := pointRegexp.FindStringSubmatch(s)
- if match == nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
-
- var err error
- p.X, err = strconv.ParseFloat(match[1], 64)
- if err != nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
- p.Y, err = strconv.ParseFloat(match[2], 64)
- if err != nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
- case pgx.BinaryFormatCode:
- return errors.New("binary format not implemented")
- default:
- return fmt.Errorf("unknown format %v", vr.Type().FormatCode)
- }
-
- p.Valid = true
- return vr.Err()
-}
-
-func (p NullPoint) FormatCode() int16 { return pgx.BinaryFormatCode }
-
-func (p NullPoint) Encode(w *pgx.WriteBuf, oid pgx.Oid) error {
- if !p.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- s := fmt.Sprintf("point(%v,%v)", p.X, p.Y)
- w.WriteInt32(int32(len(s)))
- w.WriteBytes([]byte(s))
-
- return nil
-}
-
-func (p NullPoint) String() string {
- if p.Valid {
- return fmt.Sprintf("%v, %v", p.X, p.Y)
- }
- return "null point"
-}
-
-func Example_CustomType() {
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- fmt.Printf("Unable to establish connection: %v", err)
- return
- }
-
- var p NullPoint
- err = conn.QueryRow("select null::point").Scan(&p)
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(p)
-
- err = conn.QueryRow("select point(1.5,2.5)").Scan(&p)
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(p)
- // Output:
- // null point
- // 1.5, 2.5
-}
diff --git a/vendor/github.com/jackc/pgx/example_json_test.go b/vendor/github.com/jackc/pgx/example_json_test.go
deleted file mode 100644
index c153415..0000000
--- a/vendor/github.com/jackc/pgx/example_json_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "github.com/jackc/pgx"
-)
-
-func Example_JSON() {
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- fmt.Printf("Unable to establish connection: %v", err)
- return
- }
-
- if _, ok := conn.PgTypes[pgx.JsonOid]; !ok {
- // No JSON type -- must be running against very old PostgreSQL
- // Pretend it works
- fmt.Println("John", 42)
- return
- }
-
- type person struct {
- Name string `json:"name"`
- Age int `json:"age"`
- }
-
- input := person{
- Name: "John",
- Age: 42,
- }
-
- var output person
-
- err = conn.QueryRow("select $1::json", input).Scan(&output)
- if err != nil {
- fmt.Println(err)
- return
- }
-
- fmt.Println(output.Name, output.Age)
- // Output:
- // John 42
-}
diff --git a/vendor/github.com/jackc/pgx/fastpath.go b/vendor/github.com/jackc/pgx/fastpath.go
index 19b9878..06e1354 100644
--- a/vendor/github.com/jackc/pgx/fastpath.go
+++ b/vendor/github.com/jackc/pgx/fastpath.go
@@ -2,29 +2,33 @@ package pgx
import (
"encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
)
func newFastpath(cn *Conn) *fastpath {
- return &fastpath{cn: cn, fns: make(map[string]Oid)}
+ return &fastpath{cn: cn, fns: make(map[string]pgtype.OID)}
}
type fastpath struct {
cn *Conn
- fns map[string]Oid
+ fns map[string]pgtype.OID
}
-func (f *fastpath) functionOID(name string) Oid {
+func (f *fastpath) functionOID(name string) pgtype.OID {
return f.fns[name]
}
-func (f *fastpath) addFunction(name string, oid Oid) {
+func (f *fastpath) addFunction(name string, oid pgtype.OID) {
f.fns[name] = oid
}
func (f *fastpath) addFunctions(rows *Rows) error {
for rows.Next() {
var name string
- var oid Oid
+ var oid pgtype.OID
if err := rows.Scan(&name, &oid); err != nil {
return err
}
@@ -47,41 +51,46 @@ func fpInt64Arg(n int64) fpArg {
return res
}
-func (f *fastpath) Call(oid Oid, args []fpArg) (res []byte, err error) {
- wbuf := newWriteBuf(f.cn, 'F') // function call
- wbuf.WriteInt32(int32(oid)) // function object id
- wbuf.WriteInt16(1) // # of argument format codes
- wbuf.WriteInt16(1) // format code: binary
- wbuf.WriteInt16(int16(len(args))) // # of arguments
+func (f *fastpath) Call(oid pgtype.OID, args []fpArg) (res []byte, err error) {
+ if err := f.cn.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
+ }
+
+ buf := f.cn.wbuf
+ buf = append(buf, 'F') // function call
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = pgio.AppendInt32(buf, int32(oid)) // function object id
+ buf = pgio.AppendInt16(buf, 1) // # of argument format codes
+ buf = pgio.AppendInt16(buf, 1) // format code: binary
+ buf = pgio.AppendInt16(buf, int16(len(args))) // # of arguments
for _, arg := range args {
- wbuf.WriteInt32(int32(len(arg))) // length of argument
- wbuf.WriteBytes(arg) // argument value
+ buf = pgio.AppendInt32(buf, int32(len(arg))) // length of argument
+ buf = append(buf, arg...) // argument value
}
- wbuf.WriteInt16(1) // response format code (binary)
- wbuf.closeMsg()
+ buf = pgio.AppendInt16(buf, 1) // response format code (binary)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- if _, err := f.cn.conn.Write(wbuf.buf); err != nil {
+ if _, err := f.cn.conn.Write(buf); err != nil {
return nil, err
}
for {
- var t byte
- var r *msgReader
- t, r, err = f.cn.rxMsg()
+ msg, err := f.cn.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case 'V': // FunctionCallResponse
- data := r.readBytes(r.readInt32())
- res = make([]byte, len(data))
- copy(res, data)
- case 'Z': // Ready for query
- f.cn.rxReadyForQuery(r)
+ switch msg := msg.(type) {
+ case *pgproto3.FunctionCallResponse:
+ res = make([]byte, len(msg.Result))
+ copy(res, msg.Result)
+ case *pgproto3.ReadyForQuery:
+ f.cn.rxReadyForQuery(msg)
// done
- return
+ return res, err
default:
- if err := f.cn.processContextFreeMsg(t, r); err != nil {
+ if err := f.cn.processContextFreeMsg(msg); err != nil {
return nil, err
}
}
diff --git a/vendor/github.com/jackc/pgx/go_stdlib.go b/vendor/github.com/jackc/pgx/go_stdlib.go
new file mode 100644
index 0000000..9372f9e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/go_stdlib.go
@@ -0,0 +1,61 @@
+package pgx
+
+import (
+ "database/sql/driver"
+ "reflect"
+)
+
+// This file contains code copied from the Go standard library due to the
+// required function not being public.
+
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// From database/sql/convert.go
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+// Issue 8415.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This function is mirrored in the database/sql/driver package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/jackc/pgx/helper_test.go b/vendor/github.com/jackc/pgx/helper_test.go
deleted file mode 100644
index eff731e..0000000
--- a/vendor/github.com/jackc/pgx/helper_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
-)
-
-func mustConnect(t testing.TB, config pgx.ConnConfig) *pgx.Conn {
- conn, err := pgx.Connect(config)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- return conn
-}
-
-func mustReplicationConnect(t testing.TB, config pgx.ConnConfig) *pgx.ReplicationConn {
- conn, err := pgx.ReplicationConnect(config)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- return conn
-}
-
-
-func closeConn(t testing.TB, conn *pgx.Conn) {
- err := conn.Close()
- if err != nil {
- t.Fatalf("conn.Close unexpectedly failed: %v", err)
- }
-}
-
-func closeReplicationConn(t testing.TB, conn *pgx.ReplicationConn) {
- err := conn.Close()
- if err != nil {
- t.Fatalf("conn.Close unexpectedly failed: %v", err)
- }
-}
-
-func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}) (commandTag pgx.CommandTag) {
- var err error
- if commandTag, err = conn.Exec(sql, arguments...); err != nil {
- t.Fatalf("Exec unexpectedly failed with %v: %v", sql, err)
- }
- return
-}
-
-// Do a simple query to ensure the connection is still usable
-func ensureConnValid(t *testing.T, conn *pgx.Conn) {
- var sum, rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/hstore.go b/vendor/github.com/jackc/pgx/hstore.go
deleted file mode 100644
index 0ab9f77..0000000
--- a/vendor/github.com/jackc/pgx/hstore.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package pgx
-
-import (
- "bytes"
- "errors"
- "fmt"
- "unicode"
- "unicode/utf8"
-)
-
-const (
- hsPre = iota
- hsKey
- hsSep
- hsVal
- hsNul
- hsNext
-)
-
-type hstoreParser struct {
- str string
- pos int
-}
-
-func newHSP(in string) *hstoreParser {
- return &hstoreParser{
- pos: 0,
- str: in,
- }
-}
-
-func (p *hstoreParser) Consume() (r rune, end bool) {
- if p.pos >= len(p.str) {
- end = true
- return
- }
- r, w := utf8.DecodeRuneInString(p.str[p.pos:])
- p.pos += w
- return
-}
-
-func (p *hstoreParser) Peek() (r rune, end bool) {
- if p.pos >= len(p.str) {
- end = true
- return
- }
- r, _ = utf8.DecodeRuneInString(p.str[p.pos:])
- return
-}
-
-func parseHstoreToMap(s string) (m map[string]string, err error) {
- keys, values, err := ParseHstore(s)
- if err != nil {
- return
- }
- m = make(map[string]string, len(keys))
- for i, key := range keys {
- if !values[i].Valid {
- err = fmt.Errorf("key '%s' has NULL value", key)
- m = nil
- return
- }
- m[key] = values[i].String
- }
- return
-}
-
-func parseHstoreToNullHstore(s string) (store map[string]NullString, err error) {
- keys, values, err := ParseHstore(s)
- if err != nil {
- return
- }
-
- store = make(map[string]NullString, len(keys))
-
- for i, key := range keys {
- store[key] = values[i]
- }
- return
-}
-
-// ParseHstore parses the string representation of an hstore column (the same
-// you would get from an ordinary SELECT) into two slices of keys and values. it
-// is used internally in the default parsing of hstores, but is exported for use
-// in handling custom data structures backed by an hstore column without the
-// overhead of creating a map[string]string
-func ParseHstore(s string) (k []string, v []NullString, err error) {
- if s == "" {
- return
- }
-
- buf := bytes.Buffer{}
- keys := []string{}
- values := []NullString{}
- p := newHSP(s)
-
- r, end := p.Consume()
- state := hsPre
-
- for !end {
- switch state {
- case hsPre:
- if r == '"' {
- state = hsKey
- } else {
- err = errors.New("String does not begin with \"")
- }
- case hsKey:
- switch r {
- case '"': //End of the key
- if buf.Len() == 0 {
- err = errors.New("Empty Key is invalid")
- } else {
- keys = append(keys, buf.String())
- buf = bytes.Buffer{}
- state = hsSep
- }
- case '\\': //Potential escaped character
- n, end := p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS in key, expecting character or \"")
- case n == '"', n == '\\':
- buf.WriteRune(n)
- default:
- buf.WriteRune(r)
- buf.WriteRune(n)
- }
- default: //Any other character
- buf.WriteRune(r)
- }
- case hsSep:
- if r == '=' {
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after '=', expecting '>'")
- case r == '>':
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
- case r == '"':
- state = hsVal
- case r == 'N':
- state = hsNul
- default:
- err = fmt.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
- }
- default:
- err = fmt.Errorf("Invalid character after '=', expecting '>'")
- }
- } else {
- err = fmt.Errorf("Invalid character '%c' after value, expecting '='", r)
- }
- case hsVal:
- switch r {
- case '"': //End of the value
- values = append(values, NullString{String: buf.String(), Valid: true})
- buf = bytes.Buffer{}
- state = hsNext
- case '\\': //Potential escaped character
- n, end := p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS in key, expecting character or \"")
- case n == '"', n == '\\':
- buf.WriteRune(n)
- default:
- buf.WriteRune(r)
- buf.WriteRune(n)
- }
- default: //Any other character
- buf.WriteRune(r)
- }
- case hsNul:
- nulBuf := make([]rune, 3)
- nulBuf[0] = r
- for i := 1; i < 3; i++ {
- r, end = p.Consume()
- if end {
- err = errors.New("Found EOS in NULL value")
- return
- }
- nulBuf[i] = r
- }
- if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
- values = append(values, NullString{String: "", Valid: false})
- state = hsNext
- } else {
- err = fmt.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
- }
- case hsNext:
- if r == ',' {
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after ',', expcting space")
- case (unicode.IsSpace(r)):
- r, end = p.Consume()
- state = hsKey
- default:
- err = fmt.Errorf("Invalid character '%c' after ', ', expecting \"", r)
- }
- } else {
- err = fmt.Errorf("Invalid character '%c' after value, expecting ','", r)
- }
- }
-
- if err != nil {
- return
- }
- r, end = p.Consume()
- }
- if state != hsNext {
- err = errors.New("Improperly formatted hstore")
- return
- }
- k = keys
- v = values
- return
-}
diff --git a/vendor/github.com/jackc/pgx/hstore_test.go b/vendor/github.com/jackc/pgx/hstore_test.go
deleted file mode 100644
index c948f0c..0000000
--- a/vendor/github.com/jackc/pgx/hstore_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
-)
-
-func TestHstoreTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type test struct {
- hstore pgx.Hstore
- description string
- }
-
- tests := []test{
- {pgx.Hstore{}, "empty"},
- {pgx.Hstore{"foo": "bar"}, "single key/value"},
- {pgx.Hstore{"foo": "bar", "baz": "quz"}, "multiple key/values"},
- {pgx.Hstore{"NULL": "bar"}, `string "NULL" key`},
- {pgx.Hstore{"foo": "NULL"}, `string "NULL" value`},
- }
-
- specialStringTests := []struct {
- input string
- description string
- }{
- {`"`, `double quote (")`},
- {`'`, `single quote (')`},
- {`\`, `backslash (\)`},
- {`\\`, `multiple backslashes (\\)`},
- {`=>`, `separator (=>)`},
- {` `, `space`},
- {`\ / / \\ => " ' " '`, `multiple special characters`},
- }
- for _, sst := range specialStringTests {
- tests = append(tests, test{pgx.Hstore{sst.input + "foo": "bar"}, "key with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.Hstore{"foo" + sst.input + "foo": "bar"}, "key with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.Hstore{"foo" + sst.input: "bar"}, "key with " + sst.description + " at end"})
- tests = append(tests, test{pgx.Hstore{sst.input: "bar"}, "key is " + sst.description})
-
- tests = append(tests, test{pgx.Hstore{"foo": sst.input + "bar"}, "value with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.Hstore{"foo": "bar" + sst.input + "bar"}, "value with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.Hstore{"foo": "bar" + sst.input}, "value with " + sst.description + " at end"})
- tests = append(tests, test{pgx.Hstore{"foo": sst.input}, "value is " + sst.description})
- }
-
- for _, tt := range tests {
- var result pgx.Hstore
- err := conn.QueryRow("select $1::hstore", tt.hstore).Scan(&result)
- if err != nil {
- t.Errorf(`%s: QueryRow.Scan returned an error: %v`, tt.description, err)
- }
-
- for key, inValue := range tt.hstore {
- outValue, ok := result[key]
- if ok {
- if inValue != outValue {
- t.Errorf(`%s: Key %s mismatch - expected %s, received %s`, tt.description, key, inValue, outValue)
- }
- } else {
- t.Errorf(`%s: Missing key %s`, tt.description, key)
- }
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestNullHstoreTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type test struct {
- nullHstore pgx.NullHstore
- description string
- }
-
- tests := []test{
- {pgx.NullHstore{}, "null"},
- {pgx.NullHstore{Valid: true}, "empty"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar", Valid: true}},
- Valid: true},
- "single key/value"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar", Valid: true}, "baz": {String: "quz", Valid: true}},
- Valid: true},
- "multiple key/values"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"NULL": {String: "bar", Valid: true}},
- Valid: true},
- `string "NULL" key`},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "NULL", Valid: true}},
- Valid: true},
- `string "NULL" value`},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "", Valid: false}},
- Valid: true},
- `NULL value`},
- }
-
- specialStringTests := []struct {
- input string
- description string
- }{
- {`"`, `double quote (")`},
- {`'`, `single quote (')`},
- {`\`, `backslash (\)`},
- {`\\`, `multiple backslashes (\\)`},
- {`=>`, `separator (=>)`},
- {` `, `space`},
- {`\ / / \\ => " ' " '`, `multiple special characters`},
- }
- for _, sst := range specialStringTests {
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{sst.input + "foo": {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo" + sst.input + "foo": {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo" + sst.input: {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " at end"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{sst.input: {String: "bar", Valid: true}},
- Valid: true},
- "key is " + sst.description})
-
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: sst.input + "bar", Valid: true}},
- Valid: true},
- "value with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar" + sst.input + "bar", Valid: true}},
- Valid: true},
- "value with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar" + sst.input, Valid: true}},
- Valid: true},
- "value with " + sst.description + " at end"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: sst.input, Valid: true}},
- Valid: true},
- "value is " + sst.description})
- }
-
- for _, tt := range tests {
- var result pgx.NullHstore
- err := conn.QueryRow("select $1::hstore", tt.nullHstore).Scan(&result)
- if err != nil {
- t.Errorf(`%s: QueryRow.Scan returned an error: %v`, tt.description, err)
- }
-
- if result.Valid != tt.nullHstore.Valid {
- t.Errorf(`%s: Valid mismatch - expected %v, received %v`, tt.description, tt.nullHstore.Valid, result.Valid)
- }
-
- for key, inValue := range tt.nullHstore.Hstore {
- outValue, ok := result.Hstore[key]
- if ok {
- if inValue != outValue {
- t.Errorf(`%s: Key %s mismatch - expected %v, received %v`, tt.description, key, inValue, outValue)
- }
- } else {
- t.Errorf(`%s: Missing key %s`, tt.description, key)
- }
- }
-
- ensureConnValid(t, conn)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go b/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go
new file mode 100644
index 0000000..53543b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go
@@ -0,0 +1,237 @@
+package sanitize
+
+import (
+ "bytes"
+ "encoding/hex"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+)
+
+// Part is either a string or an int. A string is raw SQL. An int is a
+// argument placeholder.
+type Part interface{}
+
+type Query struct {
+ Parts []Part
+}
+
+func (q *Query) Sanitize(args ...interface{}) (string, error) {
+ argUse := make([]bool, len(args))
+ buf := &bytes.Buffer{}
+
+ for _, part := range q.Parts {
+ var str string
+ switch part := part.(type) {
+ case string:
+ str = part
+ case int:
+ argIdx := part - 1
+ if argIdx >= len(args) {
+ return "", errors.Errorf("insufficient arguments")
+ }
+ arg := args[argIdx]
+ switch arg := arg.(type) {
+ case nil:
+ str = "null"
+ case int64:
+ str = strconv.FormatInt(arg, 10)
+ case float64:
+ str = strconv.FormatFloat(arg, 'f', -1, 64)
+ case bool:
+ str = strconv.FormatBool(arg)
+ case []byte:
+ str = QuoteBytes(arg)
+ case string:
+ str = QuoteString(arg)
+ case time.Time:
+ str = arg.Format("'2006-01-02 15:04:05.999999999Z07:00:00'")
+ default:
+ return "", errors.Errorf("invalid arg type: %T", arg)
+ }
+ argUse[argIdx] = true
+ default:
+ return "", errors.Errorf("invalid Part type: %T", part)
+ }
+ buf.WriteString(str)
+ }
+
+ for i, used := range argUse {
+ if !used {
+ return "", errors.Errorf("unused argument: %d", i)
+ }
+ }
+ return buf.String(), nil
+}
+
+func NewQuery(sql string) (*Query, error) {
+ l := &sqlLexer{
+ src: sql,
+ stateFn: rawState,
+ }
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ query := &Query{Parts: l.parts}
+
+ return query, nil
+}
+
+func QuoteString(str string) string {
+ return "'" + strings.Replace(str, "'", "''", -1) + "'"
+}
+
+func QuoteBytes(buf []byte) string {
+ return `'\x` + hex.EncodeToString(buf) + "'"
+}
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ stateFn stateFn
+ parts []Part
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '$':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if '0' <= nextRune && nextRune <= '9' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return placeholderState
+ }
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+// placeholderState consumes a placeholder value. The $ must have already has
+// already been consumed. The first rune must be a digit.
+func placeholderState(l *sqlLexer) stateFn {
+ num := 0
+
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if '0' <= r && r <= '9' {
+ num *= 10
+ num += int(r - '0')
+ } else {
+ l.parts = append(l.parts, num)
+ l.pos -= width
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
+// as necessary. This function is only safe when standard_conforming_strings is
+// on.
+func SanitizeSQL(sql string, args ...interface{}) (string, error) {
+ query, err := NewQuery(sql)
+ if err != nil {
+ return "", err
+ }
+ return query.Sanitize(args...)
+}
diff --git a/vendor/github.com/jackc/pgx/large_objects.go b/vendor/github.com/jackc/pgx/large_objects.go
index a4922ef..e109bce 100644
--- a/vendor/github.com/jackc/pgx/large_objects.go
+++ b/vendor/github.com/jackc/pgx/large_objects.go
@@ -2,6 +2,8 @@ package pgx
import (
"io"
+
+ "github.com/jackc/pgx/pgtype"
)
// LargeObjects is a structure used to access the large objects API. It is only
@@ -14,20 +16,20 @@ type LargeObjects struct {
fp *fastpath
}
-const largeObjectFns = `select proname, oid from pg_catalog.pg_proc
+const largeObjectFns = `select proname, oid from pg_catalog.pg_proc
where proname in (
-'lo_open',
-'lo_close',
-'lo_create',
-'lo_unlink',
-'lo_lseek',
-'lo_lseek64',
-'lo_tell',
-'lo_tell64',
-'lo_truncate',
-'lo_truncate64',
-'loread',
-'lowrite')
+'lo_open',
+'lo_close',
+'lo_create',
+'lo_unlink',
+'lo_lseek',
+'lo_lseek64',
+'lo_tell',
+'lo_tell64',
+'lo_truncate',
+'lo_truncate64',
+'loread',
+'lowrite')
and pronamespace = (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog')`
// LargeObjects returns a LargeObjects instance for the transaction.
@@ -60,19 +62,19 @@ const (
// Create creates a new large object. If id is zero, the server assigns an
// unused OID.
-func (o *LargeObjects) Create(id Oid) (Oid, error) {
- newOid, err := fpInt32(o.fp.CallFn("lo_create", []fpArg{fpIntArg(int32(id))}))
- return Oid(newOid), err
+func (o *LargeObjects) Create(id pgtype.OID) (pgtype.OID, error) {
+ newOID, err := fpInt32(o.fp.CallFn("lo_create", []fpArg{fpIntArg(int32(id))}))
+ return pgtype.OID(newOID), err
}
// Open opens an existing large object with the given mode.
-func (o *LargeObjects) Open(oid Oid, mode LargeObjectMode) (*LargeObject, error) {
+func (o *LargeObjects) Open(oid pgtype.OID, mode LargeObjectMode) (*LargeObject, error) {
fd, err := fpInt32(o.fp.CallFn("lo_open", []fpArg{fpIntArg(int32(oid)), fpIntArg(int32(mode))}))
return &LargeObject{fd: fd, lo: o}, err
}
// Unlink removes a large object from the database.
-func (o *LargeObjects) Unlink(oid Oid) error {
+func (o *LargeObjects) Unlink(oid pgtype.OID) error {
_, err := o.fp.CallFn("lo_unlink", []fpArg{fpIntArg(int32(oid))})
return err
}
diff --git a/vendor/github.com/jackc/pgx/large_objects_test.go b/vendor/github.com/jackc/pgx/large_objects_test.go
deleted file mode 100644
index a19c851..0000000
--- a/vendor/github.com/jackc/pgx/large_objects_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package pgx_test
-
-import (
- "io"
- "testing"
-
- "github.com/jackc/pgx"
-)
-
-func TestLargeObjects(t *testing.T) {
- t.Parallel()
-
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- lo, err := tx.LargeObjects()
- if err != nil {
- t.Fatal(err)
- }
-
- id, err := lo.Create(0)
- if err != nil {
- t.Fatal(err)
- }
-
- obj, err := lo.Open(id, pgx.LargeObjectModeRead|pgx.LargeObjectModeWrite)
- if err != nil {
- t.Fatal(err)
- }
-
- n, err := obj.Write([]byte("testing"))
- if err != nil {
- t.Fatal(err)
- }
- if n != 7 {
- t.Errorf("Expected n to be 7, got %d", n)
- }
-
- pos, err := obj.Seek(1, 0)
- if err != nil {
- t.Fatal(err)
- }
- if pos != 1 {
- t.Errorf("Expected pos to be 1, got %d", pos)
- }
-
- res := make([]byte, 6)
- n, err = obj.Read(res)
- if err != nil {
- t.Fatal(err)
- }
- if string(res) != "esting" {
- t.Errorf(`Expected res to be "esting", got %q`, res)
- }
- if n != 6 {
- t.Errorf("Expected n to be 6, got %d", n)
- }
-
- n, err = obj.Read(res)
- if err != io.EOF {
- t.Error("Expected io.EOF, go nil")
- }
- if n != 0 {
- t.Errorf("Expected n to be 0, got %d", n)
- }
-
- pos, err = obj.Tell()
- if err != nil {
- t.Fatal(err)
- }
- if pos != 7 {
- t.Errorf("Expected pos to be 7, got %d", pos)
- }
-
- err = obj.Truncate(1)
- if err != nil {
- t.Fatal(err)
- }
-
- pos, err = obj.Seek(-1, 2)
- if err != nil {
- t.Fatal(err)
- }
- if pos != 0 {
- t.Errorf("Expected pos to be 0, got %d", pos)
- }
-
- res = make([]byte, 2)
- n, err = obj.Read(res)
- if err != io.EOF {
- t.Errorf("Expected err to be io.EOF, got %v", err)
- }
- if n != 1 {
- t.Errorf("Expected n to be 1, got %d", n)
- }
- if res[0] != 't' {
- t.Errorf("Expected res[0] to be 't', got %v", res[0])
- }
-
- err = obj.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- err = lo.Unlink(id)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = lo.Open(id, pgx.LargeObjectModeRead)
- if e, ok := err.(pgx.PgError); !ok || e.Code != "42704" {
- t.Errorf("Expected undefined_object error (42704), got %#v", err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/logger.go b/vendor/github.com/jackc/pgx/logger.go
index 4423325..528698b 100644
--- a/vendor/github.com/jackc/pgx/logger.go
+++ b/vendor/github.com/jackc/pgx/logger.go
@@ -2,13 +2,13 @@ package pgx
import (
"encoding/hex"
- "errors"
"fmt"
+
+ "github.com/pkg/errors"
)
// The values for log levels are chosen such that the zero value means that no
-// log level was specified and we can default to LogLevelDebug to preserve
-// the behavior that existed prior to log level introduction.
+// log level was specified.
const (
LogLevelTrace = 6
LogLevelDebug = 5
@@ -18,16 +18,33 @@ const (
LogLevelNone = 1
)
+// LogLevel represents the pgx logging level. See LogLevel* constants for
+// possible values.
+type LogLevel int
+
+func (ll LogLevel) String() string {
+ switch ll {
+ case LogLevelTrace:
+ return "trace"
+ case LogLevelDebug:
+ return "debug"
+ case LogLevelInfo:
+ return "info"
+ case LogLevelWarn:
+ return "warn"
+ case LogLevelError:
+ return "error"
+ case LogLevelNone:
+ return "none"
+ default:
+ return fmt.Sprintf("invalid level %d", ll)
+ }
+}
+
// Logger is the interface used to get logging from pgx internals.
-// https://github.com/inconshreveable/log15 is the recommended logging package.
-// This logging interface was extracted from there. However, it should be simple
-// to adapt any logger to this interface.
type Logger interface {
- // Log a message at the given level with context key/value pairs
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
+ // Log a message at the given level with data key/value pairs. data may be nil.
+ Log(level LogLevel, msg string, data map[string]interface{})
}
// LogLevelFromString converts log level string to constant
@@ -39,7 +56,7 @@ type Logger interface {
// warn
// error
// none
-func LogLevelFromString(s string) (int, error) {
+func LogLevelFromString(s string) (LogLevel, error) {
switch s {
case "trace":
return LogLevelTrace, nil
diff --git a/vendor/github.com/jackc/pgx/messages.go b/vendor/github.com/jackc/pgx/messages.go
index 317ba27..97e8929 100644
--- a/vendor/github.com/jackc/pgx/messages.go
+++ b/vendor/github.com/jackc/pgx/messages.go
@@ -1,67 +1,76 @@
package pgx
import (
- "encoding/binary"
-)
+ "math"
+ "reflect"
+ "time"
-const (
- protocolVersionNumber = 196608 // 3.0
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgtype"
)
const (
- backendKeyData = 'K'
- authenticationX = 'R'
- readyForQuery = 'Z'
- rowDescription = 'T'
- dataRow = 'D'
- commandComplete = 'C'
- errorResponse = 'E'
- noticeResponse = 'N'
- parseComplete = '1'
- parameterDescription = 't'
- bindComplete = '2'
- notificationResponse = 'A'
- emptyQueryResponse = 'I'
- noData = 'n'
- closeComplete = '3'
- flush = 'H'
- copyInResponse = 'G'
- copyData = 'd'
- copyFail = 'f'
- copyDone = 'c'
+ copyData = 'd'
+ copyFail = 'f'
+ copyDone = 'c'
+ varHeaderSize = 4
)
-type startupMessage struct {
- options map[string]string
+type FieldDescription struct {
+ Name string
+ Table pgtype.OID
+ AttributeNumber uint16
+ DataType pgtype.OID
+ DataTypeSize int16
+ DataTypeName string
+ Modifier uint32
+ FormatCode int16
}
-func newStartupMessage() *startupMessage {
- return &startupMessage{map[string]string{}}
+func (fd FieldDescription) Length() (int64, bool) {
+ switch fd.DataType {
+ case pgtype.TextOID, pgtype.ByteaOID:
+ return math.MaxInt64, true
+ case pgtype.VarcharOID, pgtype.BPCharArrayOID:
+ return int64(fd.Modifier - varHeaderSize), true
+ default:
+ return 0, false
+ }
}
-func (s *startupMessage) Bytes() (buf []byte) {
- buf = make([]byte, 8, 128)
- binary.BigEndian.PutUint32(buf[4:8], uint32(protocolVersionNumber))
- for key, value := range s.options {
- buf = append(buf, key...)
- buf = append(buf, 0)
- buf = append(buf, value...)
- buf = append(buf, 0)
+func (fd FieldDescription) PrecisionScale() (precision, scale int64, ok bool) {
+ switch fd.DataType {
+ case pgtype.NumericOID:
+ mod := fd.Modifier - varHeaderSize
+ precision = int64((mod >> 16) & 0xffff)
+ scale = int64(mod & 0xffff)
+ return precision, scale, true
+ default:
+ return 0, 0, false
}
- buf = append(buf, ("\000")...)
- binary.BigEndian.PutUint32(buf[0:4], uint32(len(buf)))
- return buf
}
-type FieldDescription struct {
- Name string
- Table Oid
- AttributeNumber int16
- DataType Oid
- DataTypeSize int16
- DataTypeName string
- Modifier int32
- FormatCode int16
+func (fd FieldDescription) Type() reflect.Type {
+ switch fd.DataType {
+ case pgtype.Int8OID:
+ return reflect.TypeOf(int64(0))
+ case pgtype.Int4OID:
+ return reflect.TypeOf(int32(0))
+ case pgtype.Int2OID:
+ return reflect.TypeOf(int16(0))
+ case pgtype.VarcharOID, pgtype.BPCharArrayOID, pgtype.TextOID:
+ return reflect.TypeOf("")
+ case pgtype.BoolOID:
+ return reflect.TypeOf(false)
+ case pgtype.NumericOID:
+ return reflect.TypeOf(float64(0))
+ case pgtype.DateOID, pgtype.TimestampOID, pgtype.TimestamptzOID:
+ return reflect.TypeOf(time.Time{})
+ case pgtype.ByteaOID:
+ return reflect.TypeOf([]byte(nil))
+ default:
+ return reflect.TypeOf(new(interface{})).Elem()
+ }
}
// PgError represents an error reported by the PostgreSQL server. See
@@ -91,69 +100,114 @@ func (pe PgError) Error() string {
return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
}
-func newWriteBuf(c *Conn, t byte) *WriteBuf {
- buf := append(c.wbuf[0:0], t, 0, 0, 0, 0)
- c.writeBuf = WriteBuf{buf: buf, sizeIdx: 1, conn: c}
- return &c.writeBuf
-}
+// Notice represents a notice response message reported by the PostgreSQL
+// server. Be aware that this is distinct from LISTEN/NOTIFY notification.
+type Notice PgError
-// WriteBuf is used build messages to send to the PostgreSQL server. It is used
-// by the Encoder interface when implementing custom encoders.
-type WriteBuf struct {
- buf []byte
- sizeIdx int
- conn *Conn
-}
+// appendParse appends a PostgreSQL wire protocol parse message to buf and returns it.
+func appendParse(buf []byte, name string, query string, parameterOIDs []pgtype.OID) []byte {
+ buf = append(buf, 'P')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ buf = append(buf, query...)
+ buf = append(buf, 0)
-func (wb *WriteBuf) startMsg(t byte) {
- wb.closeMsg()
- wb.buf = append(wb.buf, t, 0, 0, 0, 0)
- wb.sizeIdx = len(wb.buf) - 4
-}
+ buf = pgio.AppendInt16(buf, int16(len(parameterOIDs)))
+ for _, oid := range parameterOIDs {
+ buf = pgio.AppendUint32(buf, uint32(oid))
+ }
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) closeMsg() {
- binary.BigEndian.PutUint32(wb.buf[wb.sizeIdx:wb.sizeIdx+4], uint32(len(wb.buf)-wb.sizeIdx))
+ return buf
}
-func (wb *WriteBuf) WriteByte(b byte) {
- wb.buf = append(wb.buf, b)
-}
+// appendDescribe appends a PostgreSQL wire protocol describe message to buf and returns it.
+func appendDescribe(buf []byte, objectType byte, name string) []byte {
+ buf = append(buf, 'D')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, objectType)
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) WriteCString(s string) {
- wb.buf = append(wb.buf, []byte(s)...)
- wb.buf = append(wb.buf, 0)
+ return buf
}
-func (wb *WriteBuf) WriteInt16(n int16) {
- b := make([]byte, 2)
- binary.BigEndian.PutUint16(b, uint16(n))
- wb.buf = append(wb.buf, b...)
-}
+// appendSync appends a PostgreSQL wire protocol sync message to buf and returns it.
+func appendSync(buf []byte) []byte {
+ buf = append(buf, 'S')
+ buf = pgio.AppendInt32(buf, 4)
-func (wb *WriteBuf) WriteUint16(n uint16) {
- b := make([]byte, 2)
- binary.BigEndian.PutUint16(b, n)
- wb.buf = append(wb.buf, b...)
+ return buf
}
-func (wb *WriteBuf) WriteInt32(n int32) {
- b := make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(n))
- wb.buf = append(wb.buf, b...)
-}
+// appendBind appends a PostgreSQL wire protocol bind message to buf and returns it.
+func appendBind(
+ buf []byte,
+ destinationPortal,
+ preparedStatement string,
+ connInfo *pgtype.ConnInfo,
+ parameterOIDs []pgtype.OID,
+ arguments []interface{},
+ resultFormatCodes []int16,
+) ([]byte, error) {
+ buf = append(buf, 'B')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, destinationPortal...)
+ buf = append(buf, 0)
+ buf = append(buf, preparedStatement...)
+ buf = append(buf, 0)
+
+ buf = pgio.AppendInt16(buf, int16(len(parameterOIDs)))
+ for i, oid := range parameterOIDs {
+ buf = pgio.AppendInt16(buf, chooseParameterFormatCode(connInfo, oid, arguments[i]))
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(arguments)))
+ for i, oid := range parameterOIDs {
+ var err error
+ buf, err = encodePreparedStatementArgument(connInfo, buf, oid, arguments[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(resultFormatCodes)))
+ for _, fc := range resultFormatCodes {
+ buf = pgio.AppendInt16(buf, fc)
+ }
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) WriteUint32(n uint32) {
- b := make([]byte, 4)
- binary.BigEndian.PutUint32(b, n)
- wb.buf = append(wb.buf, b...)
+ return buf, nil
}
-func (wb *WriteBuf) WriteInt64(n int64) {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(n))
- wb.buf = append(wb.buf, b...)
+// appendExecute appends a PostgreSQL wire protocol execute message to buf and returns it.
+func appendExecute(buf []byte, portal string, maxRows uint32) []byte {
+ buf = append(buf, 'E')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = append(buf, portal...)
+ buf = append(buf, 0)
+ buf = pgio.AppendUint32(buf, maxRows)
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ return buf
}
-func (wb *WriteBuf) WriteBytes(b []byte) {
- wb.buf = append(wb.buf, b...)
+// appendQuery appends a PostgreSQL wire protocol query message to buf and returns it.
+func appendQuery(buf []byte, query string) []byte {
+ buf = append(buf, 'Q')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, query...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ return buf
}
diff --git a/vendor/github.com/jackc/pgx/msg_reader.go b/vendor/github.com/jackc/pgx/msg_reader.go
deleted file mode 100644
index 21db5d2..0000000
--- a/vendor/github.com/jackc/pgx/msg_reader.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package pgx
-
-import (
- "bufio"
- "encoding/binary"
- "errors"
- "io"
-)
-
-// msgReader is a helper that reads values from a PostgreSQL message.
-type msgReader struct {
- reader *bufio.Reader
- msgBytesRemaining int32
- err error
- log func(lvl int, msg string, ctx ...interface{})
- shouldLog func(lvl int) bool
-}
-
-// Err returns any error that the msgReader has experienced
-func (r *msgReader) Err() error {
- return r.err
-}
-
-// fatal tells rc that a Fatal error has occurred
-func (r *msgReader) fatal(err error) {
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.fatal", "error", err, "msgBytesRemaining", r.msgBytesRemaining)
- }
- r.err = err
-}
-
-// rxMsg reads the type and size of the next message.
-func (r *msgReader) rxMsg() (byte, error) {
- if r.err != nil {
- return 0, r.err
- }
-
- if r.msgBytesRemaining > 0 {
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.rxMsg discarding unread previous message", "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- _, err := r.reader.Discard(int(r.msgBytesRemaining))
- if err != nil {
- return 0, err
- }
- }
-
- b, err := r.reader.Peek(5)
- if err != nil {
- r.fatal(err)
- return 0, err
- }
- msgType := b[0]
- r.msgBytesRemaining = int32(binary.BigEndian.Uint32(b[1:])) - 4
- r.reader.Discard(5)
- return msgType, nil
-}
-
-func (r *msgReader) readByte() byte {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining--
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.ReadByte()
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readByte", "value", b, "byteAsString", string(b), "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return b
-}
-
-func (r *msgReader) readInt16() int16 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 2
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(2)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int16(binary.BigEndian.Uint16(b))
-
- r.reader.Discard(2)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt16", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readInt32() int32 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 4
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(4)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int32(binary.BigEndian.Uint32(b))
-
- r.reader.Discard(4)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt32", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readUint16() uint16 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 2
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(2)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := uint16(binary.BigEndian.Uint16(b))
-
- r.reader.Discard(2)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readUint16", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readUint32() uint32 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 4
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(4)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := uint32(binary.BigEndian.Uint32(b))
-
- r.reader.Discard(4)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readUint32", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readInt64() int64 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 8
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(8)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int64(binary.BigEndian.Uint64(b))
-
- r.reader.Discard(8)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt64", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readOid() Oid {
- return Oid(r.readInt32())
-}
-
-// readCString reads a null terminated string
-func (r *msgReader) readCString() string {
- if r.err != nil {
- return ""
- }
-
- b, err := r.reader.ReadBytes(0)
- if err != nil {
- r.fatal(err)
- return ""
- }
-
- r.msgBytesRemaining -= int32(len(b))
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return ""
- }
-
- s := string(b[0 : len(b)-1])
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readCString", "value", s, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return s
-}
-
-// readString reads count bytes and returns as string
-func (r *msgReader) readString(countI32 int32) string {
- if r.err != nil {
- return ""
- }
-
- r.msgBytesRemaining -= countI32
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return ""
- }
-
- count := int(countI32)
- var s string
-
- if r.reader.Buffered() >= count {
- buf, _ := r.reader.Peek(count)
- s = string(buf)
- r.reader.Discard(count)
- } else {
- buf := make([]byte, count)
- _, err := io.ReadFull(r.reader, buf)
- if err != nil {
- r.fatal(err)
- return ""
- }
- s = string(buf)
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readString", "value", s, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return s
-}
-
-// readBytes reads count bytes and returns as []byte
-func (r *msgReader) readBytes(count int32) []byte {
- if r.err != nil {
- return nil
- }
-
- r.msgBytesRemaining -= count
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return nil
- }
-
- b := make([]byte, int(count))
-
- _, err := io.ReadFull(r.reader, b)
- if err != nil {
- r.fatal(err)
- return nil
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readBytes", "value", b, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return b
-}
diff --git a/vendor/github.com/jackc/pgx/pgio/doc.go b/vendor/github.com/jackc/pgx/pgio/doc.go
new file mode 100644
index 0000000..ef2dcc7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgio/doc.go
@@ -0,0 +1,6 @@
+// Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
+/*
+pgio provides functions for appending integers to a []byte while doing byte
+order conversion.
+*/
+package pgio
diff --git a/vendor/github.com/jackc/pgx/pgio/write.go b/vendor/github.com/jackc/pgx/pgio/write.go
new file mode 100644
index 0000000..96aedf9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgio/write.go
@@ -0,0 +1,40 @@
+package pgio
+
+import "encoding/binary"
+
+func AppendUint16(buf []byte, n uint16) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0)
+ binary.BigEndian.PutUint16(buf[wp:], n)
+ return buf
+}
+
+func AppendUint32(buf []byte, n uint32) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0)
+ binary.BigEndian.PutUint32(buf[wp:], n)
+ return buf
+}
+
+func AppendUint64(buf []byte, n uint64) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
+ binary.BigEndian.PutUint64(buf[wp:], n)
+ return buf
+}
+
+func AppendInt16(buf []byte, n int16) []byte {
+ return AppendUint16(buf, uint16(n))
+}
+
+func AppendInt32(buf []byte, n int32) []byte {
+ return AppendUint32(buf, uint32(n))
+}
+
+func AppendInt64(buf []byte, n int64) []byte {
+ return AppendUint64(buf, uint64(n))
+}
+
+func SetInt32(buf []byte, n int32) {
+ binary.BigEndian.PutUint32(buf, uint32(n))
+}
diff --git a/vendor/github.com/jackc/pgx/pgpass_test.go b/vendor/github.com/jackc/pgx/pgpass_test.go
deleted file mode 100644
index f6094c8..0000000
--- a/vendor/github.com/jackc/pgx/pgpass_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package pgx
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-)
-
-func unescape(s string) string {
- s = strings.Replace(s, `\:`, `:`, -1)
- s = strings.Replace(s, `\\`, `\`, -1)
- return s
-}
-
-var passfile = [][]string{
- []string{"test1", "5432", "larrydb", "larry", "whatstheidea"},
- []string{"test1", "5432", "moedb", "moe", "imbecile"},
- []string{"test1", "5432", "curlydb", "curly", "nyuknyuknyuk"},
- []string{"test2", "5432", "*", "shemp", "heymoe"},
- []string{"test2", "5432", "*", "*", `test\\ing\:`},
-}
-
-func TestPGPass(t *testing.T) {
- tf, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatal(err)
- }
- defer tf.Close()
- defer os.Remove(tf.Name())
- os.Setenv("PGPASSFILE", tf.Name())
- for _, l := range passfile {
- _, err := fmt.Fprintln(tf, strings.Join(l, `:`))
- if err != nil {
- t.Fatal(err)
- }
- }
- if err = tf.Close(); err != nil {
- t.Fatal(err)
- }
- for i, l := range passfile {
- cfg := ConnConfig{Host: l[0], Database: l[2], User: l[3]}
- found := pgpass(&cfg)
- if !found {
- t.Fatalf("Entry %v not found", i)
- }
- if cfg.Password != unescape(l[4]) {
- t.Fatalf(`Password mismatch entry %v want %s got %s`, i, unescape(l[4]), cfg.Password)
- }
- }
- cfg := ConnConfig{Host: "derp", Database: "herp", User: "joe"}
- found := pgpass(&cfg)
- if found {
- t.Fatal("bad found")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/authentication.go b/vendor/github.com/jackc/pgx/pgproto3/authentication.go
new file mode 100644
index 0000000..77750b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/authentication.go
@@ -0,0 +1,54 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const (
+ AuthTypeOk = 0
+ AuthTypeCleartextPassword = 3
+ AuthTypeMD5Password = 5
+)
+
+type Authentication struct {
+ Type uint32
+
+ // MD5Password fields
+ Salt [4]byte
+}
+
+func (*Authentication) Backend() {}
+
+func (dst *Authentication) Decode(src []byte) error {
+ *dst = Authentication{Type: binary.BigEndian.Uint32(src[:4])}
+
+ switch dst.Type {
+ case AuthTypeOk:
+ case AuthTypeCleartextPassword:
+ case AuthTypeMD5Password:
+ copy(dst.Salt[:], src[4:8])
+ default:
+ return errors.Errorf("unknown authentication type: %d", dst.Type)
+ }
+
+ return nil
+}
+
+func (src *Authentication) Encode(dst []byte) []byte {
+ dst = append(dst, 'R')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+ dst = pgio.AppendUint32(dst, src.Type)
+
+ switch src.Type {
+ case AuthTypeMD5Password:
+ dst = append(dst, src.Salt[:]...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/backend.go b/vendor/github.com/jackc/pgx/pgproto3/backend.go
new file mode 100644
index 0000000..8f3c347
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/backend.go
@@ -0,0 +1,110 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/jackc/pgx/chunkreader"
+ "github.com/pkg/errors"
+)
+
+type Backend struct {
+ cr *chunkreader.ChunkReader
+ w io.Writer
+
+ // Frontend message flyweights
+ bind Bind
+ _close Close
+ describe Describe
+ execute Execute
+ flush Flush
+ parse Parse
+ passwordMessage PasswordMessage
+ query Query
+ startupMessage StartupMessage
+ sync Sync
+ terminate Terminate
+
+ bodyLen int
+ msgType byte
+ partialMsg bool
+}
+
+func NewBackend(r io.Reader, w io.Writer) (*Backend, error) {
+ cr := chunkreader.NewChunkReader(r)
+ return &Backend{cr: cr, w: w}, nil
+}
+
+func (b *Backend) Send(msg BackendMessage) error {
+ _, err := b.w.Write(msg.Encode(nil))
+ return err
+}
+
+func (b *Backend) ReceiveStartupMessage() (*StartupMessage, error) {
+ buf, err := b.cr.Next(4)
+ if err != nil {
+ return nil, err
+ }
+ msgSize := int(binary.BigEndian.Uint32(buf) - 4)
+
+ buf, err = b.cr.Next(msgSize)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.startupMessage.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return &b.startupMessage, nil
+}
+
+func (b *Backend) Receive() (FrontendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, err
+ }
+
+ b.msgType = header[0]
+ b.bodyLen = int(binary.BigEndian.Uint32(header[1:])) - 4
+ b.partialMsg = true
+ }
+
+ var msg FrontendMessage
+ switch b.msgType {
+ case 'B':
+ msg = &b.bind
+ case 'C':
+ msg = &b._close
+ case 'D':
+ msg = &b.describe
+ case 'E':
+ msg = &b.execute
+ case 'H':
+ msg = &b.flush
+ case 'P':
+ msg = &b.parse
+ case 'p':
+ msg = &b.passwordMessage
+ case 'Q':
+ msg = &b.query
+ case 'S':
+ msg = &b.sync
+ case 'X':
+ msg = &b.terminate
+ default:
+ return nil, errors.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, err
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ return msg, err
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go b/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go
new file mode 100644
index 0000000..5a478f1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type BackendKeyData struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+func (*BackendKeyData) Backend() {}
+
+func (dst *BackendKeyData) Decode(src []byte) error {
+ if len(src) != 8 {
+ return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[:4])
+ dst.SecretKey = binary.BigEndian.Uint32(src[4:])
+
+ return nil
+}
+
+func (src *BackendKeyData) Encode(dst []byte) []byte {
+ dst = append(dst, 'K')
+ dst = pgio.AppendUint32(dst, 12)
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return dst
+}
+
+func (src *BackendKeyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "BackendKeyData",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/big_endian.go b/vendor/github.com/jackc/pgx/pgproto3/big_endian.go
new file mode 100644
index 0000000..f7bdb97
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/big_endian.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+)
+
+type BigEndianBuf [8]byte
+
+func (b BigEndianBuf) Int16(n int16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, uint16(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint16(n uint16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int32(n int32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, uint32(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint32(n uint32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int64(n int64) []byte {
+ buf := b[0:8]
+ binary.BigEndian.PutUint64(buf, uint64(n))
+ return buf
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/bind.go b/vendor/github.com/jackc/pgx/pgproto3/bind.go
new file mode 100644
index 0000000..cceee6a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/bind.go
@@ -0,0 +1,171 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Bind struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters [][]byte
+ ResultFormatCodes []int16
+}
+
+func (*Bind) Frontend() {}
+
+func (dst *Bind) Decode(src []byte) error {
+ *dst = Bind{}
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.DestinationPortal = string(src[:idx])
+ rp := idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.PreparedStatement = string(src[rp : rp+idx])
+ rp += idx + 1
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterFormatCodeCount > 0 {
+ dst.ParameterFormatCodes = make([]int16, parameterFormatCodeCount)
+
+ if len(src[rp:]) < len(dst.ParameterFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < parameterFormatCodeCount; i++ {
+ dst.ParameterFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterCount > 0 {
+ dst.Parameters = make([][]byte, parameterCount)
+
+ for i := 0; i < parameterCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if msgSize == -1 {
+ continue
+ }
+
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ dst.Parameters[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ resultFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.ResultFormatCodes = make([]int16, resultFormatCodeCount)
+ if len(src[rp:]) < len(dst.ResultFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < resultFormatCodeCount; i++ {
+ dst.ResultFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return nil
+}
+
+func (src *Bind) Encode(dst []byte) []byte {
+ dst = append(dst, 'B')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.DestinationPortal...)
+ dst = append(dst, 0)
+ dst = append(dst, src.PreparedStatement...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterFormatCodes)))
+ for _, fc := range src.ParameterFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Parameters)))
+ for _, p := range src.Parameters {
+ if p == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(p)))
+ dst = append(dst, p...)
+ }
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ResultFormatCodes)))
+ for _, fc := range src.ResultFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Bind) MarshalJSON() ([]byte, error) {
+ formattedParameters := make([]map[string]string, len(src.Parameters))
+ for i, p := range src.Parameters {
+ if p == nil {
+ continue
+ }
+
+ if src.ParameterFormatCodes[i] == 0 {
+ formattedParameters[i] = map[string]string{"text": string(p)}
+ } else {
+ formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(p)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }{
+ Type: "Bind",
+ DestinationPortal: src.DestinationPortal,
+ PreparedStatement: src.PreparedStatement,
+ ParameterFormatCodes: src.ParameterFormatCodes,
+ Parameters: formattedParameters,
+ ResultFormatCodes: src.ResultFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go b/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go
new file mode 100644
index 0000000..6036051
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type BindComplete struct{}
+
+func (*BindComplete) Backend() {}
+
+func (dst *BindComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "BindComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *BindComplete) Encode(dst []byte) []byte {
+ return append(dst, '2', 0, 0, 0, 4)
+}
+
+func (src *BindComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "BindComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/close.go b/vendor/github.com/jackc/pgx/pgproto3/close.go
new file mode 100644
index 0000000..5ff4c88
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/close.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Close struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+func (*Close) Frontend() {}
+
+func (dst *Close) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+func (src *Close) Encode(dst []byte) []byte {
+ dst = append(dst, 'C')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Close) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Close",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/close_complete.go b/vendor/github.com/jackc/pgx/pgproto3/close_complete.go
new file mode 100644
index 0000000..db793c9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/close_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CloseComplete struct{}
+
+func (*CloseComplete) Backend() {}
+
+func (dst *CloseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CloseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *CloseComplete) Encode(dst []byte) []byte {
+ return append(dst, '3', 0, 0, 0, 4)
+}
+
+func (src *CloseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CloseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/command_complete.go b/vendor/github.com/jackc/pgx/pgproto3/command_complete.go
new file mode 100644
index 0000000..8584853
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/command_complete.go
@@ -0,0 +1,48 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CommandComplete struct {
+ CommandTag string
+}
+
+func (*CommandComplete) Backend() {}
+
+func (dst *CommandComplete) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete"}
+ }
+
+ dst.CommandTag = string(src[:idx])
+
+ return nil
+}
+
+func (src *CommandComplete) Encode(dst []byte) []byte {
+ dst = append(dst, 'C')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.CommandTag...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CommandComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ CommandTag string
+ }{
+ Type: "CommandComplete",
+ CommandTag: src.CommandTag,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go
new file mode 100644
index 0000000..2862a34
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyBothResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyBothResponse) Backend() {}
+
+func (dst *CopyBothResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyBothResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyBothResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'W')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyBothResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyBothResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_data.go b/vendor/github.com/jackc/pgx/pgproto3/copy_data.go
new file mode 100644
index 0000000..fab139e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_data.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyData struct {
+ Data []byte
+}
+
+func (*CopyData) Backend() {}
+func (*CopyData) Frontend() {}
+
+func (dst *CopyData) Decode(src []byte) error {
+ dst.Data = src
+ return nil
+}
+
+func (src *CopyData) Encode(dst []byte) []byte {
+ dst = append(dst, 'd')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.Data)))
+ dst = append(dst, src.Data...)
+ return dst
+}
+
+func (src *CopyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "CopyData",
+ Data: hex.EncodeToString(src.Data),
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go
new file mode 100644
index 0000000..54083cd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyInResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyInResponse) Backend() {}
+
+func (dst *CopyInResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyInResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyInResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'G')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyInResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyInResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go
new file mode 100644
index 0000000..eaa33b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyOutResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyOutResponse) Backend() {}
+
+func (dst *CopyOutResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyOutResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyOutResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'H')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyOutResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyOutResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/data_row.go b/vendor/github.com/jackc/pgx/pgproto3/data_row.go
new file mode 100644
index 0000000..e46d3cc
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/data_row.go
@@ -0,0 +1,112 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type DataRow struct {
+ Values [][]byte
+}
+
+func (*DataRow) Backend() {}
+
+func (dst *DataRow) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+ rp := 0
+ fieldCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ // If the capacity of the values slice is too small OR substantially too
+	// large reallocate. This is to avoid one row with many columns from
+ // permanently allocating memory.
+ if cap(dst.Values) < fieldCount || cap(dst.Values)-fieldCount > 32 {
+ newCap := 32
+ if newCap < fieldCount {
+ newCap = fieldCount
+ }
+ dst.Values = make([][]byte, fieldCount, newCap)
+ } else {
+ dst.Values = dst.Values[:fieldCount]
+ }
+
+ for i := 0; i < fieldCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if msgSize == -1 {
+ dst.Values[i] = nil
+ } else {
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ dst.Values[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ return nil
+}
+
+func (src *DataRow) Encode(dst []byte) []byte {
+ dst = append(dst, 'D')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Values)))
+ for _, v := range src.Values {
+ if v == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(v)))
+ dst = append(dst, v...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *DataRow) MarshalJSON() ([]byte, error) {
+ formattedValues := make([]map[string]string, len(src.Values))
+ for i, v := range src.Values {
+ if v == nil {
+ continue
+ }
+
+ var hasNonPrintable bool
+ for _, b := range v {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValues[i] = map[string]string{"binary": hex.EncodeToString(v)}
+ } else {
+ formattedValues[i] = map[string]string{"text": string(v)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Values []map[string]string
+ }{
+ Type: "DataRow",
+ Values: formattedValues,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/describe.go b/vendor/github.com/jackc/pgx/pgproto3/describe.go
new file mode 100644
index 0000000..bb7bc05
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/describe.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Describe struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+func (*Describe) Frontend() {}
+
+func (dst *Describe) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+func (src *Describe) Encode(dst []byte) []byte {
+ dst = append(dst, 'D')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Describe) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Describe",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go b/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go
new file mode 100644
index 0000000..d283b06
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type EmptyQueryResponse struct{}
+
+func (*EmptyQueryResponse) Backend() {}
+
+func (dst *EmptyQueryResponse) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "EmptyQueryResponse", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *EmptyQueryResponse) Encode(dst []byte) []byte {
+ return append(dst, 'I', 0, 0, 0, 4)
+}
+
+func (src *EmptyQueryResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "EmptyQueryResponse",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/error_response.go b/vendor/github.com/jackc/pgx/pgproto3/error_response.go
new file mode 100644
index 0000000..160234f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/error_response.go
@@ -0,0 +1,197 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "strconv"
+)
+
+type ErrorResponse struct {
+ Severity string
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+}
+
+func (*ErrorResponse) Backend() {}
+
+func (dst *ErrorResponse) Decode(src []byte) error {
+ *dst = ErrorResponse{}
+
+ buf := bytes.NewBuffer(src)
+
+ for {
+ k, err := buf.ReadByte()
+ if err != nil {
+ return err
+ }
+ if k == 0 {
+ break
+ }
+
+ vb, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ v := string(vb[:len(vb)-1])
+
+ switch k {
+ case 'S':
+ dst.Severity = v
+ case 'C':
+ dst.Code = v
+ case 'M':
+ dst.Message = v
+ case 'D':
+ dst.Detail = v
+ case 'H':
+ dst.Hint = v
+ case 'P':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Position = int32(n)
+ case 'p':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.InternalPosition = int32(n)
+ case 'q':
+ dst.InternalQuery = v
+ case 'W':
+ dst.Where = v
+ case 's':
+ dst.SchemaName = v
+ case 't':
+ dst.TableName = v
+ case 'c':
+ dst.ColumnName = v
+ case 'd':
+ dst.DataTypeName = v
+ case 'n':
+ dst.ConstraintName = v
+ case 'F':
+ dst.File = v
+ case 'L':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Line = int32(n)
+ case 'R':
+ dst.Routine = v
+
+ default:
+ if dst.UnknownFields == nil {
+ dst.UnknownFields = make(map[byte]string)
+ }
+ dst.UnknownFields[k] = v
+ }
+ }
+
+ return nil
+}
+
+func (src *ErrorResponse) Encode(dst []byte) []byte {
+ return append(dst, src.marshalBinary('E')...)
+}
+
+func (src *ErrorResponse) marshalBinary(typeByte byte) []byte {
+ var bigEndian BigEndianBuf
+ buf := &bytes.Buffer{}
+
+ buf.WriteByte(typeByte)
+ buf.Write(bigEndian.Uint32(0))
+
+ if src.Severity != "" {
+ buf.WriteString(src.Severity)
+ buf.WriteByte(0)
+ }
+ if src.Code != "" {
+ buf.WriteString(src.Code)
+ buf.WriteByte(0)
+ }
+ if src.Message != "" {
+ buf.WriteString(src.Message)
+ buf.WriteByte(0)
+ }
+ if src.Detail != "" {
+ buf.WriteString(src.Detail)
+ buf.WriteByte(0)
+ }
+ if src.Hint != "" {
+ buf.WriteString(src.Hint)
+ buf.WriteByte(0)
+ }
+ if src.Position != 0 {
+ buf.WriteString(strconv.Itoa(int(src.Position)))
+ buf.WriteByte(0)
+ }
+ if src.InternalPosition != 0 {
+ buf.WriteString(strconv.Itoa(int(src.InternalPosition)))
+ buf.WriteByte(0)
+ }
+ if src.InternalQuery != "" {
+ buf.WriteString(src.InternalQuery)
+ buf.WriteByte(0)
+ }
+ if src.Where != "" {
+ buf.WriteString(src.Where)
+ buf.WriteByte(0)
+ }
+ if src.SchemaName != "" {
+ buf.WriteString(src.SchemaName)
+ buf.WriteByte(0)
+ }
+ if src.TableName != "" {
+ buf.WriteString(src.TableName)
+ buf.WriteByte(0)
+ }
+ if src.ColumnName != "" {
+ buf.WriteString(src.ColumnName)
+ buf.WriteByte(0)
+ }
+ if src.DataTypeName != "" {
+ buf.WriteString(src.DataTypeName)
+ buf.WriteByte(0)
+ }
+ if src.ConstraintName != "" {
+ buf.WriteString(src.ConstraintName)
+ buf.WriteByte(0)
+ }
+ if src.File != "" {
+ buf.WriteString(src.File)
+ buf.WriteByte(0)
+ }
+ if src.Line != 0 {
+ buf.WriteString(strconv.Itoa(int(src.Line)))
+ buf.WriteByte(0)
+ }
+ if src.Routine != "" {
+ buf.WriteString(src.Routine)
+ buf.WriteByte(0)
+ }
+
+ for k, v := range src.UnknownFields {
+ buf.WriteByte(k)
+ buf.WriteByte(0)
+ buf.WriteString(v)
+ buf.WriteByte(0)
+ }
+ buf.WriteByte(0)
+
+ binary.BigEndian.PutUint32(buf.Bytes()[1:5], uint32(buf.Len()-1))
+
+ return buf.Bytes()
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/execute.go b/vendor/github.com/jackc/pgx/pgproto3/execute.go
new file mode 100644
index 0000000..76da994
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/execute.go
@@ -0,0 +1,60 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Execute struct {
+ Portal string
+ MaxRows uint32
+}
+
+func (*Execute) Frontend() {}
+
+func (dst *Execute) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Portal = string(b[:len(b)-1])
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Execute"}
+ }
+ dst.MaxRows = binary.BigEndian.Uint32(buf.Next(4))
+
+ return nil
+}
+
+func (src *Execute) Encode(dst []byte) []byte {
+ dst = append(dst, 'E')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Portal...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, src.MaxRows)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Execute) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Portal string
+ MaxRows uint32
+ }{
+ Type: "Execute",
+ Portal: src.Portal,
+ MaxRows: src.MaxRows,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/flush.go b/vendor/github.com/jackc/pgx/pgproto3/flush.go
new file mode 100644
index 0000000..7fd5e98
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/flush.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Flush struct{}
+
+func (*Flush) Frontend() {}
+
+func (dst *Flush) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Flush", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Flush) Encode(dst []byte) []byte {
+ return append(dst, 'H', 0, 0, 0, 4)
+}
+
+func (src *Flush) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Flush",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/frontend.go b/vendor/github.com/jackc/pgx/pgproto3/frontend.go
new file mode 100644
index 0000000..d803d36
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/frontend.go
@@ -0,0 +1,122 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/jackc/pgx/chunkreader"
+ "github.com/pkg/errors"
+)
+
+type Frontend struct {
+ cr *chunkreader.ChunkReader
+ w io.Writer
+
+ // Backend message flyweights
+ authentication Authentication
+ backendKeyData BackendKeyData
+ bindComplete BindComplete
+ closeComplete CloseComplete
+ commandComplete CommandComplete
+ copyBothResponse CopyBothResponse
+ copyData CopyData
+ copyInResponse CopyInResponse
+ copyOutResponse CopyOutResponse
+ dataRow DataRow
+ emptyQueryResponse EmptyQueryResponse
+ errorResponse ErrorResponse
+ functionCallResponse FunctionCallResponse
+ noData NoData
+ noticeResponse NoticeResponse
+ notificationResponse NotificationResponse
+ parameterDescription ParameterDescription
+ parameterStatus ParameterStatus
+ parseComplete ParseComplete
+ readyForQuery ReadyForQuery
+ rowDescription RowDescription
+
+ bodyLen int
+ msgType byte
+ partialMsg bool
+}
+
+func NewFrontend(r io.Reader, w io.Writer) (*Frontend, error) {
+ cr := chunkreader.NewChunkReader(r)
+ return &Frontend{cr: cr, w: w}, nil
+}
+
+func (b *Frontend) Send(msg FrontendMessage) error {
+ _, err := b.w.Write(msg.Encode(nil))
+ return err
+}
+
+func (b *Frontend) Receive() (BackendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, err
+ }
+
+ b.msgType = header[0]
+ b.bodyLen = int(binary.BigEndian.Uint32(header[1:])) - 4
+ b.partialMsg = true
+ }
+
+ var msg BackendMessage
+ switch b.msgType {
+ case '1':
+ msg = &b.parseComplete
+ case '2':
+ msg = &b.bindComplete
+ case '3':
+ msg = &b.closeComplete
+ case 'A':
+ msg = &b.notificationResponse
+ case 'C':
+ msg = &b.commandComplete
+ case 'd':
+ msg = &b.copyData
+ case 'D':
+ msg = &b.dataRow
+ case 'E':
+ msg = &b.errorResponse
+ case 'G':
+ msg = &b.copyInResponse
+ case 'H':
+ msg = &b.copyOutResponse
+ case 'I':
+ msg = &b.emptyQueryResponse
+ case 'K':
+ msg = &b.backendKeyData
+ case 'n':
+ msg = &b.noData
+ case 'N':
+ msg = &b.noticeResponse
+ case 'R':
+ msg = &b.authentication
+ case 'S':
+ msg = &b.parameterStatus
+ case 't':
+ msg = &b.parameterDescription
+ case 'T':
+ msg = &b.rowDescription
+ case 'V':
+ msg = &b.functionCallResponse
+ case 'W':
+ msg = &b.copyBothResponse
+ case 'Z':
+ msg = &b.readyForQuery
+ default:
+ return nil, errors.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, err
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ return msg, err
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go b/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go
new file mode 100644
index 0000000..bb325b6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go
@@ -0,0 +1,78 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type FunctionCallResponse struct {
+ Result []byte
+}
+
+func (*FunctionCallResponse) Backend() {}
+
+func (dst *FunctionCallResponse) Decode(src []byte) error {
+ if len(src) < 4 {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+ rp := 0
+ resultSize := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ if resultSize == -1 {
+ dst.Result = nil
+ return nil
+ }
+
+ if len(src[rp:]) != resultSize {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+
+ dst.Result = src[rp:]
+ return nil
+}
+
+func (src *FunctionCallResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'V')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ if src.Result == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(src.Result)))
+ dst = append(dst, src.Result...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *FunctionCallResponse) MarshalJSON() ([]byte, error) {
+ var formattedValue map[string]string
+ var hasNonPrintable bool
+ for _, b := range src.Result {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValue = map[string]string{"binary": hex.EncodeToString(src.Result)}
+ } else {
+ formattedValue = map[string]string{"text": string(src.Result)}
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Result map[string]string
+ }{
+ Type: "FunctionCallResponse",
+ Result: formattedValue,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/no_data.go b/vendor/github.com/jackc/pgx/pgproto3/no_data.go
new file mode 100644
index 0000000..1fb47c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/no_data.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type NoData struct{}
+
+func (*NoData) Backend() {}
+
+func (dst *NoData) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "NoData", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *NoData) Encode(dst []byte) []byte {
+ return append(dst, 'n', 0, 0, 0, 4)
+}
+
+func (src *NoData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "NoData",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/notice_response.go b/vendor/github.com/jackc/pgx/pgproto3/notice_response.go
new file mode 100644
index 0000000..e4595aa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/notice_response.go
@@ -0,0 +1,13 @@
+package pgproto3
+
+type NoticeResponse ErrorResponse
+
+func (*NoticeResponse) Backend() {}
+
+func (dst *NoticeResponse) Decode(src []byte) error {
+ return (*ErrorResponse)(dst).Decode(src)
+}
+
+func (src *NoticeResponse) Encode(dst []byte) []byte {
+ return append(dst, (*ErrorResponse)(src).marshalBinary('N')...)
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/notification_response.go b/vendor/github.com/jackc/pgx/pgproto3/notification_response.go
new file mode 100644
index 0000000..b14007b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/notification_response.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type NotificationResponse struct {
+ PID uint32
+ Channel string
+ Payload string
+}
+
+func (*NotificationResponse) Backend() {}
+
+func (dst *NotificationResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ pid := binary.BigEndian.Uint32(buf.Next(4))
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ channel := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ payload := string(b[:len(b)-1])
+
+ *dst = NotificationResponse{PID: pid, Channel: channel, Payload: payload}
+ return nil
+}
+
+func (src *NotificationResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'A')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Channel...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Payload...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *NotificationResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ PID uint32
+ Channel string
+ Payload string
+ }{
+ Type: "NotificationResponse",
+ PID: src.PID,
+ Channel: src.Channel,
+ Payload: src.Payload,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go b/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go
new file mode 100644
index 0000000..1fa3c92
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type ParameterDescription struct {
+ ParameterOIDs []uint32
+}
+
+func (*ParameterDescription) Backend() {}
+
+func (dst *ParameterDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "ParameterDescription"}
+ }
+
+ // Reported parameter count will be incorrect when number of args is greater than uint16
+ buf.Next(2)
+ // Instead infer parameter count by remaining size of message
+ parameterCount := buf.Len() / 4
+
+ *dst = ParameterDescription{ParameterOIDs: make([]uint32, parameterCount)}
+
+ for i := 0; i < parameterCount; i++ {
+ dst.ParameterOIDs[i] = binary.BigEndian.Uint32(buf.Next(4))
+ }
+
+ return nil
+}
+
+func (src *ParameterDescription) Encode(dst []byte) []byte {
+ dst = append(dst, 't')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *ParameterDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ParameterOIDs []uint32
+ }{
+ Type: "ParameterDescription",
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go b/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go
new file mode 100644
index 0000000..b3bac33
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type ParameterStatus struct {
+ Name string
+ Value string
+}
+
+func (*ParameterStatus) Backend() {}
+
+func (dst *ParameterStatus) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ name := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ value := string(b[:len(b)-1])
+
+ *dst = ParameterStatus{Name: name, Value: value}
+ return nil
+}
+
+func (src *ParameterStatus) Encode(dst []byte) []byte {
+ dst = append(dst, 'S')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Value...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (ps *ParameterStatus) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Value string
+ }{
+ Type: "ParameterStatus",
+ Name: ps.Name,
+ Value: ps.Value,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parse.go b/vendor/github.com/jackc/pgx/pgproto3/parse.go
new file mode 100644
index 0000000..ca4834c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parse.go
@@ -0,0 +1,83 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Parse struct {
+ Name string
+ Query string
+ ParameterOIDs []uint32
+}
+
+func (*Parse) Frontend() {}
+
+func (dst *Parse) Decode(src []byte) error {
+ *dst = Parse{}
+
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Name = string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Query = string(b[:len(b)-1])
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ parameterOIDCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ for i := 0; i < parameterOIDCount; i++ {
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ dst.ParameterOIDs = append(dst.ParameterOIDs, binary.BigEndian.Uint32(buf.Next(4)))
+ }
+
+ return nil
+}
+
+func (src *Parse) Encode(dst []byte) []byte {
+ dst = append(dst, 'P')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Query...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Parse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Query string
+ ParameterOIDs []uint32
+ }{
+ Type: "Parse",
+ Name: src.Name,
+ Query: src.Query,
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go b/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go
new file mode 100644
index 0000000..462a89b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ParseComplete struct{}
+
+func (*ParseComplete) Backend() {}
+
+func (dst *ParseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "ParseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *ParseComplete) Encode(dst []byte) []byte {
+ return append(dst, '1', 0, 0, 0, 4)
+}
+
+func (src *ParseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "ParseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/password_message.go b/vendor/github.com/jackc/pgx/pgproto3/password_message.go
new file mode 100644
index 0000000..2ad3fe4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/password_message.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type PasswordMessage struct {
+ Password string
+}
+
+func (*PasswordMessage) Frontend() {}
+
+func (dst *PasswordMessage) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Password = string(b[:len(b)-1])
+
+ return nil
+}
+
+func (src *PasswordMessage) Encode(dst []byte) []byte {
+ dst = append(dst, 'p')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.Password)+1))
+
+ dst = append(dst, src.Password...)
+ dst = append(dst, 0)
+
+ return dst
+}
+
+func (src *PasswordMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Password string
+ }{
+ Type: "PasswordMessage",
+ Password: src.Password,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go b/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go
new file mode 100644
index 0000000..fe7b085
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go
@@ -0,0 +1,42 @@
+package pgproto3
+
+import "fmt"
+
+// Message is the interface implemented by an object that can decode and encode
+// a particular PostgreSQL message.
+type Message interface {
+ // Decode is allowed and expected to retain a reference to data after
+ // returning (unlike encoding.BinaryUnmarshaler).
+ Decode(data []byte) error
+
+ // Encode appends itself to dst and returns the new buffer.
+ Encode(dst []byte) []byte
+}
+
+type FrontendMessage interface {
+ Message
+ Frontend() // no-op method to distinguish frontend from backend methods
+}
+
+type BackendMessage interface {
+ Message
+ Backend() // no-op method to distinguish frontend from backend methods
+}
+
+type invalidMessageLenErr struct {
+ messageType string
+ expectedLen int
+ actualLen int
+}
+
+func (e *invalidMessageLenErr) Error() string {
+ return fmt.Sprintf("%s body must have length of %d, but it is %d", e.messageType, e.expectedLen, e.actualLen)
+}
+
+type invalidMessageFormatErr struct {
+ messageType string
+}
+
+func (e *invalidMessageFormatErr) Error() string {
+ return fmt.Sprintf("%s body is invalid", e.messageType)
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/query.go b/vendor/github.com/jackc/pgx/pgproto3/query.go
new file mode 100644
index 0000000..d80c0fb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/query.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Query struct {
+ String string
+}
+
+func (*Query) Frontend() {}
+
+func (dst *Query) Decode(src []byte) error {
+ i := bytes.IndexByte(src, 0)
+ if i != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "Query"}
+ }
+
+ dst.String = string(src[:i])
+
+ return nil
+}
+
+func (src *Query) Encode(dst []byte) []byte {
+ dst = append(dst, 'Q')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.String)+1))
+
+ dst = append(dst, src.String...)
+ dst = append(dst, 0)
+
+ return dst
+}
+
+func (src *Query) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ String string
+ }{
+ Type: "Query",
+ String: src.String,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go b/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go
new file mode 100644
index 0000000..63b902b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go
@@ -0,0 +1,35 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ReadyForQuery struct {
+ TxStatus byte
+}
+
+func (*ReadyForQuery) Backend() {}
+
+func (dst *ReadyForQuery) Decode(src []byte) error {
+ if len(src) != 1 {
+ return &invalidMessageLenErr{messageType: "ReadyForQuery", expectedLen: 1, actualLen: len(src)}
+ }
+
+ dst.TxStatus = src[0]
+
+ return nil
+}
+
+func (src *ReadyForQuery) Encode(dst []byte) []byte {
+ return append(dst, 'Z', 0, 0, 0, 5, src.TxStatus)
+}
+
+func (src *ReadyForQuery) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ TxStatus string
+ }{
+ Type: "ReadyForQuery",
+ TxStatus: string(src.TxStatus),
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/row_description.go b/vendor/github.com/jackc/pgx/pgproto3/row_description.go
new file mode 100644
index 0000000..d0df11b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/row_description.go
@@ -0,0 +1,100 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+const (
+ TextFormat = 0
+ BinaryFormat = 1
+)
+
+type FieldDescription struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier uint32
+ Format int16
+}
+
+type RowDescription struct {
+ Fields []FieldDescription
+}
+
+func (*RowDescription) Backend() {}
+
+func (dst *RowDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fieldCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ *dst = RowDescription{Fields: make([]FieldDescription, fieldCount)}
+
+ for i := 0; i < fieldCount; i++ {
+ var fd FieldDescription
+ bName, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ fd.Name = string(bName[:len(bName)-1])
+
+ // Since buf.Next() doesn't return an error if we hit the end of the buffer
+ // check Len ahead of time
+ if buf.Len() < 18 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+
+ fd.TableOID = binary.BigEndian.Uint32(buf.Next(4))
+ fd.TableAttributeNumber = binary.BigEndian.Uint16(buf.Next(2))
+ fd.DataTypeOID = binary.BigEndian.Uint32(buf.Next(4))
+ fd.DataTypeSize = int16(binary.BigEndian.Uint16(buf.Next(2)))
+ fd.TypeModifier = binary.BigEndian.Uint32(buf.Next(4))
+ fd.Format = int16(binary.BigEndian.Uint16(buf.Next(2)))
+
+ dst.Fields[i] = fd
+ }
+
+ return nil
+}
+
+func (src *RowDescription) Encode(dst []byte) []byte {
+ dst = append(dst, 'T')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Fields)))
+ for _, fd := range src.Fields {
+ dst = append(dst, fd.Name...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, fd.TableOID)
+ dst = pgio.AppendUint16(dst, fd.TableAttributeNumber)
+ dst = pgio.AppendUint32(dst, fd.DataTypeOID)
+ dst = pgio.AppendInt16(dst, fd.DataTypeSize)
+ dst = pgio.AppendUint32(dst, fd.TypeModifier)
+ dst = pgio.AppendInt16(dst, fd.Format)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *RowDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Fields []FieldDescription
+ }{
+ Type: "RowDescription",
+ Fields: src.Fields,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/startup_message.go b/vendor/github.com/jackc/pgx/pgproto3/startup_message.go
new file mode 100644
index 0000000..6c5d4f9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/startup_message.go
@@ -0,0 +1,97 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const (
+ ProtocolVersionNumber = 196608 // 3.0
+ sslRequestNumber = 80877103
+)
+
+type StartupMessage struct {
+ ProtocolVersion uint32
+ Parameters map[string]string
+}
+
+func (*StartupMessage) Frontend() {}
+
+func (dst *StartupMessage) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.Errorf("startup message too short")
+ }
+
+ dst.ProtocolVersion = binary.BigEndian.Uint32(src)
+ rp := 4
+
+ if dst.ProtocolVersion == sslRequestNumber {
+ return errors.Errorf("can't handle ssl connection request")
+ }
+
+ if dst.ProtocolVersion != ProtocolVersionNumber {
+ return errors.Errorf("Bad startup message version number. Expected %d, got %d", ProtocolVersionNumber, dst.ProtocolVersion)
+ }
+
+ dst.Parameters = make(map[string]string)
+ for {
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMesage"}
+ }
+ key := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMesage"}
+ }
+ value := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ dst.Parameters[key] = value
+
+ if len(src[rp:]) == 1 {
+ if src[rp] != 0 {
+ return errors.Errorf("Bad startup message last byte. Expected 0, got %d", src[rp])
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+func (src *StartupMessage) Encode(dst []byte) []byte {
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint32(dst, src.ProtocolVersion)
+ for k, v := range src.Parameters {
+ dst = append(dst, k...)
+ dst = append(dst, 0)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *StartupMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "StartupMessage",
+ ProtocolVersion: src.ProtocolVersion,
+ Parameters: src.Parameters,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/sync.go b/vendor/github.com/jackc/pgx/pgproto3/sync.go
new file mode 100644
index 0000000..85f4749
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/sync.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Sync struct{}
+
+func (*Sync) Frontend() {}
+
+func (dst *Sync) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Sync", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Sync) Encode(dst []byte) []byte {
+ return append(dst, 'S', 0, 0, 0, 4)
+}
+
+func (src *Sync) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Sync",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/terminate.go b/vendor/github.com/jackc/pgx/pgproto3/terminate.go
new file mode 100644
index 0000000..0a3310d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/terminate.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Terminate struct{}
+
+func (*Terminate) Frontend() {}
+
+func (dst *Terminate) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Terminate", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Terminate) Encode(dst []byte) []byte {
+ return append(dst, 'X', 0, 0, 0, 4)
+}
+
+func (src *Terminate) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Terminate",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/aclitem.go b/vendor/github.com/jackc/pgx/pgtype/aclitem.go
new file mode 100644
index 0000000..35269e9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/aclitem.go
@@ -0,0 +1,126 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+// ACLItem is used for PostgreSQL's aclitem data type. A sample aclitem
+// might look like this:
+//
+// postgres=arwdDxt/postgres
+//
+// Note, however, that because the user/role name part of an aclitem is
+// an identifier, it follows all the usual formatting rules for SQL
+// identifiers: if it contains spaces and other special characters,
+// it should appear in double-quotes:
+//
+// postgres=arwdDxt/"role with spaces"
+//
+type ACLItem struct {
+ String string
+ Status Status
+}
+
+func (dst *ACLItem) Set(src interface{}) error {
+ switch value := src.(type) {
+ case string:
+ *dst = ACLItem{String: value, Status: Present}
+ case *string:
+ if value == nil {
+ *dst = ACLItem{Status: Null}
+ } else {
+ *dst = ACLItem{String: *value, Status: Present}
+ }
+ default:
+ if originalSrc, ok := underlyingStringType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ACLItem", value)
+ }
+
+ return nil
+}
+
+func (dst *ACLItem) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.String
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ACLItem) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *string:
+ *v = src.String
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ACLItem) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ACLItem{Status: Null}
+ return nil
+ }
+
+ *dst = ACLItem{String: string(src), Status: Present}
+ return nil
+}
+
+func (src *ACLItem) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.String...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ACLItem) Scan(src interface{}) error {
+ if src == nil {
+ *dst = ACLItem{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ACLItem) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.String, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go b/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go
new file mode 100644
index 0000000..0a82929
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go
@@ -0,0 +1,212 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+type ACLItemArray struct {
+ Elements []ACLItem
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *ACLItemArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = ACLItemArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []string:
+ if value == nil {
+ *dst = ACLItemArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = ACLItemArray{Status: Present}
+ } else {
+ elements := make([]ACLItem, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = ACLItemArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ACLItemArray", value)
+ }
+
+ return nil
+}
+
+func (dst *ACLItemArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ACLItemArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ACLItemArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ACLItemArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []ACLItem
+
+ if len(uta.Elements) > 0 {
+ elements = make([]ACLItem, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem ACLItem
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = ACLItemArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (src *ACLItemArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ACLItemArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ACLItemArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/array.go b/vendor/github.com/jackc/pgx/pgtype/array.go
new file mode 100644
index 0000000..5b852ed
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/array.go
@@ -0,0 +1,352 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Information on the internals of PostgreSQL arrays can be found in
+// src/include/utils/array.h and src/backend/utils/adt/arrayfuncs.c. Of
+// particular interest is the array_send function.
+
// ArrayHeader is the fixed prefix of the PostgreSQL binary array wire
// format: dimension count, a contains-null flag, and the element type OID,
// followed by one (length, lower bound) pair per dimension. See
// src/backend/utils/adt/arrayfuncs.c (array_send) in the PostgreSQL source.
type ArrayHeader struct {
	ContainsNull bool             // true when at least one element is NULL
	ElementOID   int32            // OID of the element data type
	Dimensions   []ArrayDimension // one entry per array dimension
}

// ArrayDimension describes a single dimension of a PostgreSQL array.
type ArrayDimension struct {
	Length     int32 // number of elements in this dimension
	LowerBound int32 // index of the first element (PostgreSQL defaults to 1)
}
+
+func (dst *ArrayHeader) DecodeBinary(ci *ConnInfo, src []byte) (int, error) {
+ if len(src) < 12 {
+ return 0, errors.Errorf("array header too short: %d", len(src))
+ }
+
+ rp := 0
+
+ numDims := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.ContainsNull = binary.BigEndian.Uint32(src[rp:]) == 1
+ rp += 4
+
+ dst.ElementOID = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ if numDims > 0 {
+ dst.Dimensions = make([]ArrayDimension, numDims)
+ }
+ if len(src) < 12+numDims*8 {
+ return 0, errors.Errorf("array header too short for %d dimensions: %d", numDims, len(src))
+ }
+ for i := range dst.Dimensions {
+ dst.Dimensions[i].Length = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.Dimensions[i].LowerBound = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ }
+
+ return rp, nil
+}
+
+func (src *ArrayHeader) EncodeBinary(ci *ConnInfo, buf []byte) []byte {
+ buf = pgio.AppendInt32(buf, int32(len(src.Dimensions)))
+
+ var containsNull int32
+ if src.ContainsNull {
+ containsNull = 1
+ }
+ buf = pgio.AppendInt32(buf, containsNull)
+
+ buf = pgio.AppendInt32(buf, src.ElementOID)
+
+ for i := range src.Dimensions {
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].Length)
+ buf = pgio.AppendInt32(buf, src.Dimensions[i].LowerBound)
+ }
+
+ return buf
+}
+
// UntypedTextArray holds the elements and dimensions of a PostgreSQL
// text-format array before the elements have been decoded into a concrete
// type.
type UntypedTextArray struct {
	Elements   []string         // raw element text, unquoted and unescaped
	Dimensions []ArrayDimension // explicit dimensions if given, otherwise inferred from braces
}
+
+func ParseUntypedTextArray(src string) (*UntypedTextArray, error) {
+ dst := &UntypedTextArray{}
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ var explicitDimensions []ArrayDimension
+
+ // Array has explicit dimensions
+ if r == '[' {
+ buf.UnreadRune()
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r == '=' {
+ break
+ } else if r != '[' {
+ return nil, errors.Errorf("invalid array, expected '[' or '=' got %v", r)
+ }
+
+ lower, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r != ':' {
+ return nil, errors.Errorf("invalid array, expected ':' got %v", r)
+ }
+
+ upper, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r != ']' {
+ return nil, errors.Errorf("invalid array, expected ']' got %v", r)
+ }
+
+ explicitDimensions = append(explicitDimensions, ArrayDimension{LowerBound: lower, Length: upper - lower + 1})
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+ }
+
+ if r != '{' {
+ return nil, errors.Errorf("invalid array, expected '{': %v", err)
+ }
+
+ implicitDimensions := []ArrayDimension{{LowerBound: 1, Length: 0}}
+
+ // Consume all initial opening brackets. This provides number of dimensions.
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r == '{' {
+ implicitDimensions[len(implicitDimensions)-1].Length = 1
+ implicitDimensions = append(implicitDimensions, ArrayDimension{LowerBound: 1})
+ } else {
+ buf.UnreadRune()
+ break
+ }
+ }
+ currentDim := len(implicitDimensions) - 1
+ counterDim := currentDim
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ switch r {
+ case '{':
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ currentDim++
+ case ',':
+ case '}':
+ currentDim--
+ if currentDim < counterDim {
+ counterDim = currentDim
+ }
+ default:
+ buf.UnreadRune()
+ value, err := arrayParseValue(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array value: %v", err)
+ }
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ dst.Elements = append(dst.Elements, value)
+ }
+
+ if currentDim < 0 {
+ break
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, errors.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ if len(dst.Elements) == 0 {
+ dst.Dimensions = nil
+ } else if len(explicitDimensions) > 0 {
+ dst.Dimensions = explicitDimensions
+ } else {
+ dst.Dimensions = implicitDimensions
+ }
+
+ return dst, nil
+}
+
+func skipWhitespace(buf *bytes.Buffer) {
+ var r rune
+ var err error
+ for r, _, _ = buf.ReadRune(); unicode.IsSpace(r); r, _, _ = buf.ReadRune() {
+ }
+
+ if err != io.EOF {
+ buf.UnreadRune()
+ }
+}
+
+func arrayParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return arrayParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case ',', '}':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseInteger(buf *bytes.Buffer) (int32, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return 0, err
+ }
+
+ if '0' <= r && r <= '9' {
+ s.WriteRune(r)
+ } else {
+ buf.UnreadRune()
+ n, err := strconv.ParseInt(s.String(), 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(n), nil
+ }
+ }
+}
+
+func EncodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
+ var customDimensions bool
+ for _, dim := range dimensions {
+ if dim.LowerBound != 1 {
+ customDimensions = true
+ }
+ }
+
+ if !customDimensions {
+ return buf
+ }
+
+ for _, dim := range dimensions {
+ buf = append(buf, '[')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound), 10)...)
+ buf = append(buf, ':')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound+dim.Length-1), 10)...)
+ buf = append(buf, ']')
+ }
+
+ return append(buf, '=')
+}
+
// quoteArrayReplacer escapes the two characters that must be
// backslash-escaped inside a quoted array element: backslash and double
// quote.
var quoteArrayReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)

// quoteArrayElement wraps src in double quotes, escaping any embedded
// quotes and backslashes.
func quoteArrayElement(src string) string {
	return `"` + quoteArrayReplacer.Replace(src) + `"`
}

// QuoteArrayElementIfNeeded quotes src only when the bare form would be
// ambiguous in array text format: the empty string, the literal NULL in any
// case, a leading or trailing space, or any structural character { } , " \.
func QuoteArrayElementIfNeeded(src string) string {
	switch {
	case src == "":
		return quoteArrayElement(src)
	case len(src) == 4 && strings.ToLower(src) == "null":
		return quoteArrayElement(src)
	case src[0] == ' ' || src[len(src)-1] == ' ':
		return quoteArrayElement(src)
	case strings.ContainsAny(src, `{},"\`):
		return quoteArrayElement(src)
	}
	return src
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bit.go b/vendor/github.com/jackc/pgx/pgtype/bit.go
new file mode 100644
index 0000000..f892cee
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bit.go
@@ -0,0 +1,37 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// Bit represents the PostgreSQL fixed-length bit(n) type. It is
// representationally identical to Varbit; every method delegates to the
// Varbit implementation via a receiver conversion.
type Bit Varbit

// Set delegates to Varbit.Set.
func (dst *Bit) Set(src interface{}) error {
	return (*Varbit)(dst).Set(src)
}

// Get delegates to Varbit.Get.
func (dst *Bit) Get() interface{} {
	return (*Varbit)(dst).Get()
}

// AssignTo delegates to Varbit.AssignTo.
func (src *Bit) AssignTo(dst interface{}) error {
	return (*Varbit)(src).AssignTo(dst)
}

// DecodeBinary delegates to Varbit.DecodeBinary.
func (dst *Bit) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Varbit)(dst).DecodeBinary(ci, src)
}

// EncodeBinary delegates to Varbit.EncodeBinary.
func (src *Bit) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Varbit)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *Bit) Scan(src interface{}) error {
	return (*Varbit)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *Bit) Value() (driver.Value, error) {
	return (*Varbit)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bool.go b/vendor/github.com/jackc/pgx/pgtype/bool.go
new file mode 100644
index 0000000..3a3eef4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bool.go
@@ -0,0 +1,159 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
// Bool represents the PostgreSQL boolean type. Status distinguishes a
// present value from SQL NULL and the undefined (never-set) state.
type Bool struct {
	Bool   bool
	Status Status
}
+
+func (dst *Bool) Set(src interface{}) error {
+ switch value := src.(type) {
+ case bool:
+ *dst = Bool{Bool: value, Status: Present}
+ case string:
+ bb, err := strconv.ParseBool(value)
+ if err != nil {
+ return err
+ }
+ *dst = Bool{Bool: bb, Status: Present}
+ default:
+ if originalSrc, ok := underlyingBoolType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Bool", value)
+ }
+
+ return nil
+}
+
+func (dst *Bool) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Bool
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Bool) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *bool:
+ *v = src.Bool
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Bool) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bool{Status: Null}
+ return nil
+ }
+
+ if len(src) != 1 {
+ return errors.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ *dst = Bool{Bool: src[0] == 't', Status: Present}
+ return nil
+}
+
+func (dst *Bool) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bool{Status: Null}
+ return nil
+ }
+
+ if len(src) != 1 {
+ return errors.Errorf("invalid length for bool: %v", len(src))
+ }
+
+ *dst = Bool{Bool: src[0] == 1, Status: Present}
+ return nil
+}
+
+func (src *Bool) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if src.Bool {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+func (src *Bool) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if src.Bool {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bool) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Bool{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case bool:
+ *dst = Bool{Bool: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Bool) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Bool, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bool_array.go b/vendor/github.com/jackc/pgx/pgtype/bool_array.go
new file mode 100644
index 0000000..67dd92a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bool_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// BoolArray represents a PostgreSQL boolean[] value: the flattened
// elements, their dimensions, and a NULL/undefined status.
//
// NOTE(review): the *_array types in this package share an identical shape
// (compare BPCharArray) — presumably template-generated; keep them in sync.
type BoolArray struct {
	Elements   []Bool
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src into the BoolArray. A nil src or nil []bool maps to SQL
// NULL; an empty slice maps to a present, dimensionless array.
func (dst *BoolArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []bool:
		if value == nil {
			*dst = BoolArray{Status: Null}
		} else if len(value) == 0 {
			*dst = BoolArray{Status: Present}
		} else {
			elements := make([]Bool, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// A Go slice always becomes a single dimension with lower bound 1.
			*dst = BoolArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		// Retry with the underlying slice type (e.g. a named []bool type).
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to BoolArray", value)
	}

	return nil
}

// Get returns the array itself when present, nil for SQL NULL, or the
// Status value when undefined.
func (dst *BoolArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the elements into *[]bool, or into a destination
// reducible to one via GetAssignToDstType. Dimensionality is not preserved.
func (src *BoolArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]bool:
			*v = make([]bool, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText parses the text array format (e.g. `{t,f,NULL}`) by first
// splitting the literal into untyped string elements and dimensions, then
// decoding each element as a Bool.
func (dst *BoolArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Bool

	if len(uta.Elements) > 0 {
		elements = make([]Bool, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Bool
			var elemSrc []byte
			// The literal NULL marks a SQL NULL element; pass nil through to
			// the element decoder.
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = BoolArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary parses the binary array wire format: an ArrayHeader followed
// by a length-prefixed payload for each element in row-major order.
func (dst *BoolArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = BoolArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// The total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Bool, elementCount)

	for i := range elements {
		// Each element is prefixed with its byte length; -1 marks NULL, in
		// which case elemSrc stays nil and no payload bytes are consumed.
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = BoolArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText renders the array in text format, emitting '{'/'}' at
// dimension boundaries and quoting individual elements only when required.
func (src *BoolArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension whose span starts at element i.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension whose span ends at element i.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary renders the array in binary wire format: an ArrayHeader
// followed by each element as a length-prefixed payload (-1 for NULL).
func (src *BoolArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	// The element OID must be resolved from the connection's type map.
	if dt, ok := ci.DataTypeForName("bool"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "bool")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Reserve 4 bytes for the element length, then backfill once the
		// element is written. The -1 stays in place for NULL elements.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *BoolArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *BoolArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/box.go b/vendor/github.com/jackc/pgx/pgtype/box.go
new file mode 100644
index 0000000..83df049
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/box.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Box represents the PostgreSQL box geometric type: a rectangle described
// by two opposite corner points.
type Box struct {
	P      [2]Vec2
	Status Status
}
+
+func (dst *Box) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Box", src)
+}
+
+func (dst *Box) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Box) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Box) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Box{Status: Null}
+ return nil
+ }
+
+ if len(src) < 11 {
+ return errors.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ str := string(src[1:])
+
+ var end int
+ end = strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-1]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Box{P: [2]Vec2{{x1, y1}, {x2, y2}}, Status: Present}
+ return nil
+}
+
+func (dst *Box) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Box{Status: Null}
+ return nil
+ }
+
+ if len(src) != 32 {
+ return errors.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ *dst = Box{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Box) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%f,%f),(%f,%f)`,
+ src.P[0].X, src.P[0].Y, src.P[1].X, src.P[1].Y)...)
+ return buf, nil
+}
+
+func (src *Box) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].Y))
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Box) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Box{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Box) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bpchar.go b/vendor/github.com/jackc/pgx/pgtype/bpchar.go
new file mode 100644
index 0000000..2126318
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bpchar.go
@@ -0,0 +1,68 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// BPChar is fixed-length, blank padded char type
// character(n), char(n). It shares its representation with Text and
// delegates to it for everything except the single-rune AssignTo case.
type BPChar Text

// Set converts from src to dst.
func (dst *BPChar) Set(src interface{}) error {
	return (*Text)(dst).Set(src)
}

// Get returns underlying value
func (dst *BPChar) Get() interface{} {
	return (*Text)(dst).Get()
}

// AssignTo assigns from src to dst.
func (src *BPChar) AssignTo(dst interface{}) error {
	if src.Status == Present {
		switch v := dst.(type) {
		case *rune:
			// A *rune destination is only satisfied when the string is
			// exactly one rune long; otherwise fall through to Text.
			runes := []rune(src.String)
			if len(runes) == 1 {
				*v = runes[0]
				return nil
			}
		}
	}
	return (*Text)(src).AssignTo(dst)
}

// DecodeText delegates to Text.DecodeText.
func (dst *BPChar) DecodeText(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeText(ci, src)
}

// DecodeBinary delegates to Text.DecodeBinary.
func (dst *BPChar) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeBinary(ci, src)
}

// EncodeText delegates to Text.EncodeText.
func (src *BPChar) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeText(ci, buf)
}

// EncodeBinary delegates to Text.EncodeBinary.
func (src *BPChar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *BPChar) Scan(src interface{}) error {
	return (*Text)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *BPChar) Value() (driver.Value, error) {
	return (*Text)(src).Value()
}

// MarshalJSON delegates to Text.MarshalJSON.
func (src *BPChar) MarshalJSON() ([]byte, error) {
	return (*Text)(src).MarshalJSON()
}

// UnmarshalJSON delegates to Text.UnmarshalJSON.
func (dst *BPChar) UnmarshalJSON(b []byte) error {
	return (*Text)(dst).UnmarshalJSON(b)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go b/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go
new file mode 100644
index 0000000..1e6220f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// BPCharArray represents a PostgreSQL character(n)[] value: the flattened
// elements, their dimensions, and a NULL/undefined status.
//
// NOTE(review): the *_array types in this package share an identical shape
// (compare BoolArray) — presumably template-generated; keep them in sync.
type BPCharArray struct {
	Elements   []BPChar
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src into the BPCharArray. A nil src or nil []string maps to
// SQL NULL; an empty slice maps to a present, dimensionless array.
func (dst *BPCharArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []string:
		if value == nil {
			*dst = BPCharArray{Status: Null}
		} else if len(value) == 0 {
			*dst = BPCharArray{Status: Present}
		} else {
			elements := make([]BPChar, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// A Go slice always becomes a single dimension with lower bound 1.
			*dst = BPCharArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		// Retry with the underlying slice type (e.g. a named []string type).
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to BPCharArray", value)
	}

	return nil
}

// Get returns the array itself when present, nil for SQL NULL, or the
// Status value when undefined.
func (dst *BPCharArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the elements into *[]string, or into a destination
// reducible to one via GetAssignToDstType. Dimensionality is not preserved.
func (src *BPCharArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]string:
			*v = make([]string, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText parses the text array format by first splitting the literal
// into untyped string elements and dimensions, then decoding each element
// as a BPChar.
func (dst *BPCharArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []BPChar

	if len(uta.Elements) > 0 {
		elements = make([]BPChar, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem BPChar
			var elemSrc []byte
			// The literal NULL marks a SQL NULL element; pass nil through to
			// the element decoder.
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = BPCharArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary parses the binary array wire format: an ArrayHeader followed
// by a length-prefixed payload for each element in row-major order.
func (dst *BPCharArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = BPCharArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// The total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]BPChar, elementCount)

	for i := range elements {
		// Each element is prefixed with its byte length; -1 marks NULL, in
		// which case elemSrc stays nil and no payload bytes are consumed.
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = BPCharArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText renders the array in text format, emitting '{'/'}' at
// dimension boundaries and quoting individual elements only when required.
func (src *BPCharArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension whose span starts at element i.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension whose span ends at element i.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary renders the array in binary wire format: an ArrayHeader
// followed by each element as a length-prefixed payload (-1 for NULL).
func (src *BPCharArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	// The element OID must be resolved from the connection's type map.
	if dt, ok := ci.DataTypeForName("bpchar"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "bpchar")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Reserve 4 bytes for the element length, then backfill once the
		// element is written. The -1 stays in place for NULL elements.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *BPCharArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *BPCharArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bytea.go b/vendor/github.com/jackc/pgx/pgtype/bytea.go
new file mode 100644
index 0000000..c7117f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bytea.go
@@ -0,0 +1,156 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+
+ "github.com/pkg/errors"
+)
+
+type Bytea struct {
+ Bytes []byte
+ Status Status
+}
+
+func (dst *Bytea) Set(src interface{}) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case []byte:
+ if value != nil {
+ *dst = Bytea{Bytes: value, Status: Present}
+ } else {
+ *dst = Bytea{Status: Null}
+ }
+ default:
+ if originalSrc, ok := underlyingBytesType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Bytea", value)
+ }
+
+ return nil
+}
+
+func (dst *Bytea) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Bytes
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Bytea) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *[]byte:
+ buf := make([]byte, len(src.Bytes))
+ copy(buf, src.Bytes)
+ *v = buf
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText only supports the hex format. This has been the default since
+// PostgreSQL 9.0.
+func (dst *Bytea) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ if len(src) < 2 || src[0] != '\\' || src[1] != 'x' {
+ return errors.Errorf("invalid hex format")
+ }
+
+ buf := make([]byte, (len(src)-2)/2)
+ _, err := hex.Decode(buf, src[2:])
+ if err != nil {
+ return err
+ }
+
+ *dst = Bytea{Bytes: buf, Status: Present}
+ return nil
+}
+
+func (dst *Bytea) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ *dst = Bytea{Bytes: src, Status: Present}
+ return nil
+}
+
+func (src *Bytea) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(src.Bytes)...)
+ return buf, nil
+}
+
+func (src *Bytea) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Bytes...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bytea) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ *dst = Bytea{Bytes: buf, Status: Present}
+ return nil
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Bytea) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Bytes, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bytea_array.go b/vendor/github.com/jackc/pgx/pgtype/bytea_array.go
new file mode 100644
index 0000000..c8eb566
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bytea_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type ByteaArray struct {
+ Elements []Bytea
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *ByteaArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case [][]byte:
+ if value == nil {
+ *dst = ByteaArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = ByteaArray{Status: Present}
+ } else {
+ elements := make([]Bytea, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = ByteaArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ByteaArray", value)
+ }
+
+ return nil
+}
+
+func (dst *ByteaArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ByteaArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[][]byte:
+ *v = make([][]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ByteaArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Bytea
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Bytea, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Bytea
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = ByteaArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *ByteaArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = ByteaArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Bytea, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = ByteaArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *ByteaArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *ByteaArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("bytea"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "bytea")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ByteaArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ByteaArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cid.go b/vendor/github.com/jackc/pgx/pgtype/cid.go
new file mode 100644
index 0000000..0ed54f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cid.go
@@ -0,0 +1,61 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// CID is PostgreSQL's Command Identifier type.
+//
+// When one does
+//
+// select cmin, cmax, * from some_table;
+//
+// it is the data type of the cmin and cmax hidden system columns.
+//
+// It is currently implemented as an unsigned four byte integer.
+// Its definition can be found in src/include/c.h as CommandId
+// in the PostgreSQL sources.
+type CID pguint32
+
+// Set converts from src to dst. Note that as CID is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *CID) Set(src interface{}) error {
+ return (*pguint32)(dst).Set(src)
+}
+
+func (dst *CID) Get() interface{} {
+ return (*pguint32)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as CID is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *CID) AssignTo(dst interface{}) error {
+ return (*pguint32)(src).AssignTo(dst)
+}
+
+func (dst *CID) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeText(ci, src)
+}
+
+func (dst *CID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeBinary(ci, src)
+}
+
+func (src *CID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeText(ci, buf)
+}
+
+func (src *CID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *CID) Scan(src interface{}) error {
+ return (*pguint32)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *CID) Value() (driver.Value, error) {
+ return (*pguint32)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cidr.go b/vendor/github.com/jackc/pgx/pgtype/cidr.go
new file mode 100644
index 0000000..519b9ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cidr.go
@@ -0,0 +1,31 @@
+package pgtype
+
+type CIDR Inet
+
+func (dst *CIDR) Set(src interface{}) error {
+ return (*Inet)(dst).Set(src)
+}
+
+func (dst *CIDR) Get() interface{} {
+ return (*Inet)(dst).Get()
+}
+
+func (src *CIDR) AssignTo(dst interface{}) error {
+ return (*Inet)(src).AssignTo(dst)
+}
+
+func (dst *CIDR) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Inet)(dst).DecodeText(ci, src)
+}
+
+func (dst *CIDR) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Inet)(dst).DecodeBinary(ci, src)
+}
+
+func (src *CIDR) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Inet)(src).EncodeText(ci, buf)
+}
+
+func (src *CIDR) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Inet)(src).EncodeBinary(ci, buf)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cidr_array.go b/vendor/github.com/jackc/pgx/pgtype/cidr_array.go
new file mode 100644
index 0000000..e4bb761
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cidr_array.go
@@ -0,0 +1,329 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "net"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type CIDRArray struct {
+ Elements []CIDR
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *CIDRArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []*net.IPNet:
+ if value == nil {
+ *dst = CIDRArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = CIDRArray{Status: Present}
+ } else {
+ elements := make([]CIDR, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = CIDRArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []net.IP:
+ if value == nil {
+ *dst = CIDRArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = CIDRArray{Status: Present}
+ } else {
+ elements := make([]CIDR, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = CIDRArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to CIDRArray", value)
+ }
+
+ return nil
+}
+
+func (dst *CIDRArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *CIDRArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]*net.IPNet:
+ *v = make([]*net.IPNet, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]net.IP:
+ *v = make([]net.IP, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *CIDRArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []CIDR
+
+ if len(uta.Elements) > 0 {
+ elements = make([]CIDR, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem CIDR
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = CIDRArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *CIDRArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = CIDRArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]CIDR, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = CIDRArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *CIDRArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *CIDRArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("cidr"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "cidr")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *CIDRArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *CIDRArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/circle.go b/vendor/github.com/jackc/pgx/pgtype/circle.go
new file mode 100644
index 0000000..97ecbf3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/circle.go
@@ -0,0 +1,146 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Circle struct {
+ P Vec2
+ R float64
+ Status Status
+}
+
+func (dst *Circle) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Circle", src)
+}
+
+func (dst *Circle) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Circle) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Circle) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ if len(src) < 9 {
+ return errors.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ str := string(src[2:])
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+2 : len(str)-1]
+
+ r, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Circle{P: Vec2{x, y}, R: r, Status: Present}
+ return nil
+}
+
+func (dst *Circle) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ if len(src) != 24 {
+ return errors.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+ r := binary.BigEndian.Uint64(src[16:])
+
+ *dst = Circle{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ R: math.Float64frombits(r),
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Circle) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`<(%f,%f),%f>`, src.P.X, src.P.Y, src.R)...)
+ return buf, nil
+}
+
+func (src *Circle) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.R))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Circle) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Circle) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/convert.go b/vendor/github.com/jackc/pgx/pgtype/convert.go
new file mode 100644
index 0000000..5dfb738
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/convert.go
@@ -0,0 +1,424 @@
+package pgtype
+
+import (
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+const maxUint = ^uint(0)
+const maxInt = int(maxUint >> 1)
+const minInt = -maxInt - 1
+
+// underlyingNumberType gets the underlying type that can be converted to Int2, Int4, Int8, Float4, or Float8
+func underlyingNumberType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Int:
+ convVal := int(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int8:
+ convVal := int8(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int16:
+ convVal := int16(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int32:
+ convVal := int32(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int64:
+ convVal := int64(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint:
+ convVal := uint(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint8:
+ convVal := uint8(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint16:
+ convVal := uint16(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint32:
+ convVal := uint32(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint64:
+ convVal := uint64(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Float32:
+ convVal := float32(refVal.Float())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Float64:
+ convVal := refVal.Float()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.String:
+ convVal := refVal.String()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingBoolType gets the underlying type that can be converted to Bool
+func underlyingBoolType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Bool:
+ convVal := refVal.Bool()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingBytesType gets the underlying type that can be converted to []byte
+func underlyingBytesType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Slice:
+ if refVal.Type().Elem().Kind() == reflect.Uint8 {
+ convVal := refVal.Bytes()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+ }
+
+ return nil, false
+}
+
+// underlyingStringType gets the underlying type that can be converted to String
+func underlyingStringType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.String:
+ convVal := refVal.String()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingPtrType dereferences a pointer
+func underlyingPtrType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ }
+
+ return nil, false
+}
+
+// underlyingTimeType gets the underlying type that can be converted to time.Time
+func underlyingTimeType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return time.Time{}, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ }
+
+ timeType := reflect.TypeOf(time.Time{})
+ if refVal.Type().ConvertibleTo(timeType) {
+ return refVal.Convert(timeType).Interface(), true
+ }
+
+ return time.Time{}, false
+}
+
+// underlyingSliceType gets the underlying slice type
+func underlyingSliceType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Slice:
+ baseSliceType := reflect.SliceOf(refVal.Type().Elem())
+ if refVal.Type().ConvertibleTo(baseSliceType) {
+ convVal := refVal.Convert(baseSliceType)
+ return convVal.Interface(), reflect.TypeOf(convVal.Interface()) != refVal.Type()
+ }
+ }
+
+ return nil, false
+}
+
+func int64AssignTo(srcVal int64, srcStatus Status, dst interface{}) error {
+ if srcStatus == Present {
+ switch v := dst.(type) {
+ case *int:
+ if srcVal < int64(minInt) {
+ return errors.Errorf("%d is less than minimum value for int", srcVal)
+ } else if srcVal > int64(maxInt) {
+ return errors.Errorf("%d is greater than maximum value for int", srcVal)
+ }
+ *v = int(srcVal)
+ case *int8:
+ if srcVal < math.MinInt8 {
+ return errors.Errorf("%d is less than minimum value for int8", srcVal)
+ } else if srcVal > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for int8", srcVal)
+ }
+ *v = int8(srcVal)
+ case *int16:
+ if srcVal < math.MinInt16 {
+ return errors.Errorf("%d is less than minimum value for int16", srcVal)
+ } else if srcVal > math.MaxInt16 {
+ return errors.Errorf("%d is greater than maximum value for int16", srcVal)
+ }
+ *v = int16(srcVal)
+ case *int32:
+ if srcVal < math.MinInt32 {
+ return errors.Errorf("%d is less than minimum value for int32", srcVal)
+ } else if srcVal > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for int32", srcVal)
+ }
+ *v = int32(srcVal)
+ case *int64:
+ if srcVal < math.MinInt64 {
+ return errors.Errorf("%d is less than minimum value for int64", srcVal)
+ } else if srcVal > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for int64", srcVal)
+ }
+ *v = int64(srcVal)
+ case *uint:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint", srcVal)
+ } else if uint64(srcVal) > uint64(maxUint) {
+ return errors.Errorf("%d is greater than maximum value for uint", srcVal)
+ }
+ *v = uint(srcVal)
+ case *uint8:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint8", srcVal)
+ } else if srcVal > math.MaxUint8 {
+ return errors.Errorf("%d is greater than maximum value for uint8", srcVal)
+ }
+ *v = uint8(srcVal)
+ case *uint16:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint32", srcVal)
+ } else if srcVal > math.MaxUint16 {
+ return errors.Errorf("%d is greater than maximum value for uint16", srcVal)
+ }
+ *v = uint16(srcVal)
+ case *uint32:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint32", srcVal)
+ } else if srcVal > math.MaxUint32 {
+ return errors.Errorf("%d is greater than maximum value for uint32", srcVal)
+ }
+ *v = uint32(srcVal)
+ case *uint64:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint64", srcVal)
+ }
+ *v = uint64(srcVal)
+ default:
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ switch el.Kind() {
+ // if dst is a pointer to pointer, strip the pointer and try again
+ case reflect.Ptr:
+ if el.IsNil() {
+ // allocate destination
+ el.Set(reflect.New(el.Type().Elem()))
+ }
+ return int64AssignTo(srcVal, srcStatus, el.Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if el.OverflowInt(int64(srcVal)) {
+ return errors.Errorf("cannot put %d into %T", srcVal, dst)
+ }
+ el.SetInt(int64(srcVal))
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for %T", srcVal, dst)
+ }
+ if el.OverflowUint(uint64(srcVal)) {
+ return errors.Errorf("cannot put %d into %T", srcVal, dst)
+ }
+ el.SetUint(uint64(srcVal))
+ return nil
+ }
+ }
+ return errors.Errorf("cannot assign %v into %T", srcVal, dst)
+ }
+ return nil
+ }
+
+ // if dst is a pointer to pointer and srcStatus is not Present, nil it out
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ if el.Kind() == reflect.Ptr {
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return errors.Errorf("cannot assign %v %v into %T", srcVal, srcStatus, dst)
+}
+
+func float64AssignTo(srcVal float64, srcStatus Status, dst interface{}) error {
+ if srcStatus == Present {
+ switch v := dst.(type) {
+ case *float32:
+ *v = float32(srcVal)
+ case *float64:
+ *v = srcVal
+ default:
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ switch el.Kind() {
+ // if dst is a pointer to pointer, strip the pointer and try again
+ case reflect.Ptr:
+ if el.IsNil() {
+ // allocate destination
+ el.Set(reflect.New(el.Type().Elem()))
+ }
+ return float64AssignTo(srcVal, srcStatus, el.Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ i64 := int64(srcVal)
+ if float64(i64) == srcVal {
+ return int64AssignTo(i64, srcStatus, dst)
+ }
+ }
+ }
+ return errors.Errorf("cannot assign %v into %T", srcVal, dst)
+ }
+ return nil
+ }
+
+ // if dst is a pointer to pointer and srcStatus is not Present, nil it out
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ if el.Kind() == reflect.Ptr {
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return errors.Errorf("cannot assign %v %v into %T", srcVal, srcStatus, dst)
+}
+
+func NullAssignTo(dst interface{}) error {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return errors.Errorf("cannot assign NULL to %T", dst)
+ }
+
+ dstVal := dstPtr.Elem()
+
+ switch dstVal.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.Map:
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+ return nil
+ }
+
+ return errors.Errorf("cannot assign NULL to %T", dst)
+}
+
+var kindTypes map[reflect.Kind]reflect.Type
+
+// GetAssignToDstType attempts to convert dst to something AssignTo can assign
+// to. If dst is a pointer to pointer it allocates a value and returns the
+// dereferences pointer. If dst is a named type such as *Foo where Foo is type
+// Foo int16, it converts dst to *int16.
+//
+// GetAssignToDstType returns the converted dst and a bool representing if any
+// change was made.
+func GetAssignToDstType(dst interface{}) (interface{}, bool) {
+ dstPtr := reflect.ValueOf(dst)
+
+ // AssignTo dst must always be a pointer
+ if dstPtr.Kind() != reflect.Ptr {
+ return nil, false
+ }
+
+ dstVal := dstPtr.Elem()
+
+ // if dst is a pointer to pointer, allocate space try again with the dereferenced pointer
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal.Set(reflect.New(dstVal.Type().Elem()))
+ return dstVal.Interface(), true
+ }
+
+ // if dst is pointer to a base type that has been renamed
+ if baseValType, ok := kindTypes[dstVal.Kind()]; ok {
+ nextDst := dstPtr.Convert(reflect.PtrTo(baseValType))
+ return nextDst.Interface(), dstPtr.Type() != nextDst.Type()
+ }
+
+ if dstVal.Kind() == reflect.Slice {
+ if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+ baseSliceType := reflect.PtrTo(reflect.SliceOf(baseElemType))
+ nextDst := dstPtr.Convert(baseSliceType)
+ return nextDst.Interface(), dstPtr.Type() != nextDst.Type()
+ }
+ }
+
+ return nil, false
+}
+
+func init() {
+ kindTypes = map[reflect.Kind]reflect.Type{
+ reflect.Bool: reflect.TypeOf(false),
+ reflect.Float32: reflect.TypeOf(float32(0)),
+ reflect.Float64: reflect.TypeOf(float64(0)),
+ reflect.Int: reflect.TypeOf(int(0)),
+ reflect.Int8: reflect.TypeOf(int8(0)),
+ reflect.Int16: reflect.TypeOf(int16(0)),
+ reflect.Int32: reflect.TypeOf(int32(0)),
+ reflect.Int64: reflect.TypeOf(int64(0)),
+ reflect.Uint: reflect.TypeOf(uint(0)),
+ reflect.Uint8: reflect.TypeOf(uint8(0)),
+ reflect.Uint16: reflect.TypeOf(uint16(0)),
+ reflect.Uint32: reflect.TypeOf(uint32(0)),
+ reflect.Uint64: reflect.TypeOf(uint64(0)),
+ reflect.String: reflect.TypeOf(""),
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/database_sql.go b/vendor/github.com/jackc/pgx/pgtype/database_sql.go
new file mode 100644
index 0000000..969536d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/database_sql.go
@@ -0,0 +1,42 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+func DatabaseSQLValue(ci *ConnInfo, src Value) (interface{}, error) {
+ if valuer, ok := src.(driver.Valuer); ok {
+ return valuer.Value()
+ }
+
+ if textEncoder, ok := src.(TextEncoder); ok {
+ buf, err := textEncoder.EncodeText(ci, nil)
+ if err != nil {
+ return nil, err
+ }
+ return string(buf), nil
+ }
+
+ if binaryEncoder, ok := src.(BinaryEncoder); ok {
+ buf, err := binaryEncoder.EncodeBinary(ci, nil)
+ if err != nil {
+ return nil, err
+ }
+ return buf, nil
+ }
+
+ return nil, errors.New("cannot convert to database/sql compatible value")
+}
+
+func EncodeValueText(src TextEncoder) (interface{}, error) {
+ buf, err := src.EncodeText(nil, make([]byte, 0, 32))
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+ return string(buf), err
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/date.go b/vendor/github.com/jackc/pgx/pgtype/date.go
new file mode 100644
index 0000000..f1c0d8b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/date.go
@@ -0,0 +1,209 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Date struct {
+ Time time.Time
+ Status Status
+ InfinityModifier InfinityModifier
+}
+
+const (
+ negativeInfinityDayOffset = -2147483648
+ infinityDayOffset = 2147483647
+)
+
+func (dst *Date) Set(src interface{}) error {
+ if src == nil {
+ *dst = Date{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case time.Time:
+ *dst = Date{Time: value, Status: Present}
+ default:
+ if originalSrc, ok := underlyingTimeType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Date", value)
+ }
+
+ return nil
+}
+
+func (dst *Date) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ if dst.InfinityModifier != None {
+ return dst.InfinityModifier
+ }
+ return dst.Time
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Date) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *time.Time:
+ if src.InfinityModifier != None {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+ }
+ *v = src.Time
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Date) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Date{Status: Null}
+ return nil
+ }
+
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ *dst = Date{Status: Present, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Date{Status: Present, InfinityModifier: -Infinity}
+ default:
+ t, err := time.ParseInLocation("2006-01-02", sbuf, time.UTC)
+ if err != nil {
+ return err
+ }
+
+ *dst = Date{Time: t, Status: Present}
+ }
+
+ return nil
+}
+
+func (dst *Date) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Date{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length for date: %v", len(src))
+ }
+
+ dayOffset := int32(binary.BigEndian.Uint32(src))
+
+ switch dayOffset {
+ case infinityDayOffset:
+ *dst = Date{Status: Present, InfinityModifier: Infinity}
+ case negativeInfinityDayOffset:
+ *dst = Date{Status: Present, InfinityModifier: -Infinity}
+ default:
+ t := time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.UTC)
+ *dst = Date{Time: t, Status: Present}
+ }
+
+ return nil
+}
+
+func (src *Date) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var s string
+
+ switch src.InfinityModifier {
+ case None:
+ s = src.Time.Format("2006-01-02")
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return append(buf, s...), nil
+}
+
+func (src *Date) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var daysSinceDateEpoch int32
+ switch src.InfinityModifier {
+ case None:
+ tUnix := time.Date(src.Time.Year(), src.Time.Month(), src.Time.Day(), 0, 0, 0, 0, time.UTC).Unix()
+ dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
+
+ secSinceDateEpoch := tUnix - dateEpoch
+ daysSinceDateEpoch = int32(secSinceDateEpoch / 86400)
+ case Infinity:
+ daysSinceDateEpoch = infinityDayOffset
+ case NegativeInfinity:
+ daysSinceDateEpoch = negativeInfinityDayOffset
+ }
+
+ return pgio.AppendInt32(buf, daysSinceDateEpoch), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Date) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Date{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ case time.Time:
+ *dst = Date{Time: src, Status: Present}
+ return nil
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Date) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ if src.InfinityModifier != None {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/date_array.go b/vendor/github.com/jackc/pgx/pgtype/date_array.go
new file mode 100644
index 0000000..0cb6458
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/date_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type DateArray struct {
+ Elements []Date
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *DateArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = DateArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []time.Time:
+ if value == nil {
+ *dst = DateArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = DateArray{Status: Present}
+ } else {
+ elements := make([]Date, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = DateArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to DateArray", value)
+ }
+
+ return nil
+}
+
+func (dst *DateArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *DateArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]time.Time:
+ *v = make([]time.Time, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *DateArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = DateArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Date
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Date, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Date
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = DateArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *DateArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = DateArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = DateArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Date, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = DateArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *DateArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *DateArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("date"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "date")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *DateArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *DateArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/daterange.go b/vendor/github.com/jackc/pgx/pgtype/daterange.go
new file mode 100644
index 0000000..47cd7e4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/daterange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Daterange struct {
+ Lower Date
+ Upper Date
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Daterange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Daterange", src)
+}
+
+func (dst *Daterange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Daterange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Daterange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Daterange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Daterange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Daterange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Daterange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Daterange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Daterange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Daterange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Daterange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Daterange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Daterange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/decimal.go b/vendor/github.com/jackc/pgx/pgtype/decimal.go
new file mode 100644
index 0000000..79653cf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/decimal.go
@@ -0,0 +1,31 @@
+package pgtype
+
+type Decimal Numeric
+
+func (dst *Decimal) Set(src interface{}) error {
+ return (*Numeric)(dst).Set(src)
+}
+
+func (dst *Decimal) Get() interface{} {
+ return (*Numeric)(dst).Get()
+}
+
+func (src *Decimal) AssignTo(dst interface{}) error {
+ return (*Numeric)(src).AssignTo(dst)
+}
+
+func (dst *Decimal) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Numeric)(dst).DecodeText(ci, src)
+}
+
+func (dst *Decimal) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Numeric)(dst).DecodeBinary(ci, src)
+}
+
+func (src *Decimal) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Numeric)(src).EncodeText(ci, buf)
+}
+
+func (src *Decimal) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Numeric)(src).EncodeBinary(ci, buf)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/enum_array.go b/vendor/github.com/jackc/pgx/pgtype/enum_array.go
new file mode 100644
index 0000000..3a94801
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/enum_array.go
@@ -0,0 +1,212 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+type EnumArray struct {
+ Elements []GenericText
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *EnumArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = EnumArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []string:
+ if value == nil {
+ *dst = EnumArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = EnumArray{Status: Present}
+ } else {
+ elements := make([]GenericText, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = EnumArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to EnumArray", value)
+ }
+
+ return nil
+}
+
+func (dst *EnumArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *EnumArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *EnumArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = EnumArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []GenericText
+
+ if len(uta.Elements) > 0 {
+ elements = make([]GenericText, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem GenericText
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = EnumArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (src *EnumArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *EnumArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *EnumArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float4.go b/vendor/github.com/jackc/pgx/pgtype/float4.go
new file mode 100644
index 0000000..2207594
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float4.go
@@ -0,0 +1,197 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Float4 struct {
+ Float float32
+ Status Status
+}
+
+func (dst *Float4) Set(src interface{}) error {
+ if src == nil {
+ *dst = Float4{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case float32:
+ *dst = Float4{Float: value, Status: Present}
+ case float64:
+ *dst = Float4{Float: float32(value), Status: Present}
+ case int8:
+ *dst = Float4{Float: float32(value), Status: Present}
+ case uint8:
+ *dst = Float4{Float: float32(value), Status: Present}
+ case int16:
+ *dst = Float4{Float: float32(value), Status: Present}
+ case uint16:
+ *dst = Float4{Float: float32(value), Status: Present}
+ case int32:
+ f32 := float32(value)
+ if int32(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case uint32:
+ f32 := float32(value)
+ if uint32(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case int64:
+ f32 := float32(value)
+ if int64(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case uint64:
+ f32 := float32(value)
+ if uint64(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case int:
+ f32 := float32(value)
+ if int(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case uint:
+ f32 := float32(value)
+ if uint(f32) == value {
+ *dst = Float4{Float: f32, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float32", value)
+ }
+ case string:
+ num, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return err
+ }
+ *dst = Float4{Float: float32(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Float8", value)
+ }
+
+ return nil
+}
+
+func (dst *Float4) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Float
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Float4) AssignTo(dst interface{}) error {
+ return float64AssignTo(float64(src.Float), src.Status, dst)
+}
+
+func (dst *Float4) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float4{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseFloat(string(src), 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = Float4{Float: float32(n), Status: Present}
+ return nil
+}
+
+func (dst *Float4) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float4{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+
+ *dst = Float4{Float: math.Float32frombits(uint32(n)), Status: Present}
+ return nil
+}
+
+func (src *Float4) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, strconv.FormatFloat(float64(src.Float), 'f', -1, 32)...)
+ return buf, nil
+}
+
+func (src *Float4) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint32(buf, math.Float32bits(src.Float))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float4) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Float4{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *dst = Float4{Float: float32(src), Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float4) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return float64(src.Float), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float4_array.go b/vendor/github.com/jackc/pgx/pgtype/float4_array.go
new file mode 100644
index 0000000..02c28ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float4_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Float4Array struct {
+ Elements []Float4
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Float4Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Float4Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []float32:
+ if value == nil {
+ *dst = Float4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Float4Array{Status: Present}
+ } else {
+ elements := make([]Float4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Float4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Float4Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Float4Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Float4Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]float32:
+ *v = make([]float32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Float4Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float4Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Float4
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Float4, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Float4
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Float4Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Float4Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float4Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Float4Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Float4, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Float4Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Float4Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Float4Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("float4"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "float4")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float4Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float4Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float8.go b/vendor/github.com/jackc/pgx/pgtype/float8.go
new file mode 100644
index 0000000..dd34f54
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float8.go
@@ -0,0 +1,187 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Float8 struct {
+ Float float64
+ Status Status
+}
+
+func (dst *Float8) Set(src interface{}) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case float32:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case float64:
+ *dst = Float8{Float: value, Status: Present}
+ case int8:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case uint8:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case int16:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case uint16:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case int32:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case uint32:
+ *dst = Float8{Float: float64(value), Status: Present}
+ case int64:
+ f64 := float64(value)
+ if int64(f64) == value {
+ *dst = Float8{Float: f64, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float64", value)
+ }
+ case uint64:
+ f64 := float64(value)
+ if uint64(f64) == value {
+ *dst = Float8{Float: f64, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float64", value)
+ }
+ case int:
+ f64 := float64(value)
+ if int(f64) == value {
+ *dst = Float8{Float: f64, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float64", value)
+ }
+ case uint:
+ f64 := float64(value)
+ if uint(f64) == value {
+ *dst = Float8{Float: f64, Status: Present}
+ } else {
+ return errors.Errorf("%v cannot be exactly represented as float64", value)
+ }
+ case string:
+ num, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return err
+ }
+ *dst = Float8{Float: float64(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Float8", value)
+ }
+
+ return nil
+}
+
+func (dst *Float8) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Float
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Float8) AssignTo(dst interface{}) error {
+ return float64AssignTo(src.Float, src.Status, dst)
+}
+
+func (dst *Float8) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Float8{Float: n, Status: Present}
+ return nil
+}
+
+func (dst *Float8) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ *dst = Float8{Float: math.Float64frombits(uint64(n)), Status: Present}
+ return nil
+}
+
+func (src *Float8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, strconv.FormatFloat(float64(src.Float), 'f', -1, 64)...)
+ return buf, nil
+}
+
+func (src *Float8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.Float))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float8) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *dst = Float8{Float: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float8) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Float, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float8_array.go b/vendor/github.com/jackc/pgx/pgtype/float8_array.go
new file mode 100644
index 0000000..b92a820
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float8_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Float8Array struct {
+ Elements []Float8
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Float8Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Float8Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []float64:
+ if value == nil {
+ *dst = Float8Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Float8Array{Status: Present}
+ } else {
+ elements := make([]Float8, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Float8Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Float8Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Float8Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Float8Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]float64:
+ *v = make([]float64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Float8Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Float8
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Float8, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Float8
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Float8Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Float8Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Float8Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Float8, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Float8Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Float8Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Float8Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("float8"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "float8")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float8Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float8Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/generic_binary.go b/vendor/github.com/jackc/pgx/pgtype/generic_binary.go
new file mode 100644
index 0000000..2596eca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/generic_binary.go
@@ -0,0 +1,39 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// GenericBinary is a placeholder for binary format values that no other type exists
+// to handle.
+type GenericBinary Bytea
+
+func (dst *GenericBinary) Set(src interface{}) error {
+ return (*Bytea)(dst).Set(src)
+}
+
+func (dst *GenericBinary) Get() interface{} {
+ return (*Bytea)(dst).Get()
+}
+
+func (src *GenericBinary) AssignTo(dst interface{}) error {
+ return (*Bytea)(src).AssignTo(dst)
+}
+
+func (dst *GenericBinary) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Bytea)(dst).DecodeBinary(ci, src)
+}
+
+func (src *GenericBinary) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Bytea)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *GenericBinary) Scan(src interface{}) error {
+ return (*Bytea)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *GenericBinary) Value() (driver.Value, error) {
+ return (*Bytea)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/generic_text.go b/vendor/github.com/jackc/pgx/pgtype/generic_text.go
new file mode 100644
index 0000000..0e3db9d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/generic_text.go
@@ -0,0 +1,39 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// GenericText is a placeholder for text format values that no other type exists
+// to handle.
+type GenericText Text
+
+func (dst *GenericText) Set(src interface{}) error {
+ return (*Text)(dst).Set(src)
+}
+
+func (dst *GenericText) Get() interface{} {
+ return (*Text)(dst).Get()
+}
+
+func (src *GenericText) AssignTo(dst interface{}) error {
+ return (*Text)(src).AssignTo(dst)
+}
+
+func (dst *GenericText) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeText(ci, src)
+}
+
+func (src *GenericText) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Text)(src).EncodeText(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *GenericText) Scan(src interface{}) error {
+ return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *GenericText) Value() (driver.Value, error) {
+ return (*Text)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/hstore.go b/vendor/github.com/jackc/pgx/pgtype/hstore.go
new file mode 100644
index 0000000..347446a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/hstore.go
@@ -0,0 +1,434 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+// Hstore represents an hstore column that can be null or have null values
+// associated with its keys.
+type Hstore struct {
+ Map map[string]Text
+ Status Status
+}
+
+func (dst *Hstore) Set(src interface{}) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case map[string]string:
+ m := make(map[string]Text, len(value))
+ for k, v := range value {
+ m[k] = Text{String: v, Status: Present}
+ }
+ *dst = Hstore{Map: m, Status: Present}
+ default:
+ return errors.Errorf("cannot convert %v to Hstore", src)
+ }
+
+ return nil
+}
+
+func (dst *Hstore) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Map
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Hstore) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *map[string]string:
+ *v = make(map[string]string, len(src.Map))
+ for k, val := range src.Map {
+ if val.Status != Present {
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+ }
+ (*v)[k] = val.String
+ }
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Hstore) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ keys, values, err := parseHstore(string(src))
+ if err != nil {
+ return err
+ }
+
+ m := make(map[string]Text, len(keys))
+ for i := range keys {
+ m[keys[i]] = values[i]
+ }
+
+ *dst = Hstore{Map: m, Status: Present}
+ return nil
+}
+
+func (dst *Hstore) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ rp := 0
+
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ pairCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ m := make(map[string]Text, pairCount)
+
+ for i := 0; i < pairCount; i++ {
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ keyLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ if len(src[rp:]) < keyLen {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ key := string(src[rp : rp+keyLen])
+ rp += keyLen
+
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ var valueBuf []byte
+ if valueLen >= 0 {
+ valueBuf = src[rp : rp+valueLen]
+ }
+ rp += valueLen
+
+ var value Text
+ err := value.DecodeBinary(ci, valueBuf)
+ if err != nil {
+ return err
+ }
+ m[key] = value
+ }
+
+ *dst = Hstore{Map: m, Status: Present}
+
+ return nil
+}
+
+func (src *Hstore) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ firstPair := true
+
+ for k, v := range src.Map {
+ if firstPair {
+ firstPair = false
+ } else {
+ buf = append(buf, ',')
+ }
+
+ buf = append(buf, quoteHstoreElementIfNeeded(k)...)
+ buf = append(buf, "=>"...)
+
+ elemBuf, err := v.EncodeText(ci, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ if elemBuf == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, quoteHstoreElementIfNeeded(string(elemBuf))...)
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Hstore) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(src.Map)))
+
+ var err error
+ for k, v := range src.Map {
+ buf = pgio.AppendInt32(buf, int32(len(k)))
+ buf = append(buf, k...)
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := v.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, err
+}
+
+var quoteHstoreReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
+
+func quoteHstoreElement(src string) string {
+ return `"` + quoteArrayReplacer.Replace(src) + `"`
+}
+
+func quoteHstoreElementIfNeeded(src string) string {
+ if src == "" || (len(src) == 4 && strings.ToLower(src) == "null") || strings.ContainsAny(src, ` {},"\=>`) {
+ return quoteArrayElement(src)
+ }
+ return src
+}
+
+const (
+ hsPre = iota
+ hsKey
+ hsSep
+ hsVal
+ hsNul
+ hsNext
+)
+
+type hstoreParser struct {
+ str string
+ pos int
+}
+
+func newHSP(in string) *hstoreParser {
+ return &hstoreParser{
+ pos: 0,
+ str: in,
+ }
+}
+
+func (p *hstoreParser) Consume() (r rune, end bool) {
+ if p.pos >= len(p.str) {
+ end = true
+ return
+ }
+ r, w := utf8.DecodeRuneInString(p.str[p.pos:])
+ p.pos += w
+ return
+}
+
+func (p *hstoreParser) Peek() (r rune, end bool) {
+ if p.pos >= len(p.str) {
+ end = true
+ return
+ }
+ r, _ = utf8.DecodeRuneInString(p.str[p.pos:])
+ return
+}
+
+// parseHstore parses the string representation of an hstore column (the same
+// you would get from an ordinary SELECT) into two slices of keys and values. it
+// is used internally in the default parsing of hstores.
+func parseHstore(s string) (k []string, v []Text, err error) {
+ if s == "" {
+ return
+ }
+
+ buf := bytes.Buffer{}
+ keys := []string{}
+ values := []Text{}
+ p := newHSP(s)
+
+ r, end := p.Consume()
+ state := hsPre
+
+ for !end {
+ switch state {
+ case hsPre:
+ if r == '"' {
+ state = hsKey
+ } else {
+ err = errors.New("String does not begin with \"")
+ }
+ case hsKey:
+ switch r {
+ case '"': //End of the key
+ if buf.Len() == 0 {
+ err = errors.New("Empty Key is invalid")
+ } else {
+ keys = append(keys, buf.String())
+ buf = bytes.Buffer{}
+ state = hsSep
+ }
+ case '\\': //Potential escaped character
+ n, end := p.Consume()
+ switch {
+ case end:
+ err = errors.New("Found EOS in key, expecting character or \"")
+ case n == '"', n == '\\':
+ buf.WriteRune(n)
+ default:
+ buf.WriteRune(r)
+ buf.WriteRune(n)
+ }
+ default: //Any other character
+ buf.WriteRune(r)
+ }
+ case hsSep:
+ if r == '=' {
+ r, end = p.Consume()
+ switch {
+ case end:
+ err = errors.New("Found EOS after '=', expecting '>'")
+ case r == '>':
+ r, end = p.Consume()
+ switch {
+ case end:
+ err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
+ case r == '"':
+ state = hsVal
+ case r == 'N':
+ state = hsNul
+ default:
+ err = errors.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
+ }
+ default:
+ err = errors.Errorf("Invalid character after '=', expecting '>'")
+ }
+ } else {
+ err = errors.Errorf("Invalid character '%c' after value, expecting '='", r)
+ }
+ case hsVal:
+ switch r {
+ case '"': //End of the value
+ values = append(values, Text{String: buf.String(), Status: Present})
+ buf = bytes.Buffer{}
+ state = hsNext
+ case '\\': //Potential escaped character
+ n, end := p.Consume()
+ switch {
+ case end:
+ err = errors.New("Found EOS in key, expecting character or \"")
+ case n == '"', n == '\\':
+ buf.WriteRune(n)
+ default:
+ buf.WriteRune(r)
+ buf.WriteRune(n)
+ }
+ default: //Any other character
+ buf.WriteRune(r)
+ }
+ case hsNul:
+ nulBuf := make([]rune, 3)
+ nulBuf[0] = r
+ for i := 1; i < 3; i++ {
+ r, end = p.Consume()
+ if end {
+ err = errors.New("Found EOS in NULL value")
+ return
+ }
+ nulBuf[i] = r
+ }
+ if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
+ values = append(values, Text{Status: Null})
+ state = hsNext
+ } else {
+ err = errors.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
+ }
+ case hsNext:
+ if r == ',' {
+ r, end = p.Consume()
+ switch {
+ case end:
+ err = errors.New("Found EOS after ',', expcting space")
+ case (unicode.IsSpace(r)):
+ r, end = p.Consume()
+ state = hsKey
+ default:
+ err = errors.Errorf("Invalid character '%c' after ', ', expecting \"", r)
+ }
+ } else {
+ err = errors.Errorf("Invalid character '%c' after value, expecting ','", r)
+ }
+ }
+
+ if err != nil {
+ return
+ }
+ r, end = p.Consume()
+ }
+ if state != hsNext {
+ err = errors.New("Improperly formatted hstore")
+ return
+ }
+ k = keys
+ v = values
+ return
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Hstore) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Hstore) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/hstore_array.go b/vendor/github.com/jackc/pgx/pgtype/hstore_array.go
new file mode 100644
index 0000000..80530c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/hstore_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// HstoreArray represents a PostgreSQL hstore[] value.
+type HstoreArray struct {
+	Elements   []Hstore         // flattened elements in row-major order
+	Dimensions []ArrayDimension // empty for an empty array
+	Status     Status
+}
+
+// Set converts src ([]map[string]string, or a type whose underlying type is
+// such a slice) into a one-dimensional HstoreArray. A nil slice maps to SQL
+// NULL; an empty slice maps to an empty (zero-dimension) array.
+func (dst *HstoreArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []map[string]string:
+		if value == nil {
+			*dst = HstoreArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = HstoreArray{Status: Present}
+		} else {
+			elements := make([]Hstore, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			// Single dimension; PostgreSQL array lower bounds default to 1.
+			*dst = HstoreArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Retry with the underlying slice type (e.g. a named slice type).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to HstoreArray", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying value: the array itself when present, nil for
+// SQL NULL, and the raw Status otherwise (e.g. Undefined).
+func (dst *HstoreArray) Get() interface{} {
+	if dst.Status == Present {
+		return dst
+	}
+	if dst.Status == Null {
+		return nil
+	}
+	return dst.Status
+}
+
+// AssignTo copies src into dst, which must be a *[]map[string]string or a
+// pointer type that GetAssignToDstType can convert to one. NULL assigns the
+// destination's nil/zero value via NullAssignTo.
+func (src *HstoreArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]map[string]string:
+			*v = make([]map[string]string, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			// Unwrap named/pointer types and retry once.
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText parses the PostgreSQL text representation of an hstore[] value.
+// A nil src decodes to SQL NULL.
+func (dst *HstoreArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Hstore
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Hstore, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Hstore
+			var elemSrc []byte
+			// The literal "NULL" marks a NULL element; leave elemSrc nil so
+			// the element decodes to Status Null.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = HstoreArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary parses the PostgreSQL binary wire format of an hstore[] value:
+// an array header followed by (int32 length, payload) pairs per element.
+// A nil src decodes to SQL NULL.
+func (dst *HstoreArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	// Zero dimensions means an empty array with no element data.
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = HstoreArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Hstore, elementCount)
+
+	for i := range elements {
+		// Each element is prefixed with its int32 byte length; -1 means NULL.
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = HstoreArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text representation of src to buf.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *HstoreArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		// A nil element buffer signals a NULL element.
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary wire format of src to buf.
+// The hstore element OID is looked up from ci, so a nil ConnInfo fails.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *HstoreArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("hstore"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "hstore")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		// Reserve a -1 (NULL) length placeholder; patch it with the real
+		// length after encoding unless the element really is NULL.
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *HstoreArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch v := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(v))
+	case []byte:
+		// Copy the bytes: the driver may reuse src after Scan returns.
+		data := make([]byte, len(v))
+		copy(data, v)
+		return dst.DecodeText(nil, data)
+	default:
+		return errors.Errorf("cannot scan %T", src)
+	}
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *HstoreArray) Value() (driver.Value, error) {
+	encoded, err := src.EncodeText(nil, nil)
+	switch {
+	case err != nil:
+		return nil, err
+	case encoded == nil:
+		// NULL array.
+		return nil, nil
+	default:
+		return string(encoded), nil
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/inet.go b/vendor/github.com/jackc/pgx/pgtype/inet.go
new file mode 100644
index 0000000..01fc0e5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/inet.go
@@ -0,0 +1,215 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+
+ "github.com/pkg/errors"
+)
+
+// Network address family is dependent on server socket.h value for AF_INET.
+// In practice, all platforms appear to have the same value. See
+// src/include/utils/inet.h for more information.
+const (
+ defaultAFInet = 2
+ defaultAFInet6 = 3
+)
+
+// Inet represents both inet and cidr PostgreSQL types.
+type Inet struct {
+	IPNet  *net.IPNet // address plus mask; nil when Status != Present
+	Status Status
+}
+
+// Set converts src into an Inet. Accepted types are net.IPNet, *net.IPNet,
+// net.IP (given a full host mask), and string in either CIDR notation
+// ("10.0.0.0/8") or as a bare address ("10.0.0.1"). nil sets SQL NULL.
+func (dst *Inet) Set(src interface{}) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case net.IPNet:
+		*dst = Inet{IPNet: &value, Status: Present}
+	case *net.IPNet:
+		*dst = Inet{IPNet: value, Status: Present}
+	case net.IP:
+		bitCount := len(value) * 8
+		mask := net.CIDRMask(bitCount, bitCount)
+		*dst = Inet{IPNet: &net.IPNet{Mask: mask, IP: value}, Status: Present}
+	case string:
+		// Try CIDR notation first; fall back to a bare address with a full
+		// host mask so Set accepts the same forms DecodeText does.
+		_, ipnet, err := net.ParseCIDR(value)
+		if err != nil {
+			ip := net.ParseIP(value)
+			if ip == nil {
+				return err
+			}
+			if ipv4 := ip.To4(); ipv4 != nil {
+				ip = ipv4
+			}
+			bitCount := len(ip) * 8
+			ipnet = &net.IPNet{Mask: net.CIDRMask(bitCount, bitCount), IP: ip}
+		}
+		*dst = Inet{IPNet: ipnet, Status: Present}
+	default:
+		if originalSrc, ok := underlyingPtrType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Inet", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying *net.IPNet when present, nil for SQL NULL, and
+// the raw Status otherwise.
+func (dst *Inet) Get() interface{} {
+	if dst.Status == Present {
+		return dst.IPNet
+	}
+	if dst.Status == Null {
+		return nil
+	}
+	return dst.Status
+}
+
+// AssignTo copies src into dst, which must be a *net.IPNet or *net.IP (or a
+// pointer type GetAssignToDstType can convert). Assigning to *net.IP fails
+// unless the mask is a full host mask, since the mask would be lost.
+func (src *Inet) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+		case *net.IPNet:
+			// Deep-copy IP and Mask so dst does not alias src's storage.
+			*v = net.IPNet{
+				IP:   make(net.IP, len(src.IPNet.IP)),
+				Mask: make(net.IPMask, len(src.IPNet.Mask)),
+			}
+			copy(v.IP, src.IPNet.IP)
+			copy(v.Mask, src.IPNet.Mask)
+			return nil
+		case *net.IP:
+			if oneCount, bitCount := src.IPNet.Mask.Size(); oneCount != bitCount {
+				return errors.Errorf("cannot assign %v to %T", src, dst)
+			}
+			*v = make(net.IP, len(src.IPNet.IP))
+			copy(*v, src.IPNet.IP)
+			return nil
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText parses the text representation of an inet/cidr value. Both a
+// bare address (given a full host mask) and CIDR notation are accepted.
+// A nil src decodes to SQL NULL.
+func (dst *Inet) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	var ipnet *net.IPNet
+	var err error
+
+	if ip := net.ParseIP(string(src)); ip != nil {
+		// Normalize IPv4-in-IPv6 form to 4 bytes so the mask length matches.
+		ipv4 := ip.To4()
+		if ipv4 != nil {
+			ip = ipv4
+		}
+		bitCount := len(ip) * 8
+		mask := net.CIDRMask(bitCount, bitCount)
+		ipnet = &net.IPNet{Mask: mask, IP: ip}
+	} else {
+		_, ipnet, err = net.ParseCIDR(string(src))
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = Inet{IPNet: ipnet, Status: Present}
+	return nil
+}
+
+// DecodeBinary parses the binary wire format of an inet/cidr value:
+// family byte, mask bits, is_cidr flag, address length, then the address
+// (4 or 16 bytes, hence total sizes 8 and 20). A nil src decodes to NULL.
+func (dst *Inet) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	if len(src) != 8 && len(src) != 20 {
+		return errors.Errorf("Received an invalid size for a inet: %d", len(src))
+	}
+
+	// ignore family
+	bits := src[1]
+	// ignore is_cidr
+	addressLength := src[3]
+
+	var ipnet net.IPNet
+	ipnet.IP = make(net.IP, int(addressLength))
+	copy(ipnet.IP, src[4:])
+	ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
+
+	*dst = Inet{IPNet: &ipnet, Status: Present}
+
+	return nil
+}
+
+// EncodeText appends the CIDR text form of src (net.IPNet.String) to buf.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *Inet) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, src.IPNet.String()...), nil
+}
+
+// EncodeBinary appends the PostgreSQL binary wire format of src to buf:
+// family byte, mask-bit count, is_cidr flag (always 0), address length,
+// then the raw address bytes.
+func (src *Inet) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	// Family is derived from the stored address width (4 or 16 bytes).
+	var family byte
+	switch len(src.IPNet.IP) {
+	case net.IPv4len:
+		family = defaultAFInet
+	case net.IPv6len:
+		family = defaultAFInet6
+	default:
+		return nil, errors.Errorf("Unexpected IP length: %v", len(src.IPNet.IP))
+	}
+
+	buf = append(buf, family)
+
+	ones, _ := src.IPNet.Mask.Size()
+	buf = append(buf, byte(ones))
+
+	// is_cidr is ignored on server
+	buf = append(buf, 0)
+
+	buf = append(buf, byte(len(src.IPNet.IP)))
+
+	return append(buf, src.IPNet.IP...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Inet) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	switch v := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(v))
+	case []byte:
+		// Copy the bytes: the driver may reuse src after Scan returns.
+		data := make([]byte, len(v))
+		copy(data, v)
+		return dst.DecodeText(nil, data)
+	default:
+		return errors.Errorf("cannot scan %T", src)
+	}
+}
+
+// Value implements the database/sql/driver Valuer interface.
+// It delegates to EncodeValueText, which renders src via its text encoding.
+func (src *Inet) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/inet_array.go b/vendor/github.com/jackc/pgx/pgtype/inet_array.go
new file mode 100644
index 0000000..f3e4efb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/inet_array.go
@@ -0,0 +1,329 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "net"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// InetArray represents a PostgreSQL inet[]/cidr[] value.
+type InetArray struct {
+	Elements   []Inet           // flattened elements in row-major order
+	Dimensions []ArrayDimension // empty for an empty array
+	Status     Status
+}
+
+// Set converts src ([]*net.IPNet, []net.IP, or a type whose underlying type
+// is such a slice) into a one-dimensional InetArray. A nil slice maps to SQL
+// NULL; an empty slice maps to an empty (zero-dimension) array.
+func (dst *InetArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []*net.IPNet:
+		if value == nil {
+			*dst = InetArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = InetArray{Status: Present}
+		} else {
+			elements := make([]Inet, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = InetArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	case []net.IP:
+		if value == nil {
+			*dst = InetArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = InetArray{Status: Present}
+		} else {
+			elements := make([]Inet, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = InetArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Retry with the underlying slice type (e.g. a named slice type).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to InetArray", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying value: the array itself when present, nil for
+// SQL NULL, and the raw Status otherwise.
+func (dst *InetArray) Get() interface{} {
+	if dst.Status == Present {
+		return dst
+	}
+	if dst.Status == Null {
+		return nil
+	}
+	return dst.Status
+}
+
+// AssignTo copies src into dst, which must be a *[]*net.IPNet or *[]net.IP
+// (or a pointer type GetAssignToDstType can convert). NULL assigns the
+// destination's nil/zero value via NullAssignTo.
+func (src *InetArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]*net.IPNet:
+			*v = make([]*net.IPNet, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		case *[]net.IP:
+			*v = make([]net.IP, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText parses the PostgreSQL text representation of an inet[] value.
+// A nil src decodes to SQL NULL.
+func (dst *InetArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Inet
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Inet, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Inet
+			var elemSrc []byte
+			// The literal "NULL" marks a NULL element.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = InetArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary parses the PostgreSQL binary wire format of an inet[] value:
+// an array header followed by (int32 length, payload) pairs per element.
+// A nil src decodes to SQL NULL.
+func (dst *InetArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	// Zero dimensions means an empty array with no element data.
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = InetArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Inet, elementCount)
+
+	for i := range elements {
+		// Each element is prefixed with its int32 byte length; -1 means NULL.
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = InetArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text representation of src to buf.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *InetArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		// A nil element buffer signals a NULL element.
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary wire format of src to buf.
+// The inet element OID is looked up from ci, so a nil ConnInfo fails.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *InetArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("inet"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "inet")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		// Reserve a -1 (NULL) length placeholder; patch it with the real
+		// length after encoding unless the element really is NULL.
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *InetArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch v := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(v))
+	case []byte:
+		// Copy the bytes: the driver may reuse src after Scan returns.
+		data := make([]byte, len(v))
+		copy(data, v)
+		return dst.DecodeText(nil, data)
+	default:
+		return errors.Errorf("cannot scan %T", src)
+	}
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *InetArray) Value() (driver.Value, error) {
+	encoded, err := src.EncodeText(nil, nil)
+	switch {
+	case err != nil:
+		return nil, err
+	case encoded == nil:
+		// NULL array.
+		return nil, nil
+	default:
+		return string(encoded), nil
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int2.go b/vendor/github.com/jackc/pgx/pgtype/int2.go
new file mode 100644
index 0000000..6156ea7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int2.go
@@ -0,0 +1,209 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Int2 represents a PostgreSQL smallint (int2) value.
+type Int2 struct {
+	Int    int16
+	Status Status
+}
+
+// Set converts src into an Int2, range-checking every integer type that can
+// exceed int16 and parsing strings with strconv. nil sets SQL NULL.
+func (dst *Int2) Set(src interface{}) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case int8:
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint8:
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int16:
+		*dst = Int2{Int: value, Status: Present}
+	case uint16:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int32:
+		// Report underflow and overflow with distinct messages.
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint32:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int64:
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint64:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int:
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case string:
+		num, err := strconv.ParseInt(value, 10, 16)
+		if err != nil {
+			return err
+		}
+		*dst = Int2{Int: int16(num), Status: Present}
+	default:
+		if originalSrc, ok := underlyingNumberType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Int2", value)
+	}
+
+	return nil
+}
+
+// Get returns the int16 value when present, nil for SQL NULL, and the raw
+// Status otherwise.
+func (dst *Int2) Get() interface{} {
+	if dst.Status == Present {
+		return dst.Int
+	}
+	if dst.Status == Null {
+		return nil
+	}
+	return dst.Status
+}
+
+// AssignTo copies src into dst via the shared int64 assignment helper, which
+// handles all numeric destination types and NULL.
+func (src *Int2) AssignTo(dst interface{}) error {
+	return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+// DecodeText parses the decimal text representation of an int2 value.
+// A nil src decodes to SQL NULL.
+func (dst *Int2) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	// bitSize 16 makes ParseInt reject values outside the int16 range.
+	n, err := strconv.ParseInt(string(src), 10, 16)
+	if err != nil {
+		return err
+	}
+
+	*dst = Int2{Int: int16(n), Status: Present}
+	return nil
+}
+
+// DecodeBinary parses the 2-byte big-endian binary wire format of an int2
+// value. A nil src decodes to SQL NULL.
+func (dst *Int2) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	if len(src) != 2 {
+		return errors.Errorf("invalid length for int2: %v", len(src))
+	}
+
+	n := int16(binary.BigEndian.Uint16(src))
+	*dst = Int2{Int: n, Status: Present}
+	return nil
+}
+
+// EncodeText appends the decimal text representation of src to buf.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *Int2) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, strconv.FormatInt(int64(src.Int), 10)...), nil
+}
+
+// EncodeBinary appends the 2-byte big-endian binary wire format of src to
+// buf. It returns (nil, nil) for SQL NULL and errUndefined when undefined.
+func (src *Int2) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return pgio.AppendInt16(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface. int64, string, and
+// []byte sources are supported; int64 values are range-checked against int16.
+func (dst *Int2) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case int64:
+		// Report underflow and overflow with distinct messages.
+		if src < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", src)
+		}
+		if src > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", src)
+		}
+		*dst = Int2{Int: int16(src), Status: Present}
+		return nil
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy the bytes: the driver may reuse src after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int2) Value() (driver.Value, error) {
+	if src.Status == Present {
+		// database/sql only transports int64, so widen.
+		return int64(src.Int), nil
+	}
+	if src.Status == Null {
+		return nil, nil
+	}
+	return nil, errUndefined
+}
+
+// MarshalJSON implements encoding/json Marshaler: the bare number when
+// present, JSON null for SQL NULL, and an error otherwise.
+func (src *Int2) MarshalJSON() ([]byte, error) {
+	switch src.Status {
+	case Present:
+		return []byte(strconv.FormatInt(int64(src.Int), 10)), nil
+	case Null:
+		return []byte("null"), nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return nil, errBadStatus
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int2_array.go b/vendor/github.com/jackc/pgx/pgtype/int2_array.go
new file mode 100644
index 0000000..f50d927
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int2_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Int2Array represents a PostgreSQL smallint[] (int2[]) value.
+type Int2Array struct {
+	Elements   []Int2           // flattened elements in row-major order
+	Dimensions []ArrayDimension // empty for an empty array
+	Status     Status
+}
+
+// Set converts src ([]int16, []uint16, or a type whose underlying type is
+// such a slice) into a one-dimensional Int2Array. A nil slice maps to SQL
+// NULL; an empty slice maps to an empty (zero-dimension) array.
+func (dst *Int2Array) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []int16:
+		if value == nil {
+			*dst = Int2Array{Status: Null}
+		} else if len(value) == 0 {
+			*dst = Int2Array{Status: Present}
+		} else {
+			elements := make([]Int2, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = Int2Array{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	case []uint16:
+		if value == nil {
+			*dst = Int2Array{Status: Null}
+		} else if len(value) == 0 {
+			*dst = Int2Array{Status: Present}
+		} else {
+			elements := make([]Int2, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = Int2Array{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Retry with the underlying slice type (e.g. a named slice type).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Int2Array", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying value: the array itself when present, nil for
+// SQL NULL, and the raw Status otherwise.
+func (dst *Int2Array) Get() interface{} {
+	if dst.Status == Present {
+		return dst
+	}
+	if dst.Status == Null {
+		return nil
+	}
+	return dst.Status
+}
+
+// AssignTo copies src into dst, which must be a *[]int16 or *[]uint16 (or a
+// pointer type GetAssignToDstType can convert). NULL assigns the
+// destination's nil/zero value via NullAssignTo.
+func (src *Int2Array) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]int16:
+			*v = make([]int16, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		case *[]uint16:
+			*v = make([]uint16, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText parses the PostgreSQL text representation of an int2[] value.
+// A nil src decodes to SQL NULL.
+func (dst *Int2Array) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Int2
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Int2, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Int2
+			var elemSrc []byte
+			// The literal "NULL" marks a NULL element.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = Int2Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary parses the PostgreSQL binary wire format of an int2[] value:
+// an array header followed by (int32 length, payload) pairs per element.
+// A nil src decodes to SQL NULL.
+func (dst *Int2Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	// Zero dimensions means an empty array with no element data.
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = Int2Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Int2, elementCount)
+
+	for i := range elements {
+		// Each element is prefixed with its int32 byte length; -1 means NULL.
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = Int2Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text representation of src to buf.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *Int2Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		// A nil element buffer signals a NULL element.
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary wire format of src to buf.
+// The int2 element OID is looked up from ci, so a nil ConnInfo fails.
+// It returns (nil, nil) for SQL NULL and errUndefined for an undefined value.
+func (src *Int2Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("int2"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "int2")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		// Reserve a -1 (NULL) length placeholder; patch it with the real
+		// length after encoding unless the element really is NULL.
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int2Array) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch v := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(v))
+	case []byte:
+		// Copy the bytes: the driver may reuse src after Scan returns.
+		data := make([]byte, len(v))
+		copy(data, v)
+		return dst.DecodeText(nil, data)
+	default:
+		return errors.Errorf("cannot scan %T", src)
+	}
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int2Array) Value() (driver.Value, error) {
+	encoded, err := src.EncodeText(nil, nil)
+	switch {
+	case err != nil:
+		return nil, err
+	case encoded == nil:
+		// NULL array.
+		return nil, nil
+	default:
+		return string(encoded), nil
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4.go b/vendor/github.com/jackc/pgx/pgtype/int4.go
new file mode 100644
index 0000000..261c511
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4.go
@@ -0,0 +1,213 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4 struct {
+ Int int32
+ Status Status
+}
+
+func (dst *Int4) Set(src interface{}) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint8:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int16:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint16:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int32:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint32:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int64:
+ if value < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int:
+ if value < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ *dst = Int4{Int: int32(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int4", value)
+ }
+
+ return nil
+}
+
+func (dst *Int4) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *Int4) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4{Int: int32(n), Status: Present}
+ return nil
+}
+
+func (dst *Int4) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ *dst = Int4{Int: n, Status: Present}
+ return nil
+}
+
+func (src *Int4) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatInt(int64(src.Int), 10)...), nil
+}
+
+func (src *Int4) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendInt32(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case int64:
+ if src < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", src)
+ }
+ if src > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", src)
+ }
+ *dst = Int4{Int: int32(src), Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int4) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Int), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Int4) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return []byte(strconv.FormatInt(int64(src.Int), 10)), nil
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Int4) UnmarshalJSON(b []byte) error {
+ var n int32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4{Int: n, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4_array.go b/vendor/github.com/jackc/pgx/pgtype/int4_array.go
new file mode 100644
index 0000000..6c9418b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4Array struct {
+ Elements []Int4
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Int4Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []int32:
+ if value == nil {
+ *dst = Int4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int4Array{Status: Present}
+ } else {
+ elements := make([]Int4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []uint32:
+ if value == nil {
+ *dst = Int4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int4Array{Status: Present}
+ } else {
+ elements := make([]Int4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int4Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Int4Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]int32:
+ *v = make([]int32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]uint32:
+ *v = make([]uint32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Int4Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Int4
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Int4, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Int4
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Int4Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Int4Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Int4Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Int4, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Int4Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Int4Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Int4Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("int4"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "int4")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int4Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4range.go b/vendor/github.com/jackc/pgx/pgtype/int4range.go
new file mode 100644
index 0000000..95ad152
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4range.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4range struct {
+ Lower Int4
+ Upper Int4
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Int4range) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Int4range", src)
+}
+
+func (dst *Int4range) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4range) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Int4range) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4range{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Int4range) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4range{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Int4range) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Int4range) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4range) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int4range) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8.go b/vendor/github.com/jackc/pgx/pgtype/int8.go
new file mode 100644
index 0000000..00a8cd0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8.go
@@ -0,0 +1,199 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int8 struct {
+ Int int64
+ Status Status
+}
+
+func (dst *Int8) Set(src interface{}) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint8:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int16:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint16:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int32:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint32:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int64:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int:
+ if int64(value) < math.MinInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ if int64(value) > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint:
+ if uint64(value) > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ *dst = Int8{Int: num, Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int8", value)
+ }
+
+ return nil
+}
+
+func (dst *Int8) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int8) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *Int8) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8{Int: n, Status: Present}
+ return nil
+}
+
+func (dst *Int8) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ *dst = Int8{Int: n, Status: Present}
+ return nil
+}
+
+func (src *Int8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatInt(src.Int, 10)...), nil
+}
+
+func (src *Int8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendInt64(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case int64:
+ *dst = Int8{Int: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int8) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Int), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Int8) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return []byte(strconv.FormatInt(src.Int, 10)), nil
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Int8) UnmarshalJSON(b []byte) error {
+ var n int64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8{Int: n, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8_array.go b/vendor/github.com/jackc/pgx/pgtype/int8_array.go
new file mode 100644
index 0000000..bb6ce00
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int8Array struct {
+ Elements []Int8
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Int8Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []int64:
+ if value == nil {
+ *dst = Int8Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int8Array{Status: Present}
+ } else {
+ elements := make([]Int8, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int8Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []uint64:
+ if value == nil {
+ *dst = Int8Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int8Array{Status: Present}
+ } else {
+ elements := make([]Int8, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int8Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int8Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Int8Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int8Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]int64:
+ *v = make([]int64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]uint64:
+ *v = make([]uint64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Int8Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Int8
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Int8, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Int8
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Int8Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Int8Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Int8Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Int8, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Int8Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Int8Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Int8Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("int8"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "int8")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int8Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8range.go b/vendor/github.com/jackc/pgx/pgtype/int8range.go
new file mode 100644
index 0000000..61d860d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8range.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int8range struct {
+ Lower Int8
+ Upper Int8
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Int8range) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Int8range", src)
+}
+
+func (dst *Int8range) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int8range) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Int8range) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8range{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8range{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Int8range) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8range{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8range{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Int8range) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Int8range) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8range) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int8range{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int8range) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/interval.go b/vendor/github.com/jackc/pgx/pgtype/interval.go
new file mode 100644
index 0000000..799ce53
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/interval.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const (
+ microsecondsPerSecond = 1000000
+ microsecondsPerMinute = 60 * microsecondsPerSecond
+ microsecondsPerHour = 60 * microsecondsPerMinute
+)
+
+type Interval struct {
+ Microseconds int64
+ Days int32
+ Months int32
+ Status Status
+}
+
+func (dst *Interval) Set(src interface{}) error {
+ if src == nil {
+ *dst = Interval{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case time.Duration:
+ *dst = Interval{Microseconds: int64(value) / 1000, Status: Present}
+ default:
+ if originalSrc, ok := underlyingPtrType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Interval", value)
+ }
+
+ return nil
+}
+
+func (dst *Interval) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Interval) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *time.Duration:
+ if src.Days > 0 || src.Months > 0 {
+ return errors.Errorf("interval with months or days cannot be decoded into %T", dst)
+ }
+ *v = time.Duration(src.Microseconds) * time.Microsecond
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Interval) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Interval{Status: Null}
+ return nil
+ }
+
+ var microseconds int64
+ var days int32
+ var months int32
+
+ parts := strings.Split(string(src), " ")
+
+ for i := 0; i < len(parts)-1; i += 2 {
+ scalar, err := strconv.ParseInt(parts[i], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval format")
+ }
+
+ switch parts[i+1] {
+ case "year", "years":
+ months += int32(scalar * 12)
+ case "mon", "mons":
+ months += int32(scalar)
+ case "day", "days":
+ days = int32(scalar)
+ }
+ }
+
+ if len(parts)%2 == 1 {
+ timeParts := strings.SplitN(parts[len(parts)-1], ":", 3)
+ if len(timeParts) != 3 {
+ return errors.Errorf("bad interval format")
+ }
+
+ var negative bool
+ if timeParts[0][0] == '-' {
+ negative = true
+ timeParts[0] = timeParts[0][1:]
+ }
+
+ hours, err := strconv.ParseInt(timeParts[0], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval hour format: %s", timeParts[0])
+ }
+
+ minutes, err := strconv.ParseInt(timeParts[1], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval minute format: %s", timeParts[1])
+ }
+
+ secondParts := strings.SplitN(timeParts[2], ".", 2)
+
+ seconds, err := strconv.ParseInt(secondParts[0], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval second format: %s", secondParts[0])
+ }
+
+ var uSeconds int64
+ if len(secondParts) == 2 {
+ uSeconds, err = strconv.ParseInt(secondParts[1], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval decimal format: %s", secondParts[1])
+ }
+
+ for i := 0; i < 6-len(secondParts[1]); i++ {
+ uSeconds *= 10
+ }
+ }
+
+ microseconds = hours * microsecondsPerHour
+ microseconds += minutes * microsecondsPerMinute
+ microseconds += seconds * microsecondsPerSecond
+ microseconds += uSeconds
+
+ if negative {
+ microseconds = -microseconds
+ }
+ }
+
+ *dst = Interval{Months: months, Days: days, Microseconds: microseconds, Status: Present}
+ return nil
+}
+
+func (dst *Interval) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Interval{Status: Null}
+ return nil
+ }
+
+ if len(src) != 16 {
+ return errors.Errorf("Received an invalid size for a interval: %d", len(src))
+ }
+
+ microseconds := int64(binary.BigEndian.Uint64(src))
+ days := int32(binary.BigEndian.Uint32(src[8:]))
+ months := int32(binary.BigEndian.Uint32(src[12:]))
+
+ *dst = Interval{Microseconds: microseconds, Days: days, Months: months, Status: Present}
+ return nil
+}
+
+func (src *Interval) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if src.Months != 0 {
+ buf = append(buf, strconv.FormatInt(int64(src.Months), 10)...)
+ buf = append(buf, " mon "...)
+ }
+
+ if src.Days != 0 {
+ buf = append(buf, strconv.FormatInt(int64(src.Days), 10)...)
+ buf = append(buf, " day "...)
+ }
+
+ absMicroseconds := src.Microseconds
+ if absMicroseconds < 0 {
+ absMicroseconds = -absMicroseconds
+ buf = append(buf, '-')
+ }
+
+ hours := absMicroseconds / microsecondsPerHour
+ minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
+ seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
+ microseconds := absMicroseconds % microsecondsPerSecond
+
+ timeStr := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, microseconds)
+ return append(buf, timeStr...), nil
+}
+
+// EncodeBinary encodes src into w.
+func (src *Interval) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt64(buf, src.Microseconds)
+ buf = pgio.AppendInt32(buf, src.Days)
+ return pgio.AppendInt32(buf, src.Months), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Interval) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Interval{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Interval) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/json.go b/vendor/github.com/jackc/pgx/pgtype/json.go
new file mode 100644
index 0000000..ef8231b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/json.go
@@ -0,0 +1,161 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
+type JSON struct {
+ Bytes []byte
+ Status Status
+}
+
+func (dst *JSON) Set(src interface{}) error {
+ if src == nil {
+ *dst = JSON{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case string:
+ *dst = JSON{Bytes: []byte(value), Status: Present}
+ case *string:
+ if value == nil {
+ *dst = JSON{Status: Null}
+ } else {
+ *dst = JSON{Bytes: []byte(*value), Status: Present}
+ }
+ case []byte:
+ if value == nil {
+ *dst = JSON{Status: Null}
+ } else {
+ *dst = JSON{Bytes: value, Status: Present}
+ }
+ // Encode* methods are defined on *JSON. If JSON is passed directly then the
+ // struct itself would be encoded instead of Bytes. This is clearly a footgun
+ // so detect and return an error. See https://github.com/jackc/pgx/issues/350.
+ case JSON:
+ return errors.New("use pointer to pgtype.JSON instead of value")
+ // Same as above but for JSONB (because they share implementation)
+ case JSONB:
+ return errors.New("use pointer to pgtype.JSONB instead of value")
+
+ default:
+ buf, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ *dst = JSON{Bytes: buf, Status: Present}
+ }
+
+ return nil
+}
+
+func (dst *JSON) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ var i interface{}
+ err := json.Unmarshal(dst.Bytes, &i)
+ if err != nil {
+ return dst
+ }
+ return i
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *JSON) AssignTo(dst interface{}) error {
+ switch v := dst.(type) {
+ case *string:
+ if src.Status != Present {
+ v = nil
+ } else {
+ *v = string(src.Bytes)
+ }
+ case **string:
+ *v = new(string)
+ return src.AssignTo(*v)
+ case *[]byte:
+ if src.Status != Present {
+ *v = nil
+ } else {
+ buf := make([]byte, len(src.Bytes))
+ copy(buf, src.Bytes)
+ *v = buf
+ }
+ default:
+ data := src.Bytes
+ if data == nil || src.Status != Present {
+ data = []byte("null")
+ }
+
+ return json.Unmarshal(data, dst)
+ }
+
+ return nil
+}
+
+func (dst *JSON) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = JSON{Status: Null}
+ return nil
+ }
+
+ *dst = JSON{Bytes: src, Status: Present}
+ return nil
+}
+
+func (dst *JSON) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return dst.DecodeText(ci, src)
+}
+
+func (src *JSON) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Bytes...), nil
+}
+
+func (src *JSON) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return src.EncodeText(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *JSON) Scan(src interface{}) error {
+ if src == nil {
+ *dst = JSON{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *JSON) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return string(src.Bytes), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/jsonb.go b/vendor/github.com/jackc/pgx/pgtype/jsonb.go
new file mode 100644
index 0000000..c315c58
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/jsonb.go
@@ -0,0 +1,70 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+type JSONB JSON
+
+func (dst *JSONB) Set(src interface{}) error {
+ return (*JSON)(dst).Set(src)
+}
+
+func (dst *JSONB) Get() interface{} {
+ return (*JSON)(dst).Get()
+}
+
+func (src *JSONB) AssignTo(dst interface{}) error {
+ return (*JSON)(src).AssignTo(dst)
+}
+
+func (dst *JSONB) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*JSON)(dst).DecodeText(ci, src)
+}
+
+func (dst *JSONB) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = JSONB{Status: Null}
+ return nil
+ }
+
+ if len(src) == 0 {
+ return errors.Errorf("jsonb too short")
+ }
+
+ if src[0] != 1 {
+ return errors.Errorf("unknown jsonb version number %d", src[0])
+ }
+
+ *dst = JSONB{Bytes: src[1:], Status: Present}
+ return nil
+
+}
+
+func (src *JSONB) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*JSON)(src).EncodeText(ci, buf)
+}
+
+func (src *JSONB) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, 1)
+ return append(buf, src.Bytes...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *JSONB) Scan(src interface{}) error {
+ return (*JSON)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *JSONB) Value() (driver.Value, error) {
+ return (*JSON)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/line.go b/vendor/github.com/jackc/pgx/pgtype/line.go
new file mode 100644
index 0000000..f6eadf0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/line.go
@@ -0,0 +1,143 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Line struct {
+ A, B, C float64
+ Status Status
+}
+
+func (dst *Line) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Line", src)
+}
+
+func (dst *Line) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Line) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Line) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Line{Status: Null}
+ return nil
+ }
+
+ if len(src) < 7 {
+ return errors.Errorf("invalid length for Line: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 3)
+ if len(parts) < 3 {
+ return errors.Errorf("invalid format for line")
+ }
+
+ a, err := strconv.ParseFloat(parts[0], 64)
+ if err != nil {
+ return err
+ }
+
+ b, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return err
+ }
+
+ c, err := strconv.ParseFloat(parts[2], 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Line{A: a, B: b, C: c, Status: Present}
+ return nil
+}
+
+func (dst *Line) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Line{Status: Null}
+ return nil
+ }
+
+ if len(src) != 24 {
+ return errors.Errorf("invalid length for Line: %v", len(src))
+ }
+
+ a := binary.BigEndian.Uint64(src)
+ b := binary.BigEndian.Uint64(src[8:])
+ c := binary.BigEndian.Uint64(src[16:])
+
+ *dst = Line{
+ A: math.Float64frombits(a),
+ B: math.Float64frombits(b),
+ C: math.Float64frombits(c),
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Line) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, fmt.Sprintf(`{%f,%f,%f}`, src.A, src.B, src.C)...), nil
+}
+
+func (src *Line) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.A))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.B))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.C))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Line) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Line{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Line) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/lseg.go b/vendor/github.com/jackc/pgx/pgtype/lseg.go
new file mode 100644
index 0000000..a9d740c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/lseg.go
@@ -0,0 +1,161 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Lseg struct {
+ P [2]Vec2
+ Status Status
+}
+
+func (dst *Lseg) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Lseg", src)
+}
+
+func (dst *Lseg) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Lseg) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Lseg) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Lseg{Status: Null}
+ return nil
+ }
+
+ if len(src) < 11 {
+ return errors.Errorf("invalid length for Lseg: %v", len(src))
+ }
+
+ str := string(src[2:])
+
+ var end int
+ end = strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-2]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Lseg{P: [2]Vec2{{x1, y1}, {x2, y2}}, Status: Present}
+ return nil
+}
+
+func (dst *Lseg) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Lseg{Status: Null}
+ return nil
+ }
+
+ if len(src) != 32 {
+ return errors.Errorf("invalid length for Lseg: %v", len(src))
+ }
+
+ x1 := binary.BigEndian.Uint64(src)
+ y1 := binary.BigEndian.Uint64(src[8:])
+ x2 := binary.BigEndian.Uint64(src[16:])
+ y2 := binary.BigEndian.Uint64(src[24:])
+
+ *dst = Lseg{
+ P: [2]Vec2{
+ {math.Float64frombits(x1), math.Float64frombits(y1)},
+ {math.Float64frombits(x2), math.Float64frombits(y2)},
+ },
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Lseg) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%f,%f),(%f,%f)`,
+ src.P[0].X, src.P[0].Y, src.P[1].X, src.P[1].Y)...)
+ return buf, nil
+}
+
+func (src *Lseg) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].Y))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Lseg) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Lseg{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Lseg) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/macaddr.go b/vendor/github.com/jackc/pgx/pgtype/macaddr.go
new file mode 100644
index 0000000..4c6e221
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/macaddr.go
@@ -0,0 +1,154 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+
+ "github.com/pkg/errors"
+)
+
+type Macaddr struct {
+ Addr net.HardwareAddr
+ Status Status
+}
+
+func (dst *Macaddr) Set(src interface{}) error {
+ if src == nil {
+ *dst = Macaddr{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case net.HardwareAddr:
+ addr := make(net.HardwareAddr, len(value))
+ copy(addr, value)
+ *dst = Macaddr{Addr: addr, Status: Present}
+ case string:
+ addr, err := net.ParseMAC(value)
+ if err != nil {
+ return err
+ }
+ *dst = Macaddr{Addr: addr, Status: Present}
+ default:
+ if originalSrc, ok := underlyingPtrType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Macaddr", value)
+ }
+
+ return nil
+}
+
+func (dst *Macaddr) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Addr
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Macaddr) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *net.HardwareAddr:
+ *v = make(net.HardwareAddr, len(src.Addr))
+ copy(*v, src.Addr)
+ return nil
+ case *string:
+ *v = src.Addr.String()
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Macaddr) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Macaddr{Status: Null}
+ return nil
+ }
+
+ addr, err := net.ParseMAC(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Macaddr{Addr: addr, Status: Present}
+ return nil
+}
+
+func (dst *Macaddr) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Macaddr{Status: Null}
+ return nil
+ }
+
+ if len(src) != 6 {
+ return errors.Errorf("Received an invalid size for a macaddr: %d", len(src))
+ }
+
+ addr := make(net.HardwareAddr, 6)
+ copy(addr, src)
+
+ *dst = Macaddr{Addr: addr, Status: Present}
+
+ return nil
+}
+
+func (src *Macaddr) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Addr.String()...), nil
+}
+
+// EncodeBinary encodes src into w.
+func (src *Macaddr) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Addr...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Macaddr) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Macaddr{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Macaddr) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/name.go b/vendor/github.com/jackc/pgx/pgtype/name.go
new file mode 100644
index 0000000..af064a8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/name.go
@@ -0,0 +1,58 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// Name is a type used for PostgreSQL's special 63-byte
+// name data type, used for identifiers like table names.
+// The pg_class.relname column is a good example of where the
+// name data type is used.
+//
+// Note that the underlying Go data type of pgx.Name is string,
+// so there is no way to enforce the 63-byte length. Inputting
+// a longer name into PostgreSQL will result in silent truncation
+// to 63 bytes.
+//
+// Also, if you have custom-compiled PostgreSQL and set
+// NAMEDATALEN to a different value, obviously that number of
+// bytes applies, rather than the default 63.
+type Name Text
+
+func (dst *Name) Set(src interface{}) error {
+ return (*Text)(dst).Set(src)
+}
+
+func (dst *Name) Get() interface{} {
+ return (*Text)(dst).Get()
+}
+
+func (src *Name) AssignTo(dst interface{}) error {
+ return (*Text)(src).AssignTo(dst)
+}
+
+func (dst *Name) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeText(ci, src)
+}
+
+func (dst *Name) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeBinary(ci, src)
+}
+
+func (src *Name) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Text)(src).EncodeText(ci, buf)
+}
+
+func (src *Name) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Text)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Name) Scan(src interface{}) error {
+ return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Name) Value() (driver.Value, error) {
+ return (*Text)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numeric.go b/vendor/github.com/jackc/pgx/pgtype/numeric.go
new file mode 100644
index 0000000..fb63df7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numeric.go
@@ -0,0 +1,600 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// PostgreSQL internal numeric storage uses 16-bit "digits" with base of 10,000
+const nbase = 10000
+
+var big0 *big.Int = big.NewInt(0)
+var big1 *big.Int = big.NewInt(1)
+var big10 *big.Int = big.NewInt(10)
+var big100 *big.Int = big.NewInt(100)
+var big1000 *big.Int = big.NewInt(1000)
+
+var bigMaxInt8 *big.Int = big.NewInt(math.MaxInt8)
+var bigMinInt8 *big.Int = big.NewInt(math.MinInt8)
+var bigMaxInt16 *big.Int = big.NewInt(math.MaxInt16)
+var bigMinInt16 *big.Int = big.NewInt(math.MinInt16)
+var bigMaxInt32 *big.Int = big.NewInt(math.MaxInt32)
+var bigMinInt32 *big.Int = big.NewInt(math.MinInt32)
+var bigMaxInt64 *big.Int = big.NewInt(math.MaxInt64)
+var bigMinInt64 *big.Int = big.NewInt(math.MinInt64)
+var bigMaxInt *big.Int = big.NewInt(int64(maxInt))
+var bigMinInt *big.Int = big.NewInt(int64(minInt))
+
+var bigMaxUint8 *big.Int = big.NewInt(math.MaxUint8)
+var bigMaxUint16 *big.Int = big.NewInt(math.MaxUint16)
+var bigMaxUint32 *big.Int = big.NewInt(math.MaxUint32)
+var bigMaxUint64 *big.Int = (&big.Int{}).SetUint64(uint64(math.MaxUint64))
+var bigMaxUint *big.Int = (&big.Int{}).SetUint64(uint64(maxUint))
+
+var bigNBase *big.Int = big.NewInt(nbase)
+var bigNBaseX2 *big.Int = big.NewInt(nbase * nbase)
+var bigNBaseX3 *big.Int = big.NewInt(nbase * nbase * nbase)
+var bigNBaseX4 *big.Int = big.NewInt(nbase * nbase * nbase * nbase)
+
+type Numeric struct {
+ Int *big.Int
+ Exp int32
+ Status Status
+}
+
+func (dst *Numeric) Set(src interface{}) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case float32:
+ num, exp, err := parseNumericString(strconv.FormatFloat(float64(value), 'f', -1, 64))
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ case float64:
+ num, exp, err := parseNumericString(strconv.FormatFloat(value, 'f', -1, 64))
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ case int8:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint8:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int16:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint16:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int32:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint32:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int64:
+ *dst = Numeric{Int: big.NewInt(value), Status: Present}
+ case uint64:
+ *dst = Numeric{Int: (&big.Int{}).SetUint64(value), Status: Present}
+ case int:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint:
+ *dst = Numeric{Int: (&big.Int{}).SetUint64(uint64(value)), Status: Present}
+ case string:
+ num, exp, err := parseNumericString(value)
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Numeric", value)
+ }
+
+ return nil
+}
+
+func (dst *Numeric) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Numeric) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *float32:
+ f, err := src.toFloat64()
+ if err != nil {
+ return err
+ }
+ return float64AssignTo(f, src.Status, dst)
+ case *float64:
+ f, err := src.toFloat64()
+ if err != nil {
+ return err
+ }
+ return float64AssignTo(f, src.Status, dst)
+ case *int:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int(normalizedInt.Int64())
+ case *int8:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt8) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt8) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int8(normalizedInt.Int64())
+ case *int16:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt16) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt16) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int16(normalizedInt.Int64())
+ case *int32:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt32) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt32) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int32(normalizedInt.Int64())
+ case *int64:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt64) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt64) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = normalizedInt.Int64()
+ case *uint:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint(normalizedInt.Uint64())
+ case *uint8:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint8) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint8(normalizedInt.Uint64())
+ case *uint16:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint16) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint16(normalizedInt.Uint64())
+ case *uint32:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint32) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint32(normalizedInt.Uint64())
+ case *uint64:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint64) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = normalizedInt.Uint64()
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return nil
+}
+
+func (dst *Numeric) toBigInt() (*big.Int, error) {
+ if dst.Exp == 0 {
+ return dst.Int, nil
+ }
+
+ num := &big.Int{}
+ num.Set(dst.Int)
+ if dst.Exp > 0 {
+ mul := &big.Int{}
+ mul.Exp(big10, big.NewInt(int64(dst.Exp)), nil)
+ num.Mul(num, mul)
+ return num, nil
+ }
+
+ div := &big.Int{}
+ div.Exp(big10, big.NewInt(int64(-dst.Exp)), nil)
+ remainder := &big.Int{}
+ num.DivMod(num, div, remainder)
+ if remainder.Cmp(big0) != 0 {
+ return nil, errors.Errorf("cannot convert %v to integer", dst)
+ }
+ return num, nil
+}
+
+func (src *Numeric) toFloat64() (float64, error) {
+ f, err := strconv.ParseFloat(src.Int.String(), 64)
+ if err != nil {
+ return 0, err
+ }
+ if src.Exp > 0 {
+ for i := 0; i < int(src.Exp); i++ {
+ f *= 10
+ }
+ } else if src.Exp < 0 {
+ for i := 0; i > int(src.Exp); i-- {
+ f /= 10
+ }
+ }
+ return f, nil
+}
+
+func (dst *Numeric) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ num, exp, err := parseNumericString(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ return nil
+}
+
+func parseNumericString(str string) (n *big.Int, exp int32, err error) {
+ parts := strings.SplitN(str, ".", 2)
+ digits := strings.Join(parts, "")
+
+ if len(parts) > 1 {
+ exp = int32(-len(parts[1]))
+ } else {
+ for len(digits) > 1 && digits[len(digits)-1] == '0' {
+ digits = digits[:len(digits)-1]
+ exp++
+ }
+ }
+
+ accum := &big.Int{}
+ if _, ok := accum.SetString(digits, 10); !ok {
+ return nil, 0, errors.Errorf("%s is not a number", str)
+ }
+
+ return accum, exp, nil
+}
+
+func (dst *Numeric) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ if len(src) < 8 {
+ return errors.Errorf("numeric incomplete %v", src)
+ }
+
+ rp := 0
+ ndigits := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if ndigits == 0 {
+ *dst = Numeric{Int: big.NewInt(0), Status: Present}
+ return nil
+ }
+
+ weight := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ sign := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ dscale := int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if len(src[rp:]) < int(ndigits)*2 {
+ return errors.Errorf("numeric incomplete %v", src)
+ }
+
+ accum := &big.Int{}
+
+ for i := 0; i < int(ndigits+3)/4; i++ {
+ int64accum, bytesRead, digitsRead := nbaseDigitsToInt64(src[rp:])
+ rp += bytesRead
+
+ if i > 0 {
+ var mul *big.Int
+ switch digitsRead {
+ case 1:
+ mul = bigNBase
+ case 2:
+ mul = bigNBaseX2
+ case 3:
+ mul = bigNBaseX3
+ case 4:
+ mul = bigNBaseX4
+ default:
+ return errors.Errorf("invalid digitsRead: %d (this can't happen)", digitsRead)
+ }
+ accum.Mul(accum, mul)
+ }
+
+ accum.Add(accum, big.NewInt(int64accum))
+ }
+
+ exp := (int32(weight) - int32(ndigits) + 1) * 4
+
+ if dscale > 0 {
+ fracNBaseDigits := ndigits - weight - 1
+ fracDecimalDigits := fracNBaseDigits * 4
+
+ if dscale > fracDecimalDigits {
+ multCount := int(dscale - fracDecimalDigits)
+ for i := 0; i < multCount; i++ {
+ accum.Mul(accum, big10)
+ exp--
+ }
+ } else if dscale < fracDecimalDigits {
+ divCount := int(fracDecimalDigits - dscale)
+ for i := 0; i < divCount; i++ {
+ accum.Div(accum, big10)
+ exp++
+ }
+ }
+ }
+
+ reduced := &big.Int{}
+ remainder := &big.Int{}
+ if exp >= 0 {
+ for {
+ reduced.DivMod(accum, big10, remainder)
+ if remainder.Cmp(big0) != 0 {
+ break
+ }
+ accum.Set(reduced)
+ exp++
+ }
+ }
+
+ if sign != 0 {
+ accum.Neg(accum)
+ }
+
+ *dst = Numeric{Int: accum, Exp: exp, Status: Present}
+
+ return nil
+
+}
+
+func nbaseDigitsToInt64(src []byte) (accum int64, bytesRead, digitsRead int) {
+ digits := len(src) / 2
+ if digits > 4 {
+ digits = 4
+ }
+
+ rp := 0
+
+ for i := 0; i < digits; i++ {
+ if i > 0 {
+ accum *= nbase
+ }
+ accum += int64(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return accum, rp, digits
+}
+
+func (src *Numeric) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, src.Int.String()...)
+ buf = append(buf, 'e')
+ buf = append(buf, strconv.FormatInt(int64(src.Exp), 10)...)
+ return buf, nil
+}
+
+func (src *Numeric) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var sign int16
+ if src.Int.Cmp(big0) < 0 {
+ sign = 16384
+ }
+
+ absInt := &big.Int{}
+ wholePart := &big.Int{}
+ fracPart := &big.Int{}
+ remainder := &big.Int{}
+ absInt.Abs(src.Int)
+
+ // Normalize absInt and exp to where exp is always a multiple of 4. This makes
+ // converting to 16-bit base 10,000 digits easier.
+ var exp int32
+ switch src.Exp % 4 {
+ case 1, -3:
+ exp = src.Exp - 1
+ absInt.Mul(absInt, big10)
+ case 2, -2:
+ exp = src.Exp - 2
+ absInt.Mul(absInt, big100)
+ case 3, -1:
+ exp = src.Exp - 3
+ absInt.Mul(absInt, big1000)
+ default:
+ exp = src.Exp
+ }
+
+ if exp < 0 {
+ divisor := &big.Int{}
+ divisor.Exp(big10, big.NewInt(int64(-exp)), nil)
+ wholePart.DivMod(absInt, divisor, fracPart)
+ fracPart.Add(fracPart, divisor)
+ } else {
+ wholePart = absInt
+ }
+
+ var wholeDigits, fracDigits []int16
+
+ for wholePart.Cmp(big0) != 0 {
+ wholePart.DivMod(wholePart, bigNBase, remainder)
+ wholeDigits = append(wholeDigits, int16(remainder.Int64()))
+ }
+
+ if fracPart.Cmp(big0) != 0 {
+ for fracPart.Cmp(big1) != 0 {
+ fracPart.DivMod(fracPart, bigNBase, remainder)
+ fracDigits = append(fracDigits, int16(remainder.Int64()))
+ }
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(wholeDigits)+len(fracDigits)))
+
+ var weight int16
+ if len(wholeDigits) > 0 {
+ weight = int16(len(wholeDigits) - 1)
+ if exp > 0 {
+ weight += int16(exp / 4)
+ }
+ } else {
+ weight = int16(exp/4) - 1 + int16(len(fracDigits))
+ }
+ buf = pgio.AppendInt16(buf, weight)
+
+ buf = pgio.AppendInt16(buf, sign)
+
+ var dscale int16
+ if src.Exp < 0 {
+ dscale = int16(-src.Exp)
+ }
+ buf = pgio.AppendInt16(buf, dscale)
+
+ for i := len(wholeDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, wholeDigits[i])
+ }
+
+ for i := len(fracDigits) - 1; i >= 0; i-- {
+ buf = pgio.AppendInt16(buf, fracDigits[i])
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Numeric) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ // TODO
+ // *dst = Numeric{Float: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Numeric) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numeric_array.go b/vendor/github.com/jackc/pgx/pgtype/numeric_array.go
new file mode 100644
index 0000000..d991234
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numeric_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type NumericArray struct {
+ Elements []Numeric
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *NumericArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = NumericArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []float32:
+ if value == nil {
+ *dst = NumericArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = NumericArray{Status: Present}
+ } else {
+ elements := make([]Numeric, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = NumericArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []float64:
+ if value == nil {
+ *dst = NumericArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = NumericArray{Status: Present}
+ } else {
+ elements := make([]Numeric, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = NumericArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to NumericArray", value)
+ }
+
+ return nil
+}
+
+func (dst *NumericArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *NumericArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]float32:
+ *v = make([]float32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]float64:
+ *v = make([]float64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *NumericArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = NumericArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Numeric
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Numeric, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Numeric
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = NumericArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *NumericArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = NumericArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = NumericArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Numeric, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = NumericArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *NumericArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *NumericArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("numeric"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "numeric")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *NumericArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *NumericArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numrange.go b/vendor/github.com/jackc/pgx/pgtype/numrange.go
new file mode 100644
index 0000000..aaed62c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numrange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Numrange struct {
+ Lower Numeric
+ Upper Numeric
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Numrange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Numrange", src)
+}
+
+func (dst *Numrange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Numrange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Numrange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Numrange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Numrange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Numrange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Numrange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Numrange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Numrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Numrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Numrange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Numrange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Numrange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/oid.go b/vendor/github.com/jackc/pgx/pgtype/oid.go
new file mode 100644
index 0000000..59370d6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/oid.go
@@ -0,0 +1,81 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// OID (Object Identifier Type) is, according to
+// https://www.postgresql.org/docs/current/static/datatype-oid.html, used
+// internally by PostgreSQL as a primary key for various system tables. It is
+// currently implemented as an unsigned four-byte integer. Its definition can be
+// found in src/include/postgres_ext.h in the PostgreSQL sources. Because it is
+// so frequently required to be in a NOT NULL condition OID cannot be NULL. To
+// allow for NULL OIDs use OIDValue.
+type OID uint32
+
+func (dst *OID) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ return errors.Errorf("cannot decode nil into OID")
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = OID(n)
+ return nil
+}
+
+func (dst *OID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ return errors.Errorf("cannot decode nil into OID")
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint32(src)
+ *dst = OID(n)
+ return nil
+}
+
+func (src OID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return append(buf, strconv.FormatUint(uint64(src), 10)...), nil
+}
+
+func (src OID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return pgio.AppendUint32(buf, uint32(src)), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *OID) Scan(src interface{}) error {
+ if src == nil {
+ return errors.Errorf("cannot scan NULL into %T", src)
+ }
+
+ switch src := src.(type) {
+ case int64:
+ *dst = OID(src)
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src OID) Value() (driver.Value, error) {
+ return int64(src), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/oid_value.go b/vendor/github.com/jackc/pgx/pgtype/oid_value.go
new file mode 100644
index 0000000..7eae4bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/oid_value.go
@@ -0,0 +1,55 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// OIDValue (Object Identifier Type) is, according to
+// https://www.postgresql.org/docs/current/static/datatype-OIDValue.html, used
+// internally by PostgreSQL as a primary key for various system tables. It is
+// currently implemented as an unsigned four-byte integer. Its definition can be
+// found in src/include/postgres_ext.h in the PostgreSQL sources.
+type OIDValue pguint32
+
+// Set converts from src to dst. Note that as OIDValue is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *OIDValue) Set(src interface{}) error {
+ return (*pguint32)(dst).Set(src)
+}
+
+func (dst *OIDValue) Get() interface{} {
+ return (*pguint32)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as OIDValue is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *OIDValue) AssignTo(dst interface{}) error {
+ return (*pguint32)(src).AssignTo(dst)
+}
+
+func (dst *OIDValue) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeText(ci, src)
+}
+
+func (dst *OIDValue) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeBinary(ci, src)
+}
+
+func (src *OIDValue) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeText(ci, buf)
+}
+
+func (src *OIDValue) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *OIDValue) Scan(src interface{}) error {
+ return (*pguint32)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *OIDValue) Value() (driver.Value, error) {
+ return (*pguint32)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/path.go b/vendor/github.com/jackc/pgx/pgtype/path.go
new file mode 100644
index 0000000..aa0cee8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/path.go
@@ -0,0 +1,193 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Path struct {
+ P []Vec2
+ Closed bool
+ Status Status
+}
+
+func (dst *Path) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Path", src)
+}
+
+func (dst *Path) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Path) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Path) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ if len(src) < 7 {
+ return errors.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == '('
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ *dst = Path{P: points, Closed: closed, Status: Present}
+ return nil
+}
+
+func (dst *Path) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == 1
+ pointCount := int(binary.BigEndian.Uint32(src[1:]))
+
+ rp := 5
+
+ if 5+pointCount*16 != len(src) {
+ return errors.Errorf("invalid length for Path with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ *dst = Path{
+ P: points,
+ Closed: closed,
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Path) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var startByte, endByte byte
+ if src.Closed {
+ startByte = '('
+ endByte = ')'
+ } else {
+ startByte = '['
+ endByte = ']'
+ }
+ buf = append(buf, startByte)
+
+ for i, p := range src.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%f,%f)`, p.X, p.Y)...)
+ }
+
+ return append(buf, endByte), nil
+}
+
+func (src *Path) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var closeByte byte
+ if src.Closed {
+ closeByte = 1
+ }
+ buf = append(buf, closeByte)
+
+ buf = pgio.AppendInt32(buf, int32(len(src.P)))
+
+ for _, p := range src.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Path) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Path) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/pgtype/pgtype.go
new file mode 100644
index 0000000..2643314
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/pgtype.go
@@ -0,0 +1,280 @@
+package pgtype
+
+import (
+ "reflect"
+
+ "github.com/pkg/errors"
+)
+
+// PostgreSQL oids for common types
+const (
+ BoolOID = 16
+ ByteaOID = 17
+ CharOID = 18
+ NameOID = 19
+ Int8OID = 20
+ Int2OID = 21
+ Int4OID = 23
+ TextOID = 25
+ OIDOID = 26
+ TIDOID = 27
+ XIDOID = 28
+ CIDOID = 29
+ JSONOID = 114
+ CIDROID = 650
+ CIDRArrayOID = 651
+ Float4OID = 700
+ Float8OID = 701
+ UnknownOID = 705
+ InetOID = 869
+ BoolArrayOID = 1000
+ Int2ArrayOID = 1005
+ Int4ArrayOID = 1007
+ TextArrayOID = 1009
+ ByteaArrayOID = 1001
+ BPCharArrayOID = 1014
+ VarcharArrayOID = 1015
+ Int8ArrayOID = 1016
+ Float4ArrayOID = 1021
+ Float8ArrayOID = 1022
+ ACLItemOID = 1033
+ ACLItemArrayOID = 1034
+ InetArrayOID = 1041
+ BPCharOID = 1042
+ VarcharOID = 1043
+ DateOID = 1082
+ TimestampOID = 1114
+ TimestampArrayOID = 1115
+ DateArrayOID = 1182
+ TimestamptzOID = 1184
+ TimestamptzArrayOID = 1185
+ NumericOID = 1700
+ RecordOID = 2249
+ UUIDOID = 2950
+ UUIDArrayOID = 2951
+ JSONBOID = 3802
+)
+
+type Status byte
+
+const (
+ Undefined Status = iota
+ Null
+ Present
+)
+
+type InfinityModifier int8
+
+const (
+ Infinity InfinityModifier = 1
+ None InfinityModifier = 0
+ NegativeInfinity InfinityModifier = -Infinity
+)
+
+func (im InfinityModifier) String() string {
+ switch im {
+ case None:
+ return "none"
+ case Infinity:
+ return "infinity"
+ case NegativeInfinity:
+ return "-infinity"
+ default:
+ return "invalid"
+ }
+}
+
+type Value interface {
+ // Set converts and assigns src to itself.
+ Set(src interface{}) error
+
+ // Get returns the simplest representation of Value. If the Value is Null or
+ // Undefined that is the return value. If no simpler representation is
+ // possible, then Get() returns Value.
+ Get() interface{}
+
+ // AssignTo converts and assigns the Value to dst. It MUST make a deep copy of
+ // any reference types.
+ AssignTo(dst interface{}) error
+}
+
+type BinaryDecoder interface {
+ // DecodeBinary decodes src into BinaryDecoder. If src is nil then the
+ // original SQL value is NULL. BinaryDecoder takes ownership of src. The
+ // caller MUST not use it again.
+ DecodeBinary(ci *ConnInfo, src []byte) error
+}
+
+type TextDecoder interface {
+ // DecodeText decodes src into TextDecoder. If src is nil then the original
+ // SQL value is NULL. TextDecoder takes ownership of src. The caller MUST not
+ // use it again.
+ DecodeText(ci *ConnInfo, src []byte) error
+}
+
+// BinaryEncoder is implemented by types that can encode themselves into the
+// PostgreSQL binary wire format.
+type BinaryEncoder interface {
+ // EncodeBinary should append the binary format of self to buf. If self is the
+ // SQL value NULL then append nothing and return (nil, nil). The caller of
+ // EncodeBinary is responsible for writing the correct NULL value or the
+ // length of the data written.
+ EncodeBinary(ci *ConnInfo, buf []byte) (newBuf []byte, err error)
+}
+
+// TextEncoder is implemented by types that can encode themselves into the
+// PostgreSQL text wire format.
+type TextEncoder interface {
+ // EncodeText should append the text format of self to buf. If self is the
+ // SQL value NULL then append nothing and return (nil, nil). The caller of
+ // EncodeText is responsible for writing the correct NULL value or the
+ // length of the data written.
+ EncodeText(ci *ConnInfo, buf []byte) (newBuf []byte, err error)
+}
+
+var errUndefined = errors.New("cannot encode status undefined")
+var errBadStatus = errors.New("invalid status")
+
+type DataType struct {
+ Value Value
+ Name string
+ OID OID
+}
+
+type ConnInfo struct {
+ oidToDataType map[OID]*DataType
+ nameToDataType map[string]*DataType
+ reflectTypeToDataType map[reflect.Type]*DataType
+}
+
+func NewConnInfo() *ConnInfo {
+ return &ConnInfo{
+ oidToDataType: make(map[OID]*DataType, 256),
+ nameToDataType: make(map[string]*DataType, 256),
+ reflectTypeToDataType: make(map[reflect.Type]*DataType, 256),
+ }
+}
+
+func (ci *ConnInfo) InitializeDataTypes(nameOIDs map[string]OID) {
+ for name, oid := range nameOIDs {
+ var value Value
+ if t, ok := nameValues[name]; ok {
+ value = reflect.New(reflect.ValueOf(t).Elem().Type()).Interface().(Value)
+ } else {
+ value = &GenericText{}
+ }
+ ci.RegisterDataType(DataType{Value: value, Name: name, OID: oid})
+ }
+}
+
+func (ci *ConnInfo) RegisterDataType(t DataType) {
+ ci.oidToDataType[t.OID] = &t
+ ci.nameToDataType[t.Name] = &t
+ ci.reflectTypeToDataType[reflect.ValueOf(t.Value).Type()] = &t
+}
+
+func (ci *ConnInfo) DataTypeForOID(oid OID) (*DataType, bool) {
+ dt, ok := ci.oidToDataType[oid]
+ return dt, ok
+}
+
+func (ci *ConnInfo) DataTypeForName(name string) (*DataType, bool) {
+ dt, ok := ci.nameToDataType[name]
+ return dt, ok
+}
+
+func (ci *ConnInfo) DataTypeForValue(v Value) (*DataType, bool) {
+ dt, ok := ci.reflectTypeToDataType[reflect.ValueOf(v).Type()]
+ return dt, ok
+}
+
+// DeepCopy makes a deep copy of the ConnInfo.
+func (ci *ConnInfo) DeepCopy() *ConnInfo {
+ ci2 := &ConnInfo{
+ oidToDataType: make(map[OID]*DataType, len(ci.oidToDataType)),
+ nameToDataType: make(map[string]*DataType, len(ci.nameToDataType)),
+ reflectTypeToDataType: make(map[reflect.Type]*DataType, len(ci.reflectTypeToDataType)),
+ }
+
+ for _, dt := range ci.oidToDataType {
+ ci2.RegisterDataType(DataType{
+ Value: reflect.New(reflect.ValueOf(dt.Value).Elem().Type()).Interface().(Value),
+ Name: dt.Name,
+ OID: dt.OID,
+ })
+ }
+
+ return ci2
+}
+
+var nameValues map[string]Value
+
+func init() {
+ nameValues = map[string]Value{
+ "_aclitem": &ACLItemArray{},
+ "_bool": &BoolArray{},
+ "_bpchar": &BPCharArray{},
+ "_bytea": &ByteaArray{},
+ "_cidr": &CIDRArray{},
+ "_date": &DateArray{},
+ "_float4": &Float4Array{},
+ "_float8": &Float8Array{},
+ "_inet": &InetArray{},
+ "_int2": &Int2Array{},
+ "_int4": &Int4Array{},
+ "_int8": &Int8Array{},
+ "_numeric": &NumericArray{},
+ "_text": &TextArray{},
+ "_timestamp": &TimestampArray{},
+ "_timestamptz": &TimestamptzArray{},
+ "_uuid": &UUIDArray{},
+ "_varchar": &VarcharArray{},
+ "aclitem": &ACLItem{},
+ "bit": &Bit{},
+ "bool": &Bool{},
+ "box": &Box{},
+ "bpchar": &BPChar{},
+ "bytea": &Bytea{},
+ "char": &QChar{},
+ "cid": &CID{},
+ "cidr": &CIDR{},
+ "circle": &Circle{},
+ "date": &Date{},
+ "daterange": &Daterange{},
+ "decimal": &Decimal{},
+ "float4": &Float4{},
+ "float8": &Float8{},
+ "hstore": &Hstore{},
+ "inet": &Inet{},
+ "int2": &Int2{},
+ "int4": &Int4{},
+ "int4range": &Int4range{},
+ "int8": &Int8{},
+ "int8range": &Int8range{},
+ "interval": &Interval{},
+ "json": &JSON{},
+ "jsonb": &JSONB{},
+ "line": &Line{},
+ "lseg": &Lseg{},
+ "macaddr": &Macaddr{},
+ "name": &Name{},
+ "numeric": &Numeric{},
+ "numrange": &Numrange{},
+ "oid": &OIDValue{},
+ "path": &Path{},
+ "point": &Point{},
+ "polygon": &Polygon{},
+ "record": &Record{},
+ "text": &Text{},
+ "tid": &TID{},
+ "timestamp": &Timestamp{},
+ "timestamptz": &Timestamptz{},
+ "tsrange": &Tsrange{},
+ "tstzrange": &Tstzrange{},
+ "unknown": &Unknown{},
+ "uuid": &UUID{},
+ "varbit": &Varbit{},
+ "varchar": &Varchar{},
+ "xid": &XID{},
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/pguint32.go b/vendor/github.com/jackc/pgx/pgtype/pguint32.go
new file mode 100644
index 0000000..e441a69
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/pguint32.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// pguint32 is the core type that is used to implement PostgreSQL types such as
+// CID and XID.
+type pguint32 struct {
+ Uint uint32
+ Status Status
+}
+
+// Set converts from src to dst. Note that as pguint32 is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *pguint32) Set(src interface{}) error {
+ switch value := src.(type) {
+ case int64:
+ if value < 0 {
+ return errors.Errorf("%d is less than minimum value for pguint32", value)
+ }
+ if value > math.MaxUint32 {
+ return errors.Errorf("%d is greater than maximum value for pguint32", value)
+ }
+ *dst = pguint32{Uint: uint32(value), Status: Present}
+ case uint32:
+ *dst = pguint32{Uint: value, Status: Present}
+ default:
+ return errors.Errorf("cannot convert %v to pguint32", value)
+ }
+
+ return nil
+}
+
+func (dst *pguint32) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Uint
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+// AssignTo assigns from src to dst. Note that as pguint32 is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *pguint32) AssignTo(dst interface{}) error {
+ switch v := dst.(type) {
+ case *uint32:
+ if src.Status == Present {
+ *v = src.Uint
+ } else {
+ return errors.Errorf("cannot assign %v into %T", src, dst)
+ }
+ case **uint32:
+ if src.Status == Present {
+ n := src.Uint
+ *v = &n
+ } else {
+ *v = nil
+ }
+ }
+
+ return nil
+}
+
+func (dst *pguint32) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = pguint32{Uint: uint32(n), Status: Present}
+ return nil
+}
+
+func (dst *pguint32) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint32(src)
+ *dst = pguint32{Uint: n, Status: Present}
+ return nil
+}
+
+func (src *pguint32) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatUint(uint64(src.Uint), 10)...), nil
+}
+
+func (src *pguint32) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendUint32(buf, src.Uint), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *pguint32) Scan(src interface{}) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case uint32:
+ *dst = pguint32{Uint: src, Status: Present}
+ return nil
+ case int64:
+ *dst = pguint32{Uint: uint32(src), Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *pguint32) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Uint), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/point.go b/vendor/github.com/jackc/pgx/pgtype/point.go
new file mode 100644
index 0000000..3132a93
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/point.go
@@ -0,0 +1,139 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Vec2 struct {
+ X float64
+ Y float64
+}
+
+type Point struct {
+ P Vec2
+ Status Status
+}
+
+func (dst *Point) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Point", src)
+}
+
+func (dst *Point) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Point) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Point) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for point: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
+ if len(parts) < 2 {
+ return errors.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(parts[0], 64)
+ if err != nil {
+ return err
+ }
+
+ y, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Point{P: Vec2{x, y}, Status: Present}
+ return nil
+}
+
+func (dst *Point) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ if len(src) != 16 {
+ return errors.Errorf("invalid length for point: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+
+ *dst = Point{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Point) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, fmt.Sprintf(`(%f,%f)`, src.P.X, src.P.Y)...), nil
+}
+
+func (src *Point) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.Y))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Point) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Point) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/polygon.go b/vendor/github.com/jackc/pgx/pgtype/polygon.go
new file mode 100644
index 0000000..3f3d9f5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/polygon.go
@@ -0,0 +1,174 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Polygon struct {
+ P []Vec2
+ Status Status
+}
+
+func (dst *Polygon) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Polygon", src)
+}
+
+func (dst *Polygon) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Polygon) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Polygon) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ if len(src) < 7 {
+ return errors.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ *dst = Polygon{P: points, Status: Present}
+ return nil
+}
+
+func (dst *Polygon) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ pointCount := int(binary.BigEndian.Uint32(src))
+ rp := 4
+
+ if 4+pointCount*16 != len(src) {
+ return errors.Errorf("invalid length for Polygon with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ *dst = Polygon{
+ P: points,
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Polygon) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, '(')
+
+ for i, p := range src.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%f,%f)`, p.X, p.Y)...)
+ }
+
+ return append(buf, ')'), nil
+}
+
+func (src *Polygon) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(src.P)))
+
+ for _, p := range src.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Polygon) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Polygon) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/qchar.go b/vendor/github.com/jackc/pgx/pgtype/qchar.go
new file mode 100644
index 0000000..064dab1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/qchar.go
@@ -0,0 +1,146 @@
+package pgtype
+
+import (
+ "math"
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
+// QChar is for PostgreSQL's special 8-bit-only "char" type more akin to the C
+// language's char type, or Go's byte type. (Note that the name in PostgreSQL
+// itself is "char", in double-quotes, and not char.) It gets used a lot in
+// PostgreSQL's system tables to hold a single ASCII character value (eg
+// pg_class.relkind). It is named Qchar for quoted char to disambiguate from SQL
+// standard type char.
+//
+// Not all possible values of QChar are representable in the text format.
+// Therefore, QChar does not implement TextEncoder and TextDecoder. In
+// addition, database/sql Scanner and database/sql/driver Value are not
+// implemented.
+type QChar struct {
+ Int int8
+ Status Status
+}
+
+func (dst *QChar) Set(src interface{}) error {
+ if src == nil {
+ *dst = QChar{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = QChar{Int: value, Status: Present}
+ case uint8:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int16:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint16:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int32:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint32:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int64:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 8)
+ if err != nil {
+ return err
+ }
+ *dst = QChar{Int: int8(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to QChar", value)
+ }
+
+ return nil
+}
+
+func (dst *QChar) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *QChar) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *QChar) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = QChar{Status: Null}
+ return nil
+ }
+
+ if len(src) != 1 {
+ return errors.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ *dst = QChar{Int: int8(src[0]), Status: Present}
+ return nil
+}
+
+func (src *QChar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, byte(src.Int)), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/range.go b/vendor/github.com/jackc/pgx/pgtype/range.go
new file mode 100644
index 0000000..54fc6ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/range.go
@@ -0,0 +1,278 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/pkg/errors"
+)
+
+type BoundType byte
+
+const (
+ Inclusive = BoundType('i')
+ Exclusive = BoundType('e')
+ Unbounded = BoundType('U')
+ Empty = BoundType('E')
+)
+
+func (bt BoundType) String() string {
+ return string(bt)
+}
+
+type UntypedTextRange struct {
+ Lower string
+ Upper string
+ LowerType BoundType
+ UpperType BoundType
+}
+
+func ParseUntypedTextRange(src string) (*UntypedTextRange, error) {
+ utr := &UntypedTextRange{}
+ if src == "empty" {
+ utr.LowerType = Empty
+ utr.UpperType = Empty
+ return utr, nil
+ }
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid lower bound: %v", err)
+ }
+ switch r {
+ case '(':
+ utr.LowerType = Exclusive
+ case '[':
+ utr.LowerType = Inclusive
+ default:
+ return nil, errors.Errorf("missing lower bound, instead got: %v", string(r))
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid lower value: %v", err)
+ }
+ buf.UnreadRune()
+
+ if r == ',' {
+ utr.LowerType = Unbounded
+ } else {
+ utr.Lower, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid lower value: %v", err)
+ }
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("missing range separator: %v", err)
+ }
+ if r != ',' {
+ return nil, errors.Errorf("missing range separator: %v", r)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid upper value: %v", err)
+ }
+
+ if r == ')' || r == ']' {
+ utr.UpperType = Unbounded
+ } else {
+ buf.UnreadRune()
+ utr.Upper, err = rangeParseValue(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid upper value: %v", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("missing upper bound: %v", err)
+ }
+ switch r {
+ case ')':
+ utr.UpperType = Exclusive
+ case ']':
+ utr.UpperType = Inclusive
+ default:
+ return nil, errors.Errorf("missing upper bound, instead got: %v", string(r))
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, errors.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ return utr, nil
+}
+
+func rangeParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return rangeParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case ',', '[', ']', '(', ')':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func rangeParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r != '"' {
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ }
+ s.WriteRune(r)
+ }
+}
+
+type UntypedBinaryRange struct {
+ Lower []byte
+ Upper []byte
+ LowerType BoundType
+ UpperType BoundType
+}
+
+// 0 = () = 00000
+// 1 = empty = 00001
+// 2 = [) = 00010
+// 4 = (] = 00100
+// 6 = [] = 00110
+// 8 = ) = 01000
+// 12 = ] = 01100
+// 16 = ( = 10000
+// 18 = [ = 10010
+// 24 = = 11000
+
+const emptyMask = 1
+const lowerInclusiveMask = 2
+const upperInclusiveMask = 4
+const lowerUnboundedMask = 8
+const upperUnboundedMask = 16
+
+func ParseUntypedBinaryRange(src []byte) (*UntypedBinaryRange, error) {
+ ubr := &UntypedBinaryRange{}
+
+ if len(src) == 0 {
+ return nil, errors.Errorf("range too short: %v", len(src))
+ }
+
+ rangeType := src[0]
+ rp := 1
+
+ if rangeType&emptyMask > 0 {
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing empty range: %v", len(src[rp:]))
+ }
+ ubr.LowerType = Empty
+ ubr.UpperType = Empty
+ return ubr, nil
+ }
+
+ if rangeType&lowerInclusiveMask > 0 {
+ ubr.LowerType = Inclusive
+ } else if rangeType&lowerUnboundedMask > 0 {
+ ubr.LowerType = Unbounded
+ } else {
+ ubr.LowerType = Exclusive
+ }
+
+ if rangeType&upperInclusiveMask > 0 {
+ ubr.UpperType = Inclusive
+ } else if rangeType&upperUnboundedMask > 0 {
+ ubr.UpperType = Unbounded
+ } else {
+ ubr.UpperType = Exclusive
+ }
+
+ if ubr.LowerType == Unbounded && ubr.UpperType == Unbounded {
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing unbounded range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if len(src[rp:]) < 4 {
+ return nil, errors.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ val := src[rp : rp+valueLen]
+ rp += valueLen
+
+ if ubr.LowerType != Unbounded {
+ ubr.Lower = val
+ } else {
+ ubr.Upper = val
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if ubr.UpperType != Unbounded {
+ if len(src[rp:]) < 4 {
+ return nil, errors.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ ubr.Upper = src[rp : rp+valueLen]
+ rp += valueLen
+ }
+
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+
+ return ubr, nil
+
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/record.go b/vendor/github.com/jackc/pgx/pgtype/record.go
new file mode 100644
index 0000000..aeca1c5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/record.go
@@ -0,0 +1,129 @@
+package pgtype
+
+import (
+ "encoding/binary"
+ "reflect"
+
+ "github.com/pkg/errors"
+)
+
// Record is the generic PostgreSQL record type such as is created with the
// "row" function. Record only implements BinaryEncoder and Value. The text
// format output from PostgreSQL does not include type information and is
// therefore impossible to decode. No encoders are implemented because
// PostgreSQL does not support input of generic records.
type Record struct {
	Fields []Value // decoded column values, in wire order
	Status Status
}

// Set assigns src to dst. Only nil (-> Null) and []Value are accepted;
// anything else is an error because generic records cannot be built from
// arbitrary Go values.
func (dst *Record) Set(src interface{}) error {
	if src == nil {
		*dst = Record{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case []Value:
		*dst = Record{Fields: value, Status: Present}
	default:
		return errors.Errorf("cannot convert %v to Record", src)
	}

	return nil
}

// Get returns the underlying []Value, nil for SQL NULL, or the raw Status
// for any other (e.g. Undefined) state.
func (dst *Record) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst.Fields
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the record into *[]Value (shallow copy of the slice) or
// *[]interface{} (each element unwrapped via Get). Other destinations are
// retried through GetAssignToDstType before failing.
func (src *Record) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *[]Value:
			*v = make([]Value, len(src.Fields))
			copy(*v, src.Fields)
			return nil
		case *[]interface{}:
			*v = make([]interface{}, len(src.Fields))
			for i := range *v {
				(*v)[i] = src.Fields[i].Get()
			}
			return nil
		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}

// DecodeBinary decodes the binary wire format of a record: an int32 field
// count, then for each field an int32 OID, an int32 length (-1 signals SQL
// NULL) and that many data bytes. Each field is decoded by the BinaryDecoder
// registered for its OID in ci.
func (dst *Record) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Record{Status: Null}
		return nil
	}

	rp := 0

	if len(src[rp:]) < 4 {
		return errors.Errorf("Record incomplete %v", src)
	}
	// NOTE(review): a negative field count from malformed input would make
	// make([]Value, fieldCount) panic — confirm upstream callers only feed
	// server-generated data here.
	fieldCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
	rp += 4

	fields := make([]Value, fieldCount)

	for i := 0; i < fieldCount; i++ {
		if len(src[rp:]) < 8 {
			return errors.Errorf("Record incomplete %v", src)
		}
		fieldOID := OID(binary.BigEndian.Uint32(src[rp:]))
		rp += 4

		fieldLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4

		var binaryDecoder BinaryDecoder
		if dt, ok := ci.DataTypeForOID(fieldOID); ok {
			binaryDecoder, _ = dt.Value.(BinaryDecoder)
		}
		if binaryDecoder == nil {
			return errors.Errorf("unknown oid while decoding record: %v", fieldOID)
		}

		// fieldLen < 0 (i.e. -1) means SQL NULL: fieldBytes stays nil and
		// the element decoder receives nil.
		var fieldBytes []byte
		if fieldLen >= 0 {
			if len(src[rp:]) < fieldLen {
				return errors.Errorf("Record incomplete %v", src)
			}
			fieldBytes = src[rp : rp+fieldLen]
			rp += fieldLen
		}

		// Duplicate struct to scan into — decode into a fresh instance so the
		// prototype Value registered in ConnInfo is never mutated.
		binaryDecoder = reflect.New(reflect.ValueOf(binaryDecoder).Elem().Type()).Interface().(BinaryDecoder)

		if err := binaryDecoder.DecodeBinary(ci, fieldBytes); err != nil {
			return err
		}

		fields[i] = binaryDecoder.(Value)
	}

	*dst = Record{Fields: fields, Status: Present}

	return nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/text.go b/vendor/github.com/jackc/pgx/pgtype/text.go
new file mode 100644
index 0000000..bceeffd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/text.go
@@ -0,0 +1,163 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
+type Text struct {
+ String string
+ Status Status
+}
+
+func (dst *Text) Set(src interface{}) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case string:
+ *dst = Text{String: value, Status: Present}
+ case *string:
+ if value == nil {
+ *dst = Text{Status: Null}
+ } else {
+ *dst = Text{String: *value, Status: Present}
+ }
+ case []byte:
+ if value == nil {
+ *dst = Text{Status: Null}
+ } else {
+ *dst = Text{String: string(value), Status: Present}
+ }
+ default:
+ if originalSrc, ok := underlyingStringType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Text", value)
+ }
+
+ return nil
+}
+
+func (dst *Text) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.String
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Text) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *string:
+ *v = src.String
+ return nil
+ case *[]byte:
+ *v = make([]byte, len(src.String))
+ copy(*v, src.String)
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Text) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ *dst = Text{String: string(src), Status: Present}
+ return nil
+}
+
+func (dst *Text) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return dst.DecodeText(ci, src)
+}
+
+func (src *Text) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.String...), nil
+}
+
+func (src *Text) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return src.EncodeText(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Text) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Text) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.String, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Text) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return json.Marshal(src.String)
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Text) UnmarshalJSON(b []byte) error {
+ var s string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ *dst = Text{String: s, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/text_array.go b/vendor/github.com/jackc/pgx/pgtype/text_array.go
new file mode 100644
index 0000000..e40f4b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/text_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// TextArray corresponds to the PostgreSQL text[] type.
type TextArray struct {
	Elements   []Text
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src ([]string or an underlying slice type) into a
// one-dimensional TextArray; nil maps to SQL NULL.
func (dst *TextArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = TextArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []string:
		if value == nil {
			*dst = TextArray{Status: Null}
		} else if len(value) == 0 {
			// Empty array: Present but with no elements and no dimensions.
			*dst = TextArray{Status: Present}
		} else {
			elements := make([]Text, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			*dst = TextArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to TextArray", value)
	}

	return nil
}

// Get returns the TextArray itself when Present, nil for Null, or the raw
// Status otherwise.
func (dst *TextArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the array into *[]string (each element assigned via
// Text.AssignTo), retrying other destinations through GetAssignToDstType.
func (src *TextArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]string:
			*v = make([]string, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}

// DecodeText parses the text representation of a text[] (e.g. "{a,b,NULL}").
// An unquoted NULL element becomes a Null Text.
func (dst *TextArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TextArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Text

	if len(uta.Elements) > 0 {
		elements = make([]Text, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Text
			var elemSrc []byte
			// ParseUntypedTextArray leaves NULL unquoted only for SQL NULL;
			// a literal "NULL" string arrives quoted and is not matched here.
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = TextArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary parses the binary array format: a header (dimensions, flags,
// element OID) followed by int32-length-prefixed elements (-1 = NULL).
func (dst *TextArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TextArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = TextArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Text, elementCount)

	for i := range elements {
		// NOTE(review): no bounds check before reading the 4-byte length or
		// slicing src[rp : rp+elemLen]; truncated/malformed input can panic.
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = TextArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}

// EncodeText appends the text representation of the array to buf, including
// explicit dimension bounds when needed.
func (src *TextArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			// A Null element renders as the quoted string "NULL"? No — nil
			// elemBuf means SQL NULL; it is written quoted here because an
			// UNQUOTED NULL in a text[] literal means SQL NULL, so a literal
			// string value "NULL" must be quoted to be distinguishable.
			buf = append(buf, `"NULL"`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary appends the binary array format to buf. Each element is
// written with an int32 length placeholder that is patched after encoding;
// a placeholder left at -1 marks a NULL element.
func (src *TextArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("text"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "text")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}

// Scan implements the database/sql Scanner interface.
func (dst *TextArray) Scan(src interface{}) error {
	if src == nil {
		return dst.DecodeText(nil, nil)
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy: the driver may reuse src after Scan returns.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *TextArray) Value() (driver.Value, error) {
	buf, err := src.EncodeText(nil, nil)
	if err != nil {
		return nil, err
	}
	if buf == nil {
		return nil, nil
	}

	return string(buf), nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tid.go b/vendor/github.com/jackc/pgx/pgtype/tid.go
new file mode 100644
index 0000000..21852a1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tid.go
@@ -0,0 +1,144 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// TID is PostgreSQL's Tuple Identifier type.
+//
+// When one does
+//
+// select ctid, * from some_table;
+//
+// it is the data type of the ctid hidden system column.
+//
+// It is currently implemented as a pair unsigned two byte integers.
+// Its conversion functions can be found in src/backend/utils/adt/tid.c
+// in the PostgreSQL sources.
+type TID struct {
+ BlockNumber uint32
+ OffsetNumber uint16
+ Status Status
+}
+
+func (dst *TID) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to TID", src)
+}
+
+func (dst *TID) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *TID) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *TID) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TID{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
+ if len(parts) < 2 {
+ return errors.Errorf("invalid format for tid")
+ }
+
+ blockNumber, err := strconv.ParseUint(parts[0], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ offsetNumber, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *dst = TID{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber), Status: Present}
+ return nil
+}
+
+func (dst *TID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TID{Status: Null}
+ return nil
+ }
+
+ if len(src) != 6 {
+ return errors.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ *dst = TID{
+ BlockNumber: binary.BigEndian.Uint32(src),
+ OffsetNumber: binary.BigEndian.Uint16(src[4:]),
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *TID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%d,%d)`, src.BlockNumber, src.OffsetNumber)...)
+ return buf, nil
+}
+
+func (src *TID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint32(buf, src.BlockNumber)
+ buf = pgio.AppendUint16(buf, src.OffsetNumber)
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TID) Scan(src interface{}) error {
+ if src == nil {
+ *dst = TID{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *TID) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamp.go b/vendor/github.com/jackc/pgx/pgtype/timestamp.go
new file mode 100644
index 0000000..d906f46
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamp.go
@@ -0,0 +1,225 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Reference-time layout for PostgreSQL's timestamp text format
// (no time zone component).
const pgTimestampFormat = "2006-01-02 15:04:05.999999999"

// Timestamp represents the PostgreSQL timestamp type. The PostgreSQL
// timestamp does not have a time zone. This presents a problem when
// translating to and from time.Time which requires a time zone. It is highly
// recommended to use timestamptz whenever possible. Timestamp methods either
// convert to UTC or return an error on non-UTC times.
type Timestamp struct {
	Time             time.Time // Time must always be in UTC.
	Status           Status
	InfinityModifier InfinityModifier
}

// Set converts src into a Timestamp and stores in dst. If src is a
// time.Time in a non-UTC time zone, the time zone is discarded.
func (dst *Timestamp) Set(src interface{}) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case time.Time:
		// Keep the wall-clock fields but re-stamp them as UTC (zone discarded,
		// not converted).
		*dst = Timestamp{Time: time.Date(value.Year(), value.Month(), value.Day(), value.Hour(), value.Minute(), value.Second(), value.Nanosecond(), time.UTC), Status: Present}
	default:
		if originalSrc, ok := underlyingTimeType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Timestamp", value)
	}

	return nil
}

// Get returns the time.Time (or the InfinityModifier for +/-infinity) when
// Present, nil for Null, or the raw Status otherwise.
func (dst *Timestamp) Get() interface{} {
	switch dst.Status {
	case Present:
		if dst.InfinityModifier != None {
			return dst.InfinityModifier
		}
		return dst.Time
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the value into *time.Time; infinity values cannot be
// represented as time.Time and fail.
func (src *Timestamp) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *time.Time:
			if src.InfinityModifier != None {
				return errors.Errorf("cannot assign %v to %T", src, dst)
			}
			*v = src.Time
			return nil
		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}

// DecodeText decodes from src into dst. The decoded time is considered to
// be in UTC.
func (dst *Timestamp) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	sbuf := string(src)
	switch sbuf {
	case "infinity":
		*dst = Timestamp{Status: Present, InfinityModifier: Infinity}
	case "-infinity":
		*dst = Timestamp{Status: Present, InfinityModifier: -Infinity}
	default:
		// time.Parse with no zone in the layout yields a UTC time.
		tim, err := time.Parse(pgTimestampFormat, sbuf)
		if err != nil {
			return err
		}

		*dst = Timestamp{Time: tim, Status: Present}
	}

	return nil
}

// DecodeBinary decodes from src into dst. The decoded time is considered to
// be in UTC. The wire value is a signed 8-byte count of microseconds since
// 2000-01-01 (Y2K), with min/max int64 reserved for -infinity/infinity.
func (dst *Timestamp) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	if len(src) != 8 {
		return errors.Errorf("invalid length for timestamp: %v", len(src))
	}

	microsecSinceY2K := int64(binary.BigEndian.Uint64(src))

	switch microsecSinceY2K {
	case infinityMicrosecondOffset:
		*dst = Timestamp{Status: Present, InfinityModifier: Infinity}
	case negativeInfinityMicrosecondOffset:
		*dst = Timestamp{Status: Present, InfinityModifier: -Infinity}
	default:
		microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
		tim := time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000).UTC()
		*dst = Timestamp{Time: tim, Status: Present}
	}

	return nil
}

// EncodeText writes the text encoding of src into w. If src.Time is not in
// the UTC time zone it returns an error.
func (src *Timestamp) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}
	// The zero time.Time is UTC, so infinity values pass this check.
	if src.Time.Location() != time.UTC {
		return nil, errors.Errorf("cannot encode non-UTC time into timestamp")
	}

	var s string

	switch src.InfinityModifier {
	case None:
		s = src.Time.Format(pgTimestampFormat)
	case Infinity:
		s = "infinity"
	case NegativeInfinity:
		s = "-infinity"
	}

	return append(buf, s...), nil
}

// EncodeBinary writes the binary encoding of src into w. If src.Time is not in
// the UTC time zone it returns an error.
func (src *Timestamp) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}
	if src.Time.Location() != time.UTC {
		return nil, errors.Errorf("cannot encode non-UTC time into timestamp")
	}

	var microsecSinceY2K int64
	switch src.InfinityModifier {
	case None:
		microsecSinceUnixEpoch := src.Time.Unix()*1000000 + int64(src.Time.Nanosecond())/1000
		microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
	case Infinity:
		microsecSinceY2K = infinityMicrosecondOffset
	case NegativeInfinity:
		microsecSinceY2K = negativeInfinityMicrosecondOffset
	}

	return pgio.AppendInt64(buf, microsecSinceY2K), nil
}

// Scan implements the database/sql Scanner interface.
func (dst *Timestamp) Scan(src interface{}) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	case time.Time:
		// NOTE(review): unlike Set, this stores src as-is without discarding
		// a non-UTC zone — confirm whether Scan should normalize to UTC too.
		*dst = Timestamp{Time: src, Status: Present}
		return nil
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *Timestamp) Value() (driver.Value, error) {
	switch src.Status {
	case Present:
		if src.InfinityModifier != None {
			return src.InfinityModifier.String(), nil
		}
		return src.Time, nil
	case Null:
		return nil, nil
	default:
		return nil, errUndefined
	}
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go b/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go
new file mode 100644
index 0000000..546a381
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// TimestampArray corresponds to the PostgreSQL timestamp[] type.
type TimestampArray struct {
	Elements   []Timestamp
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src ([]time.Time or an underlying slice type) into a
// one-dimensional TimestampArray; nil maps to SQL NULL.
func (dst *TimestampArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = TimestampArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []time.Time:
		if value == nil {
			*dst = TimestampArray{Status: Null}
		} else if len(value) == 0 {
			// Empty array: Present but with no elements and no dimensions.
			*dst = TimestampArray{Status: Present}
		} else {
			elements := make([]Timestamp, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			*dst = TimestampArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to TimestampArray", value)
	}

	return nil
}

// Get returns the TimestampArray itself when Present, nil for Null, or the
// raw Status otherwise.
func (dst *TimestampArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the array into *[]time.Time (each element assigned via
// Timestamp.AssignTo), retrying other destinations through
// GetAssignToDstType.
func (src *TimestampArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]time.Time:
			*v = make([]time.Time, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}

// DecodeText parses the text representation of a timestamp[]; an unquoted
// NULL element becomes a Null Timestamp.
func (dst *TimestampArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TimestampArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Timestamp

	if len(uta.Elements) > 0 {
		elements = make([]Timestamp, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Timestamp
			var elemSrc []byte
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = TimestampArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary parses the binary array format: a header (dimensions, flags,
// element OID) followed by int32-length-prefixed elements (-1 = NULL).
func (dst *TimestampArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TimestampArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = TimestampArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Timestamp, elementCount)

	for i := range elements {
		// NOTE(review): no bounds check before reading the 4-byte length or
		// slicing src[rp : rp+elemLen]; truncated/malformed input can panic.
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = TimestampArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}

// EncodeText appends the text representation of the array to buf, including
// explicit dimension bounds when needed.
func (src *TimestampArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			// SQL NULL renders unquoted here (unlike TextArray): a timestamp
			// value can never be the literal string NULL, so no quoting is
			// needed to disambiguate.
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary appends the binary array format to buf. Each element is
// written with an int32 length placeholder that is patched after encoding;
// a placeholder left at -1 marks a NULL element.
func (src *TimestampArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("timestamp"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "timestamp")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}

// Scan implements the database/sql Scanner interface.
func (dst *TimestampArray) Scan(src interface{}) error {
	if src == nil {
		return dst.DecodeText(nil, nil)
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy: the driver may reuse src after Scan returns.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *TimestampArray) Value() (driver.Value, error) {
	buf, err := src.EncodeText(nil, nil)
	if err != nil {
		return nil, err
	}
	if buf == nil {
		return nil, nil
	}

	return string(buf), nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamptz.go b/vendor/github.com/jackc/pgx/pgtype/timestamptz.go
new file mode 100644
index 0000000..74fe495
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamptz.go
@@ -0,0 +1,221 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const pgTimestamptzHourFormat = "2006-01-02 15:04:05.999999999Z07"
+const pgTimestamptzMinuteFormat = "2006-01-02 15:04:05.999999999Z07:00"
+const pgTimestamptzSecondFormat = "2006-01-02 15:04:05.999999999Z07:00:00"
+const microsecFromUnixEpochToY2K = 946684800 * 1000000
+
+const (
+ negativeInfinityMicrosecondOffset = -9223372036854775808
+ infinityMicrosecondOffset = 9223372036854775807
+)
+
+type Timestamptz struct {
+ Time time.Time
+ Status Status
+ InfinityModifier InfinityModifier
+}
+
+func (dst *Timestamptz) Set(src interface{}) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case time.Time:
+ *dst = Timestamptz{Time: value, Status: Present}
+ default:
+ if originalSrc, ok := underlyingTimeType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Timestamptz", value)
+ }
+
+ return nil
+}
+
+func (dst *Timestamptz) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ if dst.InfinityModifier != None {
+ return dst.InfinityModifier
+ }
+ return dst.Time
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Timestamptz) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *time.Time:
+ if src.InfinityModifier != None {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+ }
+ *v = src.Time
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Timestamptz) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ *dst = Timestamptz{Status: Present, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Timestamptz{Status: Present, InfinityModifier: -Infinity}
+ default:
+ var format string
+ if sbuf[len(sbuf)-9] == '-' || sbuf[len(sbuf)-9] == '+' {
+ format = pgTimestamptzSecondFormat
+ } else if sbuf[len(sbuf)-6] == '-' || sbuf[len(sbuf)-6] == '+' {
+ format = pgTimestamptzMinuteFormat
+ } else {
+ format = pgTimestamptzHourFormat
+ }
+
+ tim, err := time.Parse(format, sbuf)
+ if err != nil {
+ return err
+ }
+
+ *dst = Timestamptz{Time: tim, Status: Present}
+ }
+
+ return nil
+}
+
+func (dst *Timestamptz) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for timestamptz: %v", len(src))
+ }
+
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ *dst = Timestamptz{Status: Present, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ *dst = Timestamptz{Status: Present, InfinityModifier: -Infinity}
+ default:
+ microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
+ tim := time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
+ *dst = Timestamptz{Time: tim, Status: Present}
+ }
+
+ return nil
+}
+
+func (src *Timestamptz) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var s string
+
+ switch src.InfinityModifier {
+ case None:
+ s = src.Time.UTC().Format(pgTimestamptzSecondFormat)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return append(buf, s...), nil
+}
+
+func (src *Timestamptz) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var microsecSinceY2K int64
+ switch src.InfinityModifier {
+ case None:
+ microsecSinceUnixEpoch := src.Time.Unix()*1000000 + int64(src.Time.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ return pgio.AppendInt64(buf, microsecSinceY2K), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Timestamptz) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ case time.Time:
+ *dst = Timestamptz{Time: src, Status: Present}
+ return nil
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Timestamptz) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ if src.InfinityModifier != None {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go b/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go
new file mode 100644
index 0000000..88b6cc5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type TimestamptzArray struct {
+ Elements []Timestamptz
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *TimestamptzArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []time.Time:
+ if value == nil {
+ *dst = TimestamptzArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = TimestamptzArray{Status: Present}
+ } else {
+ elements := make([]Timestamptz, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = TimestamptzArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to TimestamptzArray", value)
+ }
+
+ return nil
+}
+
+func (dst *TimestamptzArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *TimestamptzArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]time.Time:
+ *v = make([]time.Time, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *TimestamptzArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Timestamptz
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Timestamptz, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Timestamptz
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = TimestamptzArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *TimestamptzArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = TimestamptzArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Timestamptz, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = TimestamptzArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *TimestamptzArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *TimestamptzArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("timestamptz"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "timestamptz")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TimestamptzArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *TimestamptzArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tsrange.go b/vendor/github.com/jackc/pgx/pgtype/tsrange.go
new file mode 100644
index 0000000..8a67d65
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tsrange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Tsrange struct {
+ Lower Timestamp
+ Upper Timestamp
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Tsrange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Tsrange", src)
+}
+
+func (dst *Tsrange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Tsrange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Tsrange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Tsrange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Tsrange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Tsrange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Tsrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Tsrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Tsrange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Tsrange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tstzrange.go b/vendor/github.com/jackc/pgx/pgtype/tstzrange.go
new file mode 100644
index 0000000..b512909
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tstzrange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Tstzrange struct {
+ Lower Timestamptz
+ Upper Timestamptz
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Tstzrange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Tstzrange", src)
+}
+
+func (dst *Tstzrange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Tstzrange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Tstzrange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Tstzrange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Tstzrange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Tstzrange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Tstzrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Tstzrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Tstzrange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Tstzrange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb b/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb
new file mode 100644
index 0000000..6fafc2d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb
@@ -0,0 +1,304 @@
+package pgtype
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type <%= pgtype_array_type %> struct {
+ Elements []<%= pgtype_element_type %>
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *<%= pgtype_array_type %>) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ <% go_array_types.split(",").each do |t| %>
+ case <%= t %>:
+ if value == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ } else if len(value) == 0 {
+ *dst = <%= pgtype_array_type %>{Status: Present}
+ } else {
+ elements := make([]<%= pgtype_element_type %>, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = <%= pgtype_array_type %>{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+ <% end %>
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to <%= pgtype_array_type %>", value)
+ }
+
+ return nil
+}
+
+func (dst *<%= pgtype_array_type %>) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *<%= pgtype_array_type %>) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ <% go_array_types.split(",").each do |t| %>
+ case *<%= t %>:
+ *v = make(<%= t %>, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+ <% end %>
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *<%= pgtype_array_type %>) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []<%= pgtype_element_type %>
+
+ if len(uta.Elements) > 0 {
+ elements = make([]<%= pgtype_element_type %>, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem <%= pgtype_element_type %>
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = <%= pgtype_array_type %>{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+<% if binary_format == "true" %>
+func (dst *<%= pgtype_array_type %>) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = <%= pgtype_array_type %>{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]<%= pgtype_element_type %>, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp:rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = <%= pgtype_array_type %>{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+<% end %>
+
+func (src *<%= pgtype_array_type %>) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `<%= text_null %>`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+<% if binary_format == "true" %>
+ func (src *<%= pgtype_array_type %>) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("<%= element_type_name %>"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "<%= element_type_name %>")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+ }
+<% end %>
+
+// Scan implements the database/sql Scanner interface.
+func (dst *<%= pgtype_array_type %>) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *<%= pgtype_array_type %>) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh b/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh
new file mode 100644
index 0000000..4a8211b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh
@@ -0,0 +1,24 @@
+erb pgtype_array_type=Int2Array pgtype_element_type=Int2 go_array_types=[]int16,[]uint16 element_type_name=int2 text_null=NULL binary_format=true typed_array.go.erb > int2_array.go
+erb pgtype_array_type=Int4Array pgtype_element_type=Int4 go_array_types=[]int32,[]uint32 element_type_name=int4 text_null=NULL binary_format=true typed_array.go.erb > int4_array.go
+erb pgtype_array_type=Int8Array pgtype_element_type=Int8 go_array_types=[]int64,[]uint64 element_type_name=int8 text_null=NULL binary_format=true typed_array.go.erb > int8_array.go
+erb pgtype_array_type=BoolArray pgtype_element_type=Bool go_array_types=[]bool element_type_name=bool text_null=NULL binary_format=true typed_array.go.erb > bool_array.go
+erb pgtype_array_type=DateArray pgtype_element_type=Date go_array_types=[]time.Time element_type_name=date text_null=NULL binary_format=true typed_array.go.erb > date_array.go
+erb pgtype_array_type=TimestamptzArray pgtype_element_type=Timestamptz go_array_types=[]time.Time element_type_name=timestamptz text_null=NULL binary_format=true typed_array.go.erb > timestamptz_array.go
+erb pgtype_array_type=TimestampArray pgtype_element_type=Timestamp go_array_types=[]time.Time element_type_name=timestamp text_null=NULL binary_format=true typed_array.go.erb > timestamp_array.go
+erb pgtype_array_type=Float4Array pgtype_element_type=Float4 go_array_types=[]float32 element_type_name=float4 text_null=NULL binary_format=true typed_array.go.erb > float4_array.go
+erb pgtype_array_type=Float8Array pgtype_element_type=Float8 go_array_types=[]float64 element_type_name=float8 text_null=NULL binary_format=true typed_array.go.erb > float8_array.go
+erb pgtype_array_type=InetArray pgtype_element_type=Inet go_array_types=[]*net.IPNet,[]net.IP element_type_name=inet text_null=NULL binary_format=true typed_array.go.erb > inet_array.go
+erb pgtype_array_type=CIDRArray pgtype_element_type=CIDR go_array_types=[]*net.IPNet,[]net.IP element_type_name=cidr text_null=NULL binary_format=true typed_array.go.erb > cidr_array.go
+erb pgtype_array_type=TextArray pgtype_element_type=Text go_array_types=[]string element_type_name=text text_null='"NULL"' binary_format=true typed_array.go.erb > text_array.go
+erb pgtype_array_type=VarcharArray pgtype_element_type=Varchar go_array_types=[]string element_type_name=varchar text_null='"NULL"' binary_format=true typed_array.go.erb > varchar_array.go
+erb pgtype_array_type=BPCharArray pgtype_element_type=BPChar go_array_types=[]string element_type_name=bpchar text_null='NULL' binary_format=true typed_array.go.erb > bpchar_array.go
+erb pgtype_array_type=ByteaArray pgtype_element_type=Bytea go_array_types=[][]byte element_type_name=bytea text_null=NULL binary_format=true typed_array.go.erb > bytea_array.go
+erb pgtype_array_type=ACLItemArray pgtype_element_type=ACLItem go_array_types=[]string element_type_name=aclitem text_null=NULL binary_format=false typed_array.go.erb > aclitem_array.go
+erb pgtype_array_type=HstoreArray pgtype_element_type=Hstore go_array_types=[]map[string]string element_type_name=hstore text_null=NULL binary_format=true typed_array.go.erb > hstore_array.go
+erb pgtype_array_type=NumericArray pgtype_element_type=Numeric go_array_types=[]float32,[]float64 element_type_name=numeric text_null=NULL binary_format=true typed_array.go.erb > numeric_array.go
+erb pgtype_array_type=UUIDArray pgtype_element_type=UUID go_array_types=[][16]byte,[][]byte,[]string element_type_name=uuid text_null=NULL binary_format=true typed_array.go.erb > uuid_array.go
+
+# While the binary format is theoretically possible it is only practical to use the text format. In addition, the text format for NULL enums is unquoted so TextArray or a possible GenericTextArray cannot be used.
+erb pgtype_array_type=EnumArray pgtype_element_type=GenericText go_array_types=[]string text_null='NULL' binary_format=false typed_array.go.erb > enum_array.go
+
+goimports -w *_array.go
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb b/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb
new file mode 100644
index 0000000..91a5cb9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb
@@ -0,0 +1,252 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+// <%= range_type %> represents a PostgreSQL range of <%= element_type %>
+// bounds. LowerType/UpperType describe each bound (inclusive, exclusive,
+// unbounded) or mark the whole range as empty.
+type <%= range_type %> struct {
+	Lower <%= element_type %>
+	Upper <%= element_type %>
+	LowerType BoundType
+	UpperType BoundType
+	Status Status
+}
+
+// Set is not implemented for range types; values must be constructed by
+// assigning the fields directly.
+func (dst *<%= range_type %>) Set(src interface{}) error {
+	return errors.Errorf("cannot convert %v to <%= range_type %>", src)
+}
+
+// Get returns the underlying value: dst itself when Present, nil when
+// Null, otherwise the raw Status.
+func (dst *<%= range_type %>) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo is not implemented for range types.
+func (src *<%= range_type %>) AssignTo(dst interface{}) error {
+	return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL text representation of a range, e.g.
+// "[1,10)" or "empty". Bound values are decoded only for inclusive or
+// exclusive bounds; unbounded sides leave the zero value in place.
+func (dst *<%= range_type %>) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = <%= range_type %>{Status: Null}
+		return nil
+	}
+
+	utr, err := ParseUntypedTextRange(string(src))
+	if err != nil {
+		return err
+	}
+
+	*dst = <%= range_type %>{Status: Present}
+
+	dst.LowerType = utr.LowerType
+	dst.UpperType = utr.UpperType
+
+	// An empty range has no bound values to decode.
+	if dst.LowerType == Empty {
+		return nil
+	}
+
+	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+		if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+			return err
+		}
+	}
+
+	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+		if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary range wire format: a flags
+// byte followed by length-prefixed bound values, parsed by
+// ParseUntypedBinaryRange before the typed bounds are decoded.
+func (dst *<%= range_type %>) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = <%= range_type %>{Status: Null}
+		return nil
+	}
+
+	ubr, err := ParseUntypedBinaryRange(src)
+	if err != nil {
+		return err
+	}
+
+	*dst = <%= range_type %>{Status: Present}
+
+	dst.LowerType = ubr.LowerType
+	dst.UpperType = ubr.UpperType
+
+	// An empty range has no bound values to decode.
+	if dst.LowerType == Empty {
+		return nil
+	}
+
+	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+		if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+			return err
+		}
+	}
+
+	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+		if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// EncodeText renders the range in PostgreSQL text format, e.g. "[1,10)".
+// An empty range encodes as the literal "empty". It is an error for a
+// bounded side to encode to NULL.
+func (src <%= range_type %>) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	switch src.LowerType {
+	case Exclusive, Unbounded:
+		buf = append(buf, '(')
+	case Inclusive:
+		buf = append(buf, '[')
+	case Empty:
+		return append(buf, "empty"...), nil
+	default:
+		return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+	}
+
+	var err error
+
+	if src.LowerType != Unbounded {
+		buf, err = src.Lower.EncodeText(ci, buf)
+		if err != nil {
+			return nil, err
+		} else if buf == nil {
+			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+		}
+	}
+
+	buf = append(buf, ',')
+
+	if src.UpperType != Unbounded {
+		buf, err = src.Upper.EncodeText(ci, buf)
+		if err != nil {
+			return nil, err
+		} else if buf == nil {
+			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+		}
+	}
+
+	switch src.UpperType {
+	case Exclusive, Unbounded:
+		buf = append(buf, ')')
+	case Inclusive:
+		buf = append(buf, ']')
+	default:
+		return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary encodes the range in PostgreSQL binary wire format: a
+// single flags byte describing the bound types, then each present bound
+// as a 4-byte length prefix plus its binary encoding.
+func (src <%= range_type %>) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	// Build the flags byte. Exclusive contributes no bit; empty ranges
+	// are a single emptyMask byte with no bound data at all.
+	var rangeType byte
+	switch src.LowerType {
+	case Inclusive:
+		rangeType |= lowerInclusiveMask
+	case Unbounded:
+		rangeType |= lowerUnboundedMask
+	case Exclusive:
+	case Empty:
+		return append(buf, emptyMask), nil
+	default:
+		return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+	}
+
+	switch src.UpperType {
+	case Inclusive:
+		rangeType |= upperInclusiveMask
+	case Unbounded:
+		rangeType |= upperUnboundedMask
+	case Exclusive:
+	default:
+		return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+	}
+
+	buf = append(buf, rangeType)
+
+	var err error
+
+	if src.LowerType != Unbounded {
+		// Reserve 4 bytes for the bound length; backpatched below.
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		buf, err = src.Lower.EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if buf == nil {
+			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+		}
+
+		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+	}
+
+	if src.UpperType != Unbounded {
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		buf, err = src.Upper.EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if buf == nil {
+			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+		}
+
+		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface. It accepts the text
+// range format as a string or []byte; a nil src is decoded as SQL NULL.
+func (dst *<%= range_type %>) Scan(src interface{}) error {
+	if src == nil {
+		*dst = <%= range_type %>{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy: the driver may reuse src's memory after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface using the
+// text encoding.
+func (src <%= range_type %>) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh b/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh
new file mode 100644
index 0000000..bedda29
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh
@@ -0,0 +1,7 @@
+erb range_type=Int4range element_type=Int4 typed_range.go.erb > int4range.go
+erb range_type=Int8range element_type=Int8 typed_range.go.erb > int8range.go
+erb range_type=Tsrange element_type=Timestamp typed_range.go.erb > tsrange.go
+erb range_type=Tstzrange element_type=Timestamptz typed_range.go.erb > tstzrange.go
+erb range_type=Daterange element_type=Date typed_range.go.erb > daterange.go
+erb range_type=Numrange element_type=Numeric typed_range.go.erb > numrange.go
+goimports -w *range.go
diff --git a/vendor/github.com/jackc/pgx/pgtype/unknown.go b/vendor/github.com/jackc/pgx/pgtype/unknown.go
new file mode 100644
index 0000000..567831d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/unknown.go
@@ -0,0 +1,44 @@
+package pgtype
+
+import "database/sql/driver"
+
+// Unknown represents the PostgreSQL unknown type. It is either a string literal
+// or NULL. It is used when PostgreSQL does not know the type of a value. In
+// general, this will only be used in pgx when selecting a null value without
+// type information. e.g. SELECT NULL;
+//
+// Every method delegates to Text via a direct pointer conversion; Go only
+// permits that conversion because the two structs have identical field
+// layouts (String, Status).
+type Unknown struct {
+	String string
+	Status Status
+}
+
+// Set delegates to Text.Set.
+func (dst *Unknown) Set(src interface{}) error {
+	return (*Text)(dst).Set(src)
+}
+
+// Get delegates to Text.Get.
+func (dst *Unknown) Get() interface{} {
+	return (*Text)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as Unknown is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *Unknown) AssignTo(dst interface{}) error {
+	return (*Text)(src).AssignTo(dst)
+}
+
+// DecodeText delegates to Text.DecodeText.
+func (dst *Unknown) DecodeText(ci *ConnInfo, src []byte) error {
+	return (*Text)(dst).DecodeText(ci, src)
+}
+
+// DecodeBinary delegates to Text.DecodeBinary.
+func (dst *Unknown) DecodeBinary(ci *ConnInfo, src []byte) error {
+	return (*Text)(dst).DecodeBinary(ci, src)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Unknown) Scan(src interface{}) error {
+	return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Unknown) Value() (driver.Value, error) {
+	return (*Text)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/uuid.go b/vendor/github.com/jackc/pgx/pgtype/uuid.go
new file mode 100644
index 0000000..f8297b3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/uuid.go
@@ -0,0 +1,183 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+// UUID represents the PostgreSQL uuid type, stored as its 16 raw bytes.
+type UUID struct {
+	Bytes [16]byte
+	Status Status
+}
+
+// Set converts src to a UUID. Accepted types: [16]byte, []byte (must be
+// exactly 16 bytes; a nil slice means NULL), a string in the standard
+// 8-4-4-4-12 form, nil (NULL), and pointer types dereferencing to one of
+// the above.
+func (dst *UUID) Set(src interface{}) error {
+	if src == nil {
+		*dst = UUID{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case [16]byte:
+		*dst = UUID{Bytes: value, Status: Present}
+	case []byte:
+		if value != nil {
+			if len(value) != 16 {
+				return errors.Errorf("[]byte must be 16 bytes to convert to UUID: %d", len(value))
+			}
+			*dst = UUID{Status: Present}
+			copy(dst.Bytes[:], value)
+		} else {
+			*dst = UUID{Status: Null}
+		}
+	case string:
+		// NOTE(review): parseUUID slices fixed offsets and panics on
+		// strings shorter than 36 bytes instead of returning an error.
+		// DecodeText validates the length first; Set does not — confirm
+		// and fix upstream.
+		uuid, err := parseUUID(value)
+		if err != nil {
+			return err
+		}
+		*dst = UUID{Bytes: uuid, Status: Present}
+	default:
+		// Unwrap pointer types and retry with the pointed-to value.
+		if originalSrc, ok := underlyingPtrType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to UUID", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying [16]byte when Present, nil when Null,
+// otherwise the raw Status.
+func (dst *UUID) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst.Bytes
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies the UUID into *[16]byte, *[]byte (a fresh 16-byte
+// slice), or *string (standard 8-4-4-4-12 form); other destinations are
+// retried via GetAssignToDstType pointer unwrapping.
+func (src *UUID) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+		case *[16]byte:
+			*v = src.Bytes
+			return nil
+		case *[]byte:
+			// Allocate a new slice so the caller never aliases src.Bytes.
+			*v = make([]byte, 16)
+			copy(*v, src.Bytes[:])
+			return nil
+		case *string:
+			*v = encodeUUID(src.Bytes)
+			return nil
+		default:
+			if nextDst, retry := GetAssignToDstType(v); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot assign %v into %T", src, dst)
+}
+
+// parseUUID converts a string UUID in standard form to a byte array by
+// dropping the four separator positions (8, 13, 18, 23) and hex-decoding
+// the remaining 32 characters.
+//
+// NOTE(review): the input length is not validated here — a string shorter
+// than 36 bytes panics on the fixed-offset slicing below, and the bytes at
+// the separator positions are discarded without checking that they are
+// '-'. Callers must pre-validate (DecodeText does; Set(string) does not).
+func parseUUID(src string) (dst [16]byte, err error) {
+	src = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]
+	buf, err := hex.DecodeString(src)
+	if err != nil {
+		return dst, err
+	}
+
+	copy(dst[:], buf)
+	return dst, err
+}
+
+// encodeUUID converts a uuid byte array to UUID standard string form
+// (lowercase hex in 8-4-4-4-12 groups).
+func encodeUUID(src [16]byte) string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])
+}
+
+// DecodeText decodes the 36-character standard text form; the length is
+// validated before parseUUID's fixed-offset slicing runs.
+func (dst *UUID) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = UUID{Status: Null}
+		return nil
+	}
+
+	if len(src) != 36 {
+		return errors.Errorf("invalid length for UUID: %v", len(src))
+	}
+
+	buf, err := parseUUID(string(src))
+	if err != nil {
+		return err
+	}
+
+	*dst = UUID{Bytes: buf, Status: Present}
+	return nil
+}
+
+// DecodeBinary decodes the 16-byte binary form. The bytes are copied into
+// the fixed array, so dst never aliases src.
+func (dst *UUID) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = UUID{Status: Null}
+		return nil
+	}
+
+	if len(src) != 16 {
+		return errors.Errorf("invalid length for UUID: %v", len(src))
+	}
+
+	*dst = UUID{Status: Present}
+	copy(dst.Bytes[:], src)
+	return nil
+}
+
+// EncodeText appends the standard 8-4-4-4-12 string form to buf.
+func (src *UUID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, encodeUUID(src.Bytes)...), nil
+}
+
+// EncodeBinary appends the 16 raw bytes to buf.
+func (src *UUID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, src.Bytes[:]...), nil
+}
+
+// Scan implements the database/sql Scanner interface. It accepts the text
+// form as a string or []byte; a nil src means SQL NULL.
+func (dst *UUID) Scan(src interface{}) error {
+	if src == nil {
+		*dst = UUID{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy: the driver may reuse src's memory after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface using the
+// text encoding.
+func (src *UUID) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/uuid_array.go b/vendor/github.com/jackc/pgx/pgtype/uuid_array.go
new file mode 100644
index 0000000..9c7843a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/uuid_array.go
@@ -0,0 +1,356 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type UUIDArray struct {
+ Elements []UUID
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *UUIDArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case [][16]byte:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case [][]byte:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []string:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to UUIDArray", value)
+ }
+
+ return nil
+}
+
+func (dst *UUIDArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *UUIDArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[][16]byte:
+ *v = make([][16]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[][]byte:
+ *v = make([][]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *UUIDArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []UUID
+
+ if len(uta.Elements) > 0 {
+ elements = make([]UUID, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem UUID
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = UUIDArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *UUIDArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = UUIDArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]UUID, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = UUIDArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *UUIDArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *UUIDArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("uuid"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "uuid")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *UUIDArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *UUIDArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varbit.go b/vendor/github.com/jackc/pgx/pgtype/varbit.go
new file mode 100644
index 0000000..dfa194d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varbit.go
@@ -0,0 +1,133 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Varbit represents the PostgreSQL bit varying type. Bits are packed
+// most-significant-bit first into Bytes; Len is the count of significant
+// bits (the final byte may be only partially used).
+type Varbit struct {
+	Bytes []byte
+	Len int32 // Number of bits
+	Status Status
+}
+
+// Set is not implemented for Varbit; values must be decoded or built by
+// assigning the fields directly.
+func (dst *Varbit) Set(src interface{}) error {
+	return errors.Errorf("cannot convert %v to Varbit", src)
+}
+
+// Get returns dst itself when Present, nil when Null, otherwise the raw
+// Status.
+func (dst *Varbit) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo is not implemented for Varbit.
+func (src *Varbit) AssignTo(dst interface{}) error {
+	return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+// DecodeText packs a text bit string such as "10110" into Bytes, MSB
+// first, rounding the byte length up for a trailing partial byte.
+//
+// NOTE(review): any input byte other than '1' is treated as a 0 bit; the
+// input is not validated to contain only '0'/'1' — confirm upstream
+// whether that is intentional.
+func (dst *Varbit) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Varbit{Status: Null}
+		return nil
+	}
+
+	bitLen := len(src)
+	byteLen := bitLen / 8
+	if bitLen%8 > 0 {
+		byteLen++
+	}
+	buf := make([]byte, byteLen)
+
+	for i, b := range src {
+		if b == '1' {
+			byteIdx := i / 8
+			bitIdx := uint(i % 8)
+			buf[byteIdx] = buf[byteIdx] | (128 >> bitIdx)
+		}
+	}
+
+	*dst = Varbit{Bytes: buf, Len: int32(bitLen), Status: Present}
+	return nil
+}
+
+// DecodeBinary decodes the wire format: a 4-byte big-endian bit count
+// followed by the packed bit bytes.
+//
+// NOTE(review): dst.Bytes aliases src (no copy is made), so it is only
+// valid while the caller's buffer is; and the remaining byte count is not
+// checked against bitLen — confirm callers guarantee both.
+func (dst *Varbit) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Varbit{Status: Null}
+		return nil
+	}
+
+	if len(src) < 4 {
+		return errors.Errorf("invalid length for varbit: %v", len(src))
+	}
+
+	bitLen := int32(binary.BigEndian.Uint32(src))
+	rp := 4
+
+	*dst = Varbit{Bytes: src[rp:], Len: bitLen, Status: Present}
+	return nil
+}
+
+// EncodeText appends one ASCII '0'/'1' per bit, MSB first within each
+// byte. It assumes Bytes holds at least Len bits — an undersized Bytes
+// would panic on the index below.
+func (src *Varbit) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	for i := int32(0); i < src.Len; i++ {
+		byteIdx := i / 8
+		bitMask := byte(128 >> byte(i%8))
+		char := byte('0')
+		if src.Bytes[byteIdx]&bitMask > 0 {
+			char = '1'
+		}
+		buf = append(buf, char)
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the wire format: 4-byte big-endian bit count, then
+// the packed bit bytes verbatim.
+func (src *Varbit) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	buf = pgio.AppendInt32(buf, src.Len)
+	return append(buf, src.Bytes...), nil
+}
+
+// Scan implements the database/sql Scanner interface. It accepts the text
+// bit-string form as a string or []byte; a nil src means SQL NULL.
+func (dst *Varbit) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Varbit{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy: the driver may reuse src's memory after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface using the
+// text encoding.
+func (src *Varbit) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varchar.go b/vendor/github.com/jackc/pgx/pgtype/varchar.go
new file mode 100644
index 0000000..6be1a03
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varchar.go
@@ -0,0 +1,58 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// Varchar represents the PostgreSQL varchar type. It is defined in terms
+// of Text, so it shares Text's representation and every method delegates
+// via a direct *Text conversion.
+type Varchar Text
+
+// Set converts from src to dst. Note that as Varchar is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *Varchar) Set(src interface{}) error {
+	return (*Text)(dst).Set(src)
+}
+
+// Get delegates to Text.Get.
+func (dst *Varchar) Get() interface{} {
+	return (*Text)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as Varchar is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *Varchar) AssignTo(dst interface{}) error {
+	return (*Text)(src).AssignTo(dst)
+}
+
+// DecodeText delegates to Text.DecodeText.
+func (dst *Varchar) DecodeText(ci *ConnInfo, src []byte) error {
+	return (*Text)(dst).DecodeText(ci, src)
+}
+
+// DecodeBinary delegates to Text.DecodeBinary.
+func (dst *Varchar) DecodeBinary(ci *ConnInfo, src []byte) error {
+	return (*Text)(dst).DecodeBinary(ci, src)
+}
+
+// EncodeText delegates to Text.EncodeText.
+func (src *Varchar) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	return (*Text)(src).EncodeText(ci, buf)
+}
+
+// EncodeBinary delegates to Text.EncodeBinary.
+func (src *Varchar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	return (*Text)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Varchar) Scan(src interface{}) error {
+	return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Varchar) Value() (driver.Value, error) {
+	return (*Text)(src).Value()
+}
+
+// MarshalJSON delegates to Text.MarshalJSON.
+func (src *Varchar) MarshalJSON() ([]byte, error) {
+	return (*Text)(src).MarshalJSON()
+}
+
+// UnmarshalJSON delegates to Text.UnmarshalJSON.
+func (dst *Varchar) UnmarshalJSON(b []byte) error {
+	return (*Text)(dst).UnmarshalJSON(b)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varchar_array.go b/vendor/github.com/jackc/pgx/pgtype/varchar_array.go
new file mode 100644
index 0000000..09eba3e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varchar_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type VarcharArray struct {
+ Elements []Varchar
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *VarcharArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []string:
+ if value == nil {
+ *dst = VarcharArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = VarcharArray{Status: Present}
+ } else {
+ elements := make([]Varchar, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = VarcharArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to VarcharArray", value)
+ }
+
+ return nil
+}
+
+func (dst *VarcharArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *VarcharArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *VarcharArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Varchar
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Varchar, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Varchar
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = VarcharArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *VarcharArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = VarcharArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Varchar, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = VarcharArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *VarcharArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `"NULL"`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *VarcharArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("varchar"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "varchar")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *VarcharArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *VarcharArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/xid.go b/vendor/github.com/jackc/pgx/pgtype/xid.go
new file mode 100644
index 0000000..f66f536
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/xid.go
@@ -0,0 +1,64 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// XID is PostgreSQL's Transaction ID type.
+//
+// In later versions of PostgreSQL, it is the type used for the backend_xid
+// and backend_xmin columns of the pg_stat_activity system view.
+//
+// Also, when one does
+//
+// select xmin, xmax, * from some_table;
+//
+// it is the data type of the xmin and xmax hidden system columns.
+//
+// It is currently implemented as an unsigned four byte integer.
+// Its definition can be found in src/include/postgres_ext.h as TransactionId
+// in the PostgreSQL sources.
+type XID pguint32
+
+// Set converts from src to dst. Note that as XID is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *XID) Set(src interface{}) error {
+ return (*pguint32)(dst).Set(src)
+}
+
+func (dst *XID) Get() interface{} {
+ return (*pguint32)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as XID is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *XID) AssignTo(dst interface{}) error {
+ return (*pguint32)(src).AssignTo(dst)
+}
+
+func (dst *XID) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeText(ci, src)
+}
+
+func (dst *XID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeBinary(ci, src)
+}
+
+func (src *XID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeText(ci, buf)
+}
+
+func (src *XID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *XID) Scan(src interface{}) error {
+ return (*pguint32)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *XID) Value() (driver.Value, error) {
+ return (*pguint32)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/query.go b/vendor/github.com/jackc/pgx/query.go
index 19b867e..3576091 100644
--- a/vendor/github.com/jackc/pgx/query.go
+++ b/vendor/github.com/jackc/pgx/query.go
@@ -1,10 +1,16 @@
package pgx
import (
+ "context"
"database/sql"
- "errors"
"fmt"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/internal/sanitize"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
)
// Row is a convenience wrapper over Rows that is returned by QueryRow.
@@ -37,16 +43,16 @@ func (r *Row) Scan(dest ...interface{}) (err error) {
// calling Next() until it returns false, or when a fatal error occurs.
type Rows struct {
conn *Conn
- mr *msgReader
+ connPool *ConnPool
+ batch *Batch
+ values [][]byte
fields []FieldDescription
- vr ValueReader
rowCount int
columnIdx int
err error
startTime time.Time
sql string
args []interface{}
- afterClose func(*Rows)
unlockConn bool
closed bool
}
@@ -55,7 +61,9 @@ func (rows *Rows) FieldDescriptions() []FieldDescription {
return rows.fields
}
-func (rows *Rows) close() {
+// Close closes the rows, making the connection ready for use again. It is safe
+// to call Close after rows is already closed.
+func (rows *Rows) Close() {
if rows.closed {
return
}
@@ -67,80 +75,33 @@ func (rows *Rows) close() {
rows.closed = true
+ rows.err = rows.conn.termContext(rows.err)
+
if rows.err == nil {
if rows.conn.shouldLog(LogLevelInfo) {
endTime := time.Now()
- rows.conn.log(LogLevelInfo, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args), "time", endTime.Sub(rows.startTime), "rowCount", rows.rowCount)
+ rows.conn.log(LogLevelInfo, "Query", map[string]interface{}{"sql": rows.sql, "args": logQueryArgs(rows.args), "time": endTime.Sub(rows.startTime), "rowCount": rows.rowCount})
}
} else if rows.conn.shouldLog(LogLevelError) {
- rows.conn.log(LogLevelError, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args))
+ rows.conn.log(LogLevelError, "Query", map[string]interface{}{"sql": rows.sql, "args": logQueryArgs(rows.args)})
}
- if rows.afterClose != nil {
- rows.afterClose(rows)
- }
-}
-
-func (rows *Rows) readUntilReadyForQuery() {
- for {
- t, r, err := rows.conn.rxMsg()
- if err != nil {
- rows.close()
- return
- }
-
- switch t {
- case readyForQuery:
- rows.conn.rxReadyForQuery(r)
- rows.close()
- return
- case rowDescription:
- case dataRow:
- case commandComplete:
- case bindComplete:
- case errorResponse:
- err = rows.conn.rxErrorResponse(r)
- if rows.err == nil {
- rows.err = err
- }
- default:
- err = rows.conn.processContextFreeMsg(t, r)
- if err != nil {
- rows.close()
- return
- }
- }
+ if rows.batch != nil && rows.err != nil {
+ rows.batch.die(rows.err)
}
-}
-// Close closes the rows, making the connection ready for use again. It is safe
-// to call Close after rows is already closed.
-func (rows *Rows) Close() {
- if rows.closed {
- return
+ if rows.connPool != nil {
+ rows.connPool.Release(rows.conn)
}
- rows.readUntilReadyForQuery()
- rows.close()
}
func (rows *Rows) Err() error {
return rows.err
}
-// abort signals that the query was not successfully sent to the server.
-// This differs from Fatal in that it is not necessary to readUntilReadyForQuery
-func (rows *Rows) abort(err error) {
- if rows.err != nil {
- return
- }
-
- rows.err = err
- rows.close()
-}
-
-// Fatal signals an error occurred after the query was sent to the server. It
+// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
-func (rows *Rows) Fatal(err error) {
+func (rows *Rows) fatal(err error) {
if rows.err != nil {
return
}
@@ -159,64 +120,64 @@ func (rows *Rows) Next() bool {
rows.rowCount++
rows.columnIdx = 0
- rows.vr = ValueReader{}
for {
- t, r, err := rows.conn.rxMsg()
+ msg, err := rows.conn.rxMsg()
if err != nil {
- rows.Fatal(err)
+ rows.fatal(err)
return false
}
- switch t {
- case readyForQuery:
- rows.conn.rxReadyForQuery(r)
- rows.close()
- return false
- case dataRow:
- fieldCount := r.readInt16()
- if int(fieldCount) != len(rows.fields) {
- rows.Fatal(ProtocolError(fmt.Sprintf("Row description field count (%v) and data row field count (%v) do not match", len(rows.fields), fieldCount)))
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rows.fields = rows.conn.rxRowDescription(msg)
+ for i := range rows.fields {
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(rows.fields[i].DataType); ok {
+ rows.fields[i].DataTypeName = dt.Name
+ rows.fields[i].FormatCode = TextFormatCode
+ } else {
+ rows.fatal(errors.Errorf("unknown oid: %d", rows.fields[i].DataType))
+ return false
+ }
+ }
+ case *pgproto3.DataRow:
+ if len(msg.Values) != len(rows.fields) {
+ rows.fatal(ProtocolError(fmt.Sprintf("Row description field count (%v) and data row field count (%v) do not match", len(rows.fields), len(msg.Values))))
return false
}
- rows.mr = r
+ rows.values = msg.Values
return true
- case commandComplete:
- case bindComplete:
+ case *pgproto3.CommandComplete:
+ if rows.batch != nil {
+ rows.batch.pendingCommandComplete = false
+ }
+ rows.Close()
+ return false
+
default:
- err = rows.conn.processContextFreeMsg(t, r)
+ err = rows.conn.processContextFreeMsg(msg)
if err != nil {
- rows.Fatal(err)
+ rows.fatal(err)
return false
}
}
}
}
-// Conn returns the *Conn this *Rows is using.
-func (rows *Rows) Conn() *Conn {
- return rows.conn
-}
-
-func (rows *Rows) nextColumn() (*ValueReader, bool) {
+func (rows *Rows) nextColumn() ([]byte, *FieldDescription, bool) {
if rows.closed {
- return nil, false
+ return nil, nil, false
}
if len(rows.fields) <= rows.columnIdx {
- rows.Fatal(ProtocolError("No next column available"))
- return nil, false
- }
-
- if rows.vr.Len() > 0 {
- rows.mr.readBytes(rows.vr.Len())
+ rows.fatal(ProtocolError("No next column available"))
+ return nil, nil, false
}
+ buf := rows.values[rows.columnIdx]
fd := &rows.fields[rows.columnIdx]
rows.columnIdx++
- size := rows.mr.readInt32()
- rows.vr = ValueReader{mr: rows.mr, fd: fd, valueBytesRemaining: size}
- return &rows.vr, true
+ return buf, fd, true
}
type scanArgError struct {
@@ -234,93 +195,71 @@ func (e scanArgError) Error() string {
// copy the raw bytes received from PostgreSQL. nil will skip the value entirely.
func (rows *Rows) Scan(dest ...interface{}) (err error) {
if len(rows.fields) != len(dest) {
- err = fmt.Errorf("Scan received wrong number of arguments, got %d but expected %d", len(dest), len(rows.fields))
- rows.Fatal(err)
+ err = errors.Errorf("Scan received wrong number of arguments, got %d but expected %d", len(dest), len(rows.fields))
+ rows.fatal(err)
return err
}
for i, d := range dest {
- vr, _ := rows.nextColumn()
+ buf, fd, _ := rows.nextColumn()
if d == nil {
continue
}
- // Check for []byte first as we allow sidestepping the decoding process and retrieving the raw bytes
- if b, ok := d.(*[]byte); ok {
- // If it actually is a bytea then pass it through decodeBytea (so it can be decoded if it is in text format)
- // Otherwise read the bytes directly regardless of what the actual type is.
- if vr.Type().DataType == ByteaOid {
- *b = decodeBytea(vr)
- } else {
- if vr.Len() != -1 {
- *b = vr.ReadBytes(vr.Len())
- } else {
- *b = nil
- }
- }
- } else if s, ok := d.(Scanner); ok {
- err = s.Scan(vr)
+ if s, ok := d.(pgtype.BinaryDecoder); ok && fd.FormatCode == BinaryFormatCode {
+ err = s.DecodeBinary(rows.conn.ConnInfo, buf)
if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
+ rows.fatal(scanArgError{col: i, err: err})
}
- } else if s, ok := d.(PgxScanner); ok {
- err = s.ScanPgx(vr)
+ } else if s, ok := d.(pgtype.TextDecoder); ok && fd.FormatCode == TextFormatCode {
+ err = s.DecodeText(rows.conn.ConnInfo, buf)
if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
+ rows.fatal(scanArgError{col: i, err: err})
}
- } else if s, ok := d.(sql.Scanner); ok {
- var val interface{}
- if 0 <= vr.Len() {
- switch vr.Type().DataType {
- case BoolOid:
- val = decodeBool(vr)
- case Int8Oid:
- val = int64(decodeInt8(vr))
- case Int2Oid:
- val = int64(decodeInt2(vr))
- case Int4Oid:
- val = int64(decodeInt4(vr))
- case TextOid, VarcharOid:
- val = decodeText(vr)
- case OidOid:
- val = int64(decodeOid(vr))
- case Float4Oid:
- val = float64(decodeFloat4(vr))
- case Float8Oid:
- val = decodeFloat8(vr)
- case DateOid:
- val = decodeDate(vr)
- case TimestampOid:
- val = decodeTimestamp(vr)
- case TimestampTzOid:
- val = decodeTimestampTz(vr)
+ } else {
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(fd.DataType); ok {
+ value := dt.Value
+ switch fd.FormatCode {
+ case TextFormatCode:
+ if textDecoder, ok := value.(pgtype.TextDecoder); ok {
+ err = textDecoder.DecodeText(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("%T is not a pgtype.TextDecoder", value)})
+ }
+ case BinaryFormatCode:
+ if binaryDecoder, ok := value.(pgtype.BinaryDecoder); ok {
+ err = binaryDecoder.DecodeBinary(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("%T is not a pgtype.BinaryDecoder", value)})
+ }
default:
- val = vr.ReadBytes(vr.Len())
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("unknown format code: %v", fd.FormatCode)})
}
+
+ if rows.Err() == nil {
+ if scanner, ok := d.(sql.Scanner); ok {
+ sqlSrc, err := pgtype.DatabaseSQLValue(rows.conn.ConnInfo, value)
+ if err != nil {
+ rows.fatal(err)
+ }
+ err = scanner.Scan(sqlSrc)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else if err := value.AssignTo(d); err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("unknown oid: %v", fd.DataType)})
}
- err = s.Scan(val)
- if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
- }
- } else if vr.Type().DataType == JsonOid {
- // Because the argument passed to decodeJSON will escape the heap.
- // This allows d to be stack allocated and only copied to the heap when
- // we actually are decoding JSON. This saves one memory allocation per
- // row.
- d2 := d
- decodeJSON(vr, &d2)
- } else if vr.Type().DataType == JsonbOid {
- // Same trick as above for getting stack allocation
- d2 := d
- decodeJSONB(vr, &d2)
- } else {
- if err := Decode(vr, d); err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
- }
- }
- if vr.Err() != nil {
- rows.Fatal(scanArgError{col: i, err: vr.Err()})
}
if rows.Err() != nil {
@@ -340,79 +279,42 @@ func (rows *Rows) Values() ([]interface{}, error) {
values := make([]interface{}, 0, len(rows.fields))
for range rows.fields {
- vr, _ := rows.nextColumn()
+ buf, fd, _ := rows.nextColumn()
- if vr.Len() == -1 {
+ if buf == nil {
values = append(values, nil)
continue
}
- switch vr.Type().FormatCode {
- // All intrinsic types (except string) are encoded with binary
- // encoding so anything else should be treated as a string
- case TextFormatCode:
- values = append(values, vr.ReadString(vr.Len()))
- case BinaryFormatCode:
- switch vr.Type().DataType {
- case TextOid, VarcharOid:
- values = append(values, decodeText(vr))
- case BoolOid:
- values = append(values, decodeBool(vr))
- case ByteaOid:
- values = append(values, decodeBytea(vr))
- case Int8Oid:
- values = append(values, decodeInt8(vr))
- case Int2Oid:
- values = append(values, decodeInt2(vr))
- case Int4Oid:
- values = append(values, decodeInt4(vr))
- case OidOid:
- values = append(values, decodeOid(vr))
- case Float4Oid:
- values = append(values, decodeFloat4(vr))
- case Float8Oid:
- values = append(values, decodeFloat8(vr))
- case BoolArrayOid:
- values = append(values, decodeBoolArray(vr))
- case Int2ArrayOid:
- values = append(values, decodeInt2Array(vr))
- case Int4ArrayOid:
- values = append(values, decodeInt4Array(vr))
- case Int8ArrayOid:
- values = append(values, decodeInt8Array(vr))
- case Float4ArrayOid:
- values = append(values, decodeFloat4Array(vr))
- case Float8ArrayOid:
- values = append(values, decodeFloat8Array(vr))
- case TextArrayOid, VarcharArrayOid:
- values = append(values, decodeTextArray(vr))
- case TimestampArrayOid, TimestampTzArrayOid:
- values = append(values, decodeTimestampArray(vr))
- case DateOid:
- values = append(values, decodeDate(vr))
- case TimestampTzOid:
- values = append(values, decodeTimestampTz(vr))
- case TimestampOid:
- values = append(values, decodeTimestamp(vr))
- case InetOid, CidrOid:
- values = append(values, decodeInet(vr))
- case JsonOid:
- var d interface{}
- decodeJSON(vr, &d)
- values = append(values, d)
- case JsonbOid:
- var d interface{}
- decodeJSONB(vr, &d)
- values = append(values, d)
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(fd.DataType); ok {
+ value := dt.Value
+
+ switch fd.FormatCode {
+ case TextFormatCode:
+ decoder := value.(pgtype.TextDecoder)
+ if decoder == nil {
+ decoder = &pgtype.GenericText{}
+ }
+ err := decoder.DecodeText(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, decoder.(pgtype.Value).Get())
+ case BinaryFormatCode:
+ decoder := value.(pgtype.BinaryDecoder)
+ if decoder == nil {
+ decoder = &pgtype.GenericBinary{}
+ }
+ err := decoder.DecodeBinary(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, value.Get())
default:
- rows.Fatal(errors.New("Values cannot handle binary format non-intrinsic types"))
+ rows.fatal(errors.New("Unknown format code"))
}
- default:
- rows.Fatal(errors.New("Unknown format code"))
- }
-
- if vr.Err() != nil {
- rows.Fatal(vr.Err())
+ } else {
+ rows.fatal(errors.New("Unknown type"))
}
if rows.Err() != nil {
@@ -423,72 +325,221 @@ func (rows *Rows) Values() ([]interface{}, error) {
return values, rows.Err()
}
-// AfterClose adds f to a LILO queue of functions that will be called when
-// rows is closed.
-func (rows *Rows) AfterClose(f func(*Rows)) {
- if rows.afterClose == nil {
- rows.afterClose = f
- } else {
- prevFn := rows.afterClose
- rows.afterClose = func(rows *Rows) {
- f(rows)
- prevFn(rows)
- }
- }
-}
-
// Query executes sql with args. If there is an error the returned *Rows will
// be returned in an error state. So it is allowed to ignore the error returned
// from Query and handle it in *Rows.
func (c *Conn) Query(sql string, args ...interface{}) (*Rows, error) {
+ return c.QueryEx(context.Background(), sql, nil, args...)
+}
+
+func (c *Conn) getRows(sql string, args []interface{}) *Rows {
+ if len(c.preallocatedRows) == 0 {
+ c.preallocatedRows = make([]Rows, 64)
+ }
+
+ r := &c.preallocatedRows[len(c.preallocatedRows)-1]
+ c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+
+ r.conn = c
+ r.startTime = c.lastActivityTime
+ r.sql = sql
+ r.args = args
+
+ return r
+}
+
+// QueryRow is a convenience wrapper over Query. Any error that occurs while
+// querying is deferred until calling Scan on the returned *Row. That *Row will
+// error with ErrNoRows if no rows are returned.
+func (c *Conn) QueryRow(sql string, args ...interface{}) *Row {
+ rows, _ := c.Query(sql, args...)
+ return (*Row)(rows)
+}
+
+type QueryExOptions struct {
+ // When ParameterOIDs are present and the query is not a prepared statement,
+ // then ParameterOIDs and ResultFormatCodes will be used to avoid an extra
+ // network round-trip.
+ ParameterOIDs []pgtype.OID
+ ResultFormatCodes []int16
+
+ SimpleProtocol bool
+}
+
+func (c *Conn) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (rows *Rows, err error) {
c.lastActivityTime = time.Now()
+ rows = c.getRows(sql, args)
+
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
- rows := c.getRows(sql, args)
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
if err := c.lock(); err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, err
}
rows.unlockConn = true
+ err = c.initContext(ctx)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ if (options == nil && c.config.PreferSimpleProtocol) || (options != nil && options.SimpleProtocol) {
+ err = c.sanitizeAndSendSimpleQuery(sql, args...)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ return rows, nil
+ }
+
+ if options != nil && len(options.ParameterOIDs) > 0 {
+
+ buf, err := c.buildOneRoundTripQueryEx(c.wbuf, sql, options, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ buf = appendSync(buf)
+
+ n, err := c.conn.Write(buf)
+ if err != nil && fatalWriteErr(n, err) {
+ rows.fatal(err)
+ c.die(err)
+ return rows, err
+ }
+ c.pendingReadyForQueryCount++
+
+ fieldDescriptions, err := c.readUntilRowDescription()
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ if len(options.ResultFormatCodes) == 0 {
+ for i := range fieldDescriptions {
+ fieldDescriptions[i].FormatCode = TextFormatCode
+ }
+ } else if len(options.ResultFormatCodes) == 1 {
+ fc := options.ResultFormatCodes[0]
+ for i := range fieldDescriptions {
+ fieldDescriptions[i].FormatCode = fc
+ }
+ } else {
+ for i := range options.ResultFormatCodes {
+ fieldDescriptions[i].FormatCode = options.ResultFormatCodes[i]
+ }
+ }
+
+ rows.sql = sql
+ rows.fields = fieldDescriptions
+ return rows, nil
+ }
+
ps, ok := c.preparedStatements[sql]
if !ok {
var err error
- ps, err = c.Prepare("", sql)
+ ps, err = c.prepareEx("", sql, nil)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, rows.err
}
}
rows.sql = ps.SQL
rows.fields = ps.FieldDescriptions
- err := c.sendPreparedQuery(ps, args...)
+
+ err = c.sendPreparedQuery(ps, args...)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
}
+
return rows, rows.err
}
-func (c *Conn) getRows(sql string, args []interface{}) *Rows {
- if len(c.preallocatedRows) == 0 {
- c.preallocatedRows = make([]Rows, 64)
+func (c *Conn) buildOneRoundTripQueryEx(buf []byte, sql string, options *QueryExOptions, arguments []interface{}) ([]byte, error) {
+ if len(arguments) != len(options.ParameterOIDs) {
+ return nil, errors.Errorf("mismatched number of arguments (%d) and options.ParameterOIDs (%d)", len(arguments), len(options.ParameterOIDs))
}
- r := &c.preallocatedRows[len(c.preallocatedRows)-1]
- c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+ if len(options.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of QueryExOptions ParameterOIDs must be between 0 and 65535, received %d", len(options.ParameterOIDs))
+ }
- r.conn = c
- r.startTime = c.lastActivityTime
- r.sql = sql
- r.args = args
+ buf = appendParse(buf, "", sql, options.ParameterOIDs)
+ buf = appendDescribe(buf, 'S', "")
+ buf, err := appendBind(buf, "", "", c.ConnInfo, options.ParameterOIDs, arguments, options.ResultFormatCodes)
+ if err != nil {
+ return nil, err
+ }
+ buf = appendExecute(buf, "", 0)
- return r
+ return buf, nil
}
-// QueryRow is a convenience wrapper over Query. Any error that occurs while
-// querying is deferred until calling Scan on the returned *Row. That *Row will
-// error with ErrNoRows if no rows are returned.
-func (c *Conn) QueryRow(sql string, args ...interface{}) *Row {
- rows, _ := c.Query(sql, args...)
+func (c *Conn) readUntilRowDescription() ([]FieldDescription, error) {
+ for {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ case *pgproto3.RowDescription:
+ fieldDescriptions := c.rxRowDescription(msg)
+ for i := range fieldDescriptions {
+ if dt, ok := c.ConnInfo.DataTypeForOID(fieldDescriptions[i].DataType); ok {
+ fieldDescriptions[i].DataTypeName = dt.Name
+ } else {
+ return nil, errors.Errorf("unknown oid: %d", fieldDescriptions[i].DataType)
+ }
+ }
+ return fieldDescriptions, nil
+ default:
+ if err := c.processContextFreeMsg(msg); err != nil {
+ return nil, err
+ }
+ }
+ }
+}
+
+func (c *Conn) sanitizeAndSendSimpleQuery(sql string, args ...interface{}) (err error) {
+ if c.RuntimeParams["standard_conforming_strings"] != "on" {
+ return errors.New("simple protocol queries must be run with standard_conforming_strings=on")
+ }
+
+ if c.RuntimeParams["client_encoding"] != "UTF8" {
+ return errors.New("simple protocol queries must be run with client_encoding=UTF8")
+ }
+
+ valueArgs := make([]interface{}, len(args))
+ for i, a := range args {
+ valueArgs[i], err = convertSimpleArgument(c.ConnInfo, a)
+ if err != nil {
+ return err
+ }
+ }
+
+ sql, err = sanitize.SanitizeSQL(sql, valueArgs...)
+ if err != nil {
+ return err
+ }
+
+ return c.sendSimpleQuery(sql)
+}
+
+func (c *Conn) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := c.QueryEx(ctx, sql, options, args...)
return (*Row)(rows)
}
diff --git a/vendor/github.com/jackc/pgx/query_test.go b/vendor/github.com/jackc/pgx/query_test.go
deleted file mode 100644
index f08887b..0000000
--- a/vendor/github.com/jackc/pgx/query_test.go
+++ /dev/null
@@ -1,1414 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "database/sql"
- "fmt"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-
- "github.com/shopspring/decimal"
-)
-
-func TestConnQueryScan(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var sum, rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
-
-func TestConnQueryValues(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var rowCount int32
-
- rows, err := conn.Query("select 'foo'::text, 'bar'::varchar, n, null, n::oid from generate_series(1,$1) n", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- rowCount++
-
- values, err := rows.Values()
- if err != nil {
- t.Fatalf("rows.Values failed: %v", err)
- }
- if len(values) != 5 {
- t.Errorf("Expected rows.Values to return 5 values, but it returned %d", len(values))
- }
- if values[0] != "foo" {
- t.Errorf(`Expected values[0] to be "foo", but it was %v`, values[0])
- }
- if values[1] != "bar" {
- t.Errorf(`Expected values[1] to be "bar", but it was %v`, values[1])
- }
-
- if values[2] != rowCount {
- t.Errorf(`Expected values[2] to be %d, but it was %d`, rowCount, values[2])
- }
-
- if values[3] != nil {
- t.Errorf(`Expected values[3] to be %v, but it was %d`, nil, values[3])
- }
-
- if values[4] != pgx.Oid(rowCount) {
- t.Errorf(`Expected values[4] to be %d, but it was %d`, rowCount, values[4])
- }
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
-}
-
-// Test that a connection stays valid when query results are closed early
-func TestConnQueryCloseEarly(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Immediately close query without reading any rows
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- rows.Close()
-
- ensureConnValid(t, conn)
-
- // Read partial response then close
- rows, err = conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n int32
- rows.Scan(&n)
- if n != 1 {
- t.Fatalf("Expected 1 from first row, but got %v", n)
- }
-
- rows.Close()
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryCloseEarlyWithErrorOnWire(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 1/(10-n) from generate_series(1,10) n")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- rows.Close()
-
- ensureConnValid(t, conn)
-}
-
-// Test that a connection stays valid when query results read incorrectly
-func TestConnQueryReadWrongTypeError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Read a single value incorrectly
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- rowsRead := 0
-
- for rows.Next() {
- var t time.Time
- rows.Scan(&t)
- rowsRead++
- }
-
- if rowsRead != 1 {
- t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
- }
-
- if rows.Err() == nil {
- t.Fatal("Expected Rows to have an error after an improper read but it didn't")
- }
-
- if rows.Err().Error() != "can't scan into dest[0]: Can't convert OID 23 to time.Time" {
- t.Fatalf("Expected different Rows.Err(): %v", rows.Err())
- }
-
- ensureConnValid(t, conn)
-}
-
-// Test that a connection stays valid when query results read incorrectly
-func TestConnQueryReadTooManyValues(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Read too many values
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- rowsRead := 0
-
- for rows.Next() {
- var n, m int32
- rows.Scan(&n, &m)
- rowsRead++
- }
-
- if rowsRead != 1 {
- t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
- }
-
- if rows.Err() == nil {
- t.Fatal("Expected Rows to have an error after an improper read but it didn't")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryScanIgnoreColumn(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 1::int8, 2::int8, 3::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m int64
- err = rows.Scan(&n, nil, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n != 1 {
- t.Errorf("Expected n to equal 1, but it was %d", n)
- }
-
- if m != 3 {
- t.Errorf("Expected n to equal 3, but it was %d", m)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::int8, 1::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m pgx.NullInt64
- err = rows.Scan(&n, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n.Valid {
- t.Error("Null should not be valid, but it was")
- }
-
- if !m.Valid {
- t.Error("1 should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-type pgxNullInt64 struct {
- Int64 int64
- Valid bool // Valid is true if Int64 is not NULL
-}
-
-func (n *pgxNullInt64) ScanPgx(vr *pgx.ValueReader) error {
- if vr.Type().DataType != pgx.Int8Oid {
- return pgx.SerializationError(fmt.Sprintf("pgxNullInt64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
-
- err := pgx.Decode(vr, &n.Int64)
- if err != nil {
- return err
- }
- return vr.Err()
-}
-
-func TestConnQueryPgxScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::int8, 1::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m pgxNullInt64
- err = rows.Scan(&n, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n.Valid {
- t.Error("Null should not be valid, but it was")
- }
-
- if !m.Valid {
- t.Error("1 should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryErrorWhileReturningRows(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for i := 0; i < 100; i++ {
- func() {
- sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
-
- rows, err := conn.Query(sql)
- if err != nil {
- t.Fatal(err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- }
-
- if err, ok := rows.Err().(pgx.PgError); !ok {
- t.Fatalf("Expected pgx.PgError, got %v", err)
- }
-
- ensureConnValid(t, conn)
- }()
- }
-
-}
-
-func TestConnQueryEncoder(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- n := pgx.NullInt64{Int64: 1, Valid: true}
-
- rows, err := conn.Query("select $1::int8", &n)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var m pgx.NullInt64
- err = rows.Scan(&m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if !m.Valid {
- t.Error("m should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryEncodeError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select $1::integer", "wrong")
- if err != nil {
- t.Errorf("conn.Query failure: %v", err)
- }
- defer rows.Close()
-
- rows.Next()
-
- if rows.Err() == nil {
- t.Error("Expected rows.Err() to return error, but it didn't")
- }
- if rows.Err().Error() != `ERROR: invalid input syntax for integer: "wrong" (SQLSTATE 22P02)` {
- t.Error("Expected rows.Err() to return different error:", rows.Err())
- }
-}
-
-// Ensure that an argument that implements Encoder works when the parameter type
-// is a core type.
-type coreEncoder struct{}
-
-func (n coreEncoder) FormatCode() int16 { return pgx.TextFormatCode }
-
-func (n *coreEncoder) Encode(w *pgx.WriteBuf, oid pgx.Oid) error {
- w.WriteInt32(int32(2))
- w.WriteBytes([]byte("42"))
- return nil
-}
-
-func TestQueryEncodeCoreTextFormatError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var n int32
- err := conn.QueryRow("select $1::integer", &coreEncoder{}).Scan(&n)
- if err != nil {
- t.Fatalf("Unexpected conn.QueryRow error: %v", err)
- }
-
- if n != 42 {
- t.Errorf("Expected 42, got %v", n)
- }
-}
-
-func TestQueryRowCoreTypes(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s string
- f32 float32
- f64 float64
- b bool
- t time.Time
- oid pgx.Oid
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.s}, allTypes{s: "Jack"}},
- {"select $1::float4", []interface{}{float32(1.23)}, []interface{}{&actual.f32}, allTypes{f32: 1.23}},
- {"select $1::float8", []interface{}{float64(1.23)}, []interface{}{&actual.f64}, allTypes{f64: 1.23}},
- {"select $1::bool", []interface{}{true}, []interface{}{&actual.b}, allTypes{b: true}},
- {"select $1::timestamptz", []interface{}{time.Unix(123, 5000)}, []interface{}{&actual.t}, allTypes{t: time.Unix(123, 5000)}},
- {"select $1::timestamp", []interface{}{time.Date(2010, 1, 2, 3, 4, 5, 0, time.Local)}, []interface{}{&actual.t}, allTypes{t: time.Date(2010, 1, 2, 3, 4, 5, 0, time.Local)}},
- {"select $1::date", []interface{}{time.Date(1987, 1, 2, 0, 0, 0, 0, time.Local)}, []interface{}{&actual.t}, allTypes{t: time.Date(1987, 1, 2, 0, 0, 0, 0, time.Local)}},
- {"select $1::oid", []interface{}{pgx.Oid(42)}, []interface{}{&actual.oid}, allTypes{oid: 42}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
-
- // Check that Scan errors when a core type is null
- err = conn.QueryRow(tt.sql, nil).Scan(tt.scanArgs...)
- if err == nil {
- t.Errorf("%d. Expected null to cause error, but it didn't (sql -> %v)", i, tt.sql)
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`%d. Expected null to cause error "Cannot decode null..." but it was %v (sql -> %v)`, i, err, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreIntegerEncoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- ui uint
- ui8 uint8
- ui16 uint16
- ui32 uint32
- ui64 uint64
- i int
- i8 int8
- i16 int16
- i32 int32
- i64 int64
- }
-
- var actual, zero allTypes
-
- successfulEncodeTests := []struct {
- sql string
- queryArg interface{}
- scanArg interface{}
- expected allTypes
- }{
- // Check any integer type where value is within int2 range can be encoded
- {"select $1::int2", int(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int8(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int16(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int32(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int64(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint8(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint16(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint32(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint64(42), &actual.i16, allTypes{i16: 42}},
-
- // Check any integer type where value is within int4 range can be encoded
- {"select $1::int4", int(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int8(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int16(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int32(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int64(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint8(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint16(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint32(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint64(42), &actual.i32, allTypes{i32: 42}},
-
- // Check any integer type where value is within int8 range can be encoded
- {"select $1::int8", int(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int8(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int16(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int32(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int64(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint8(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint16(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint32(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint64(42), &actual.i64, allTypes{i64: 42}},
- }
-
- for i, tt := range successfulEncodeTests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(tt.scanArg)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- continue
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArg -> %v)", i, tt.expected, actual, tt.sql, tt.queryArg)
- }
-
- ensureConnValid(t, conn)
- }
-
- failedEncodeTests := []struct {
- sql string
- queryArg interface{}
- }{
- // Check any integer type where value is outside pg:int2 range cannot be encoded
- {"select $1::int2", int(32769)},
- {"select $1::int2", int32(32769)},
- {"select $1::int2", int32(32769)},
- {"select $1::int2", int64(32769)},
- {"select $1::int2", uint(32769)},
- {"select $1::int2", uint16(32769)},
- {"select $1::int2", uint32(32769)},
- {"select $1::int2", uint64(32769)},
-
- // Check any integer type where value is outside pg:int4 range cannot be encoded
- {"select $1::int4", int64(2147483649)},
- {"select $1::int4", uint32(2147483649)},
- {"select $1::int4", uint64(2147483649)},
-
- // Check any integer type where value is outside pg:int8 range cannot be encoded
- {"select $1::int8", uint64(9223372036854775809)},
- }
-
- for i, tt := range failedEncodeTests {
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(nil)
- if err == nil {
- t.Errorf("%d. Expected failure to encode, but unexpectedly succeeded: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- } else if !strings.Contains(err.Error(), "is greater than") {
- t.Errorf("%d. Expected failure to encode, but got: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreIntegerDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- ui uint
- ui8 uint8
- ui16 uint16
- ui32 uint32
- ui64 uint64
- i int
- i8 int8
- i16 int16
- i32 int32
- i64 int64
- }
-
- var actual, zero allTypes
-
- successfulDecodeTests := []struct {
- sql string
- scanArg interface{}
- expected allTypes
- }{
- // Check any integer type where value is within Go:int range can be decoded
- {"select 42::int2", &actual.i, allTypes{i: 42}},
- {"select 42::int4", &actual.i, allTypes{i: 42}},
- {"select 42::int8", &actual.i, allTypes{i: 42}},
- {"select -42::int2", &actual.i, allTypes{i: -42}},
- {"select -42::int4", &actual.i, allTypes{i: -42}},
- {"select -42::int8", &actual.i, allTypes{i: -42}},
-
- // Check any integer type where value is within Go:int8 range can be decoded
- {"select 42::int2", &actual.i8, allTypes{i8: 42}},
- {"select 42::int4", &actual.i8, allTypes{i8: 42}},
- {"select 42::int8", &actual.i8, allTypes{i8: 42}},
- {"select -42::int2", &actual.i8, allTypes{i8: -42}},
- {"select -42::int4", &actual.i8, allTypes{i8: -42}},
- {"select -42::int8", &actual.i8, allTypes{i8: -42}},
-
- // Check any integer type where value is within Go:int16 range can be decoded
- {"select 42::int2", &actual.i16, allTypes{i16: 42}},
- {"select 42::int4", &actual.i16, allTypes{i16: 42}},
- {"select 42::int8", &actual.i16, allTypes{i16: 42}},
- {"select -42::int2", &actual.i16, allTypes{i16: -42}},
- {"select -42::int4", &actual.i16, allTypes{i16: -42}},
- {"select -42::int8", &actual.i16, allTypes{i16: -42}},
-
- // Check any integer type where value is within Go:int32 range can be decoded
- {"select 42::int2", &actual.i32, allTypes{i32: 42}},
- {"select 42::int4", &actual.i32, allTypes{i32: 42}},
- {"select 42::int8", &actual.i32, allTypes{i32: 42}},
- {"select -42::int2", &actual.i32, allTypes{i32: -42}},
- {"select -42::int4", &actual.i32, allTypes{i32: -42}},
- {"select -42::int8", &actual.i32, allTypes{i32: -42}},
-
- // Check any integer type where value is within Go:int64 range can be decoded
- {"select 42::int2", &actual.i64, allTypes{i64: 42}},
- {"select 42::int4", &actual.i64, allTypes{i64: 42}},
- {"select 42::int8", &actual.i64, allTypes{i64: 42}},
- {"select -42::int2", &actual.i64, allTypes{i64: -42}},
- {"select -42::int4", &actual.i64, allTypes{i64: -42}},
- {"select -42::int8", &actual.i64, allTypes{i64: -42}},
-
- // Check any integer type where value is within Go:uint range can be decoded
- {"select 128::int2", &actual.ui, allTypes{ui: 128}},
- {"select 128::int4", &actual.ui, allTypes{ui: 128}},
- {"select 128::int8", &actual.ui, allTypes{ui: 128}},
-
- // Check any integer type where value is within Go:uint8 range can be decoded
- {"select 128::int2", &actual.ui8, allTypes{ui8: 128}},
- {"select 128::int4", &actual.ui8, allTypes{ui8: 128}},
- {"select 128::int8", &actual.ui8, allTypes{ui8: 128}},
-
- // Check any integer type where value is within Go:uint16 range can be decoded
- {"select 42::int2", &actual.ui16, allTypes{ui16: 42}},
- {"select 32768::int4", &actual.ui16, allTypes{ui16: 32768}},
- {"select 32768::int8", &actual.ui16, allTypes{ui16: 32768}},
-
- // Check any integer type where value is within Go:uint32 range can be decoded
- {"select 42::int2", &actual.ui32, allTypes{ui32: 42}},
- {"select 42::int4", &actual.ui32, allTypes{ui32: 42}},
- {"select 2147483648::int8", &actual.ui32, allTypes{ui32: 2147483648}},
-
- // Check any integer type where value is within Go:uint64 range can be decoded
- {"select 42::int2", &actual.ui64, allTypes{ui64: 42}},
- {"select 42::int4", &actual.ui64, allTypes{ui64: 42}},
- {"select 42::int8", &actual.ui64, allTypes{ui64: 42}},
- }
-
- for i, tt := range successfulDecodeTests {
- actual = zero
-
- err := conn.QueryRow(tt.sql).Scan(tt.scanArg)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- continue
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failedDecodeTests := []struct {
- sql string
- scanArg interface{}
- expectedErr string
- }{
- // Check any integer type where value is outside Go:int8 range cannot be decoded
- {"select 128::int2", &actual.i8, "is greater than"},
- {"select 128::int4", &actual.i8, "is greater than"},
- {"select 128::int8", &actual.i8, "is greater than"},
- {"select -129::int2", &actual.i8, "is less than"},
- {"select -129::int4", &actual.i8, "is less than"},
- {"select -129::int8", &actual.i8, "is less than"},
-
- // Check any integer type where value is outside Go:int16 range cannot be decoded
- {"select 32768::int4", &actual.i16, "is greater than"},
- {"select 32768::int8", &actual.i16, "is greater than"},
- {"select -32769::int4", &actual.i16, "is less than"},
- {"select -32769::int8", &actual.i16, "is less than"},
-
- // Check any integer type where value is outside Go:int32 range cannot be decoded
- {"select 2147483648::int8", &actual.i32, "is greater than"},
- {"select -2147483649::int8", &actual.i32, "is less than"},
-
- // Check any integer type where value is outside Go:uint range cannot be decoded
- {"select -1::int2", &actual.ui, "is less than"},
- {"select -1::int4", &actual.ui, "is less than"},
- {"select -1::int8", &actual.ui, "is less than"},
-
- // Check any integer type where value is outside Go:uint8 range cannot be decoded
- {"select 256::int2", &actual.ui8, "is greater than"},
- {"select 256::int4", &actual.ui8, "is greater than"},
- {"select 256::int8", &actual.ui8, "is greater than"},
- {"select -1::int2", &actual.ui8, "is less than"},
- {"select -1::int4", &actual.ui8, "is less than"},
- {"select -1::int8", &actual.ui8, "is less than"},
-
- // Check any integer type where value is outside Go:uint16 cannot be decoded
- {"select 65536::int4", &actual.ui16, "is greater than"},
- {"select 65536::int8", &actual.ui16, "is greater than"},
- {"select -1::int2", &actual.ui16, "is less than"},
- {"select -1::int4", &actual.ui16, "is less than"},
- {"select -1::int8", &actual.ui16, "is less than"},
-
- // Check any integer type where value is outside Go:uint32 range cannot be decoded
- {"select 4294967296::int8", &actual.ui32, "is greater than"},
- {"select -1::int2", &actual.ui32, "is less than"},
- {"select -1::int4", &actual.ui32, "is less than"},
- {"select -1::int8", &actual.ui32, "is less than"},
-
- // Check any integer type where value is outside Go:uint64 range cannot be decoded
- {"select -1::int2", &actual.ui64, "is less than"},
- {"select -1::int4", &actual.ui64, "is less than"},
- {"select -1::int8", &actual.ui64, "is less than"},
- }
-
- for i, tt := range failedDecodeTests {
- err := conn.QueryRow(tt.sql).Scan(tt.scanArg)
- if err == nil {
- t.Errorf("%d. Expected failure to decode, but unexpectedly succeeded: %v (sql -> %v)", i, err, tt.sql)
- } else if !strings.Contains(err.Error(), tt.expectedErr) {
- t.Errorf("%d. Expected failure to decode, but got: %v (sql -> %v)", i, err, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreByteSlice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- queryArg interface{}
- expected []byte
- }{
- {"select $1::text", "Jack", []byte("Jack")},
- {"select $1::text", []byte("Jack"), []byte("Jack")},
- {"select $1::int4", int32(239023409), []byte{14, 63, 53, 49}},
- {"select $1::varchar", []byte("Jack"), []byte("Jack")},
- {"select $1::bytea", []byte{0, 15, 255, 17}, []byte{0, 15, 255, 17}},
- }
-
- for i, tt := range tests {
- var actual []byte
-
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- }
-
- if !bytes.Equal(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowByteSliceArgument(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::int4"
- queryArg := []byte{14, 63, 53, 49}
- expected := int32(239023409)
-
- var actual int32
-
- err := conn.QueryRow(sql, queryArg).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if expected != actual {
- t.Errorf("Expected %v, got %v (sql -> %v)", expected, actual, sql)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowUnknownType(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::point"
- expected := "(1,0)"
- var actual string
-
- err := conn.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
-
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowErrors(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- i16 int16
- i int
- s string
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- err string
- }{
- {"select $1", []interface{}{"Jack"}, []interface{}{&actual.i16}, "could not determine data type of parameter $1 (SQLSTATE 42P18)"},
- {"select $1::badtype", []interface{}{"Jack"}, []interface{}{&actual.i16}, `type "badtype" does not exist`},
- {"SYNTAX ERROR", []interface{}{}, []interface{}{&actual.i16}, "SQLSTATE 42601"},
- {"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.i16}, "Cannot decode oid 25 into any integer type"},
- {"select $1::point", []interface{}{int(705)}, []interface{}{&actual.s}, "cannot encode int8 into oid 600"},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err == nil {
- t.Errorf("%d. Unexpected success (sql -> %v, queryArgs -> %v)", i, tt.sql, tt.queryArgs)
- }
- if err != nil && !strings.Contains(err.Error(), tt.err) {
- t.Errorf("%d. Expected error to contain %s, but got %v (sql -> %v, queryArgs -> %v)", i, tt.err, err, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowNoResults(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var n int32
- err := conn.QueryRow("select 1 where 1=0").Scan(&n)
- if err != pgx.ErrNoRows {
- t.Errorf("Expected pgx.ErrNoRows, got %v", err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt16Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int16
-
- tests := []struct {
- sql string
- expected []int16
- }{
- {"select $1::int2[]", []int16{1, 2, 3, 4, 5}},
- {"select $1::int2[]", []int16{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int2[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt32Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int32
-
- tests := []struct {
- sql string
- expected []int32
- }{
- {"select $1::int4[]", []int32{1, 2, 3, 4, 5}},
- {"select $1::int4[]", []int32{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int4[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt64Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int64
-
- tests := []struct {
- sql string
- expected []int64
- }{
- {"select $1::int8[]", []int64{1, 2, 3, 4, 5}},
- {"select $1::int8[]", []int64{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int8[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreFloat32Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []float32
-
- tests := []struct {
- sql string
- expected []float32
- }{
- {"select $1::float4[]", []float32{1.5, 2.0, 3.5}},
- {"select $1::float4[]", []float32{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1.5, 2.0, 3.5, null}'::float4[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreFloat64Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []float64
-
- tests := []struct {
- sql string
- expected []float64
- }{
- {"select $1::float8[]", []float64{1.5, 2.0, 3.5}},
- {"select $1::float8[]", []float64{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1.5, 2.0, 3.5, null}'::float8[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreStringSlice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []string
-
- tests := []struct {
- sql string
- expected []string
- }{
- {"select $1::text[]", []string{"Adam", "Eve", "UTF-8 Characters Å Æ Ë Ͽ"}},
- {"select $1::text[]", []string{}},
- {"select $1::varchar[]", []string{"Adam", "Eve", "UTF-8 Characters Å Æ Ë Ͽ"}},
- {"select $1::varchar[]", []string{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{Adam,Eve,NULL}'::text[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestReadingValueAfterEmptyArray(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var a []string
- var b int32
- err := conn.QueryRow("select '{}'::text[], 42::integer").Scan(&a, &b)
- if err != nil {
- t.Fatalf("conn.QueryRow failed: %v", err)
- }
-
- if len(a) != 0 {
- t.Errorf("Expected 'a' to have length 0, but it was: %d", len(a))
- }
-
- if b != 42 {
- t.Errorf("Expected 'b' to 42, but it was: %d", b)
- }
-}
-
-func TestReadingNullByteArray(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var a []byte
- err := conn.QueryRow("select null::text").Scan(&a)
- if err != nil {
- t.Fatalf("conn.QueryRow failed: %v", err)
- }
-
- if a != nil {
- t.Errorf("Expected 'a' to be nil, but it was: %v", a)
- }
-}
-
-func TestReadingNullByteArrays(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::text union all select null::text")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- count := 0
- for rows.Next() {
- count++
- var a []byte
- if err := rows.Scan(&a); err != nil {
- t.Fatalf("failed to scan row: %v", err)
- }
- if a != nil {
- t.Errorf("Expected 'a' to be nil, but it was: %v", a)
- }
- }
- if count != 2 {
- t.Errorf("Expected to read 2 rows, read: %d", count)
- }
-}
-
-// Use github.com/shopspring/decimal as real-world database/sql custom type
-// to test against.
-func TestConnQueryDatabaseSQLScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var num decimal.Decimal
-
- err := conn.QueryRow("select '1234.567'::decimal").Scan(&num)
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- expected, err := decimal.NewFromString("1234.567")
- if err != nil {
- t.Fatal(err)
- }
-
- if !num.Equals(expected) {
- t.Errorf("Expected num to be %v, but it was %v", expected, num)
- }
-
- ensureConnValid(t, conn)
-}
-
-// Use github.com/shopspring/decimal as real-world database/sql custom type
-// to test against.
-func TestConnQueryDatabaseSQLDriverValuer(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- expected, err := decimal.NewFromString("1234.567")
- if err != nil {
- t.Fatal(err)
- }
- var num decimal.Decimal
-
- err = conn.QueryRow("select $1::decimal", &expected).Scan(&num)
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- if !num.Equals(expected) {
- t.Errorf("Expected num to be %v, but it was %v", expected, num)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryDatabaseSQLNullX(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type row struct {
- boolValid sql.NullBool
- boolNull sql.NullBool
- int64Valid sql.NullInt64
- int64Null sql.NullInt64
- float64Valid sql.NullFloat64
- float64Null sql.NullFloat64
- stringValid sql.NullString
- stringNull sql.NullString
- }
-
- expected := row{
- boolValid: sql.NullBool{Bool: true, Valid: true},
- int64Valid: sql.NullInt64{Int64: 123, Valid: true},
- float64Valid: sql.NullFloat64{Float64: 3.14, Valid: true},
- stringValid: sql.NullString{String: "pgx", Valid: true},
- }
-
- var actual row
-
- err := conn.QueryRow(
- "select $1::bool, $2::bool, $3::int8, $4::int8, $5::float8, $6::float8, $7::text, $8::text",
- expected.boolValid,
- expected.boolNull,
- expected.int64Valid,
- expected.int64Null,
- expected.float64Valid,
- expected.float64Null,
- expected.stringValid,
- expected.stringNull,
- ).Scan(
- &actual.boolValid,
- &actual.boolNull,
- &actual.int64Valid,
- &actual.int64Null,
- &actual.float64Valid,
- &actual.float64Null,
- &actual.stringValid,
- &actual.stringNull,
- )
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- if expected != actual {
- t.Errorf("Expected %v, but got %v", expected, actual)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/replication.go b/vendor/github.com/jackc/pgx/replication.go
index 7b28d6b..7dd5efe 100644
--- a/vendor/github.com/jackc/pgx/replication.go
+++ b/vendor/github.com/jackc/pgx/replication.go
@@ -1,10 +1,15 @@
package pgx
import (
- "errors"
+ "context"
+ "encoding/binary"
"fmt"
- "net"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
)
const (
@@ -172,17 +177,21 @@ type ReplicationConn struct {
// message to the server, as well as carries the WAL position of the
// client, which then updates the server's replication slot position.
func (rc *ReplicationConn) SendStandbyStatus(k *StandbyStatus) (err error) {
- writeBuf := newWriteBuf(rc.c, copyData)
- writeBuf.WriteByte(standbyStatusUpdate)
- writeBuf.WriteInt64(int64(k.WalWritePosition))
- writeBuf.WriteInt64(int64(k.WalFlushPosition))
- writeBuf.WriteInt64(int64(k.WalApplyPosition))
- writeBuf.WriteInt64(int64(k.ClientTime))
- writeBuf.WriteByte(k.ReplyRequested)
+ buf := rc.c.wbuf
+ buf = append(buf, copyData)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = append(buf, standbyStatusUpdate)
+ buf = pgio.AppendInt64(buf, int64(k.WalWritePosition))
+ buf = pgio.AppendInt64(buf, int64(k.WalFlushPosition))
+ buf = pgio.AppendInt64(buf, int64(k.WalApplyPosition))
+ buf = pgio.AppendInt64(buf, int64(k.ClientTime))
+ buf = append(buf, k.ReplyRequested)
- writeBuf.closeMsg()
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- _, err = rc.c.conn.Write(writeBuf.buf)
+ _, err = rc.c.conn.Write(buf)
if err != nil {
rc.c.die(err)
}
@@ -203,107 +212,115 @@ func (rc *ReplicationConn) CauseOfDeath() error {
}
func (rc *ReplicationConn) readReplicationMessage() (r *ReplicationMessage, err error) {
- var t byte
- var reader *msgReader
- t, reader, err = rc.c.rxMsg()
+ msg, err := rc.c.rxMsg()
if err != nil {
return
}
- switch t {
- case noticeResponse:
- pgError := rc.c.rxErrorResponse(reader)
+ switch msg := msg.(type) {
+ case *pgproto3.NoticeResponse:
+ pgError := rc.c.rxErrorResponse((*pgproto3.ErrorResponse)(msg))
if rc.c.shouldLog(LogLevelInfo) {
- rc.c.log(LogLevelInfo, pgError.Error())
+ rc.c.log(LogLevelInfo, pgError.Error(), nil)
}
- case errorResponse:
- err = rc.c.rxErrorResponse(reader)
+ case *pgproto3.ErrorResponse:
+ err = rc.c.rxErrorResponse(msg)
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, err.Error())
+ rc.c.log(LogLevelError, err.Error(), nil)
}
return
- case copyBothResponse:
+ case *pgproto3.CopyBothResponse:
// This is the tail end of the replication process start,
// and can be safely ignored
return
- case copyData:
- var msgType byte
- msgType = reader.readByte()
+ case *pgproto3.CopyData:
+ msgType := msg.Data[0]
+ rp := 1
+
switch msgType {
case walData:
- walStart := reader.readInt64()
- serverWalEnd := reader.readInt64()
- serverTime := reader.readInt64()
- walData := reader.readBytes(reader.msgBytesRemaining)
- walMessage := WalMessage{WalStart: uint64(walStart),
- ServerWalEnd: uint64(serverWalEnd),
- ServerTime: uint64(serverTime),
+ walStart := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverWalEnd := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverTime := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ walData := msg.Data[rp:]
+ walMessage := WalMessage{WalStart: walStart,
+ ServerWalEnd: serverWalEnd,
+ ServerTime: serverTime,
WalData: walData,
}
return &ReplicationMessage{WalMessage: &walMessage}, nil
case senderKeepalive:
- serverWalEnd := reader.readInt64()
- serverTime := reader.readInt64()
- replyNow := reader.readByte()
- h := &ServerHeartbeat{ServerWalEnd: uint64(serverWalEnd), ServerTime: uint64(serverTime), ReplyRequested: replyNow}
+ serverWalEnd := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverTime := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ replyNow := msg.Data[rp]
+ rp += 1
+ h := &ServerHeartbeat{ServerWalEnd: serverWalEnd, ServerTime: serverTime, ReplyRequested: replyNow}
return &ReplicationMessage{ServerHeartbeat: h}, nil
default:
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unexpected data playload message type %v", t)
+ 				rc.c.log(LogLevelError, "Unexpected data payload message type", map[string]interface{}{"type": msgType})
}
}
default:
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unexpected replication message type %v", t)
+ rc.c.log(LogLevelError, "Unexpected replication message type", map[string]interface{}{"type": msg})
}
}
return
}
-// Wait for a single replication message up to timeout time.
+// Wait for a single replication message.
//
// Properly using this requires some knowledge of the postgres replication mechanisms,
// as the client can receive both WAL data (the ultimate payload) and server heartbeat
// updates. The caller also must send standby status updates in order to keep the connection
// alive and working.
//
-// This returns pgx.ErrNotificationTimeout when there is no replication message by the specified
-// duration.
-func (rc *ReplicationConn) WaitForReplicationMessage(timeout time.Duration) (r *ReplicationMessage, err error) {
- var zeroTime time.Time
-
- deadline := time.Now().Add(timeout)
-
- // Use SetReadDeadline to implement the timeout. SetReadDeadline will
- // cause operations to fail with a *net.OpError that has a Timeout()
- // of true. Because the normal pgx rxMsg path considers any error to
- // have potentially corrupted the state of the connection, it dies
- // on any errors. So to avoid timeout errors in rxMsg we set the
- // deadline and peek into the reader. If a timeout error occurs there
- // we don't break the pgx connection. If the Peek returns that data
- // is available then we turn off the read deadline before the rxMsg.
- err = rc.c.conn.SetReadDeadline(deadline)
- if err != nil {
- return nil, err
+// This returns the context error when there is no replication message before
+// the context is canceled.
+func (rc *ReplicationConn) WaitForReplicationMessage(ctx context.Context) (*ReplicationMessage, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
}
- // Wait until there is a byte available before continuing onto the normal msg reading path
- _, err = rc.c.reader.Peek(1)
- if err != nil {
- rc.c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possiple error from SetReadDeadline
- if err, ok := err.(*net.OpError); ok && err.Timeout() {
- return nil, ErrNotificationTimeout
+ go func() {
+ select {
+ case <-ctx.Done():
+ if err := rc.c.conn.SetDeadline(time.Now()); err != nil {
+ rc.Close() // Close connection if unable to set deadline
+ return
+ }
+ rc.c.closedChan <- ctx.Err()
+ case <-rc.c.doneChan:
}
- return nil, err
- }
+ }()
- err = rc.c.conn.SetReadDeadline(zeroTime)
- if err != nil {
- return nil, err
+ r, opErr := rc.readReplicationMessage()
+
+ var err error
+ select {
+ case err = <-rc.c.closedChan:
+ if err := rc.c.conn.SetDeadline(time.Time{}); err != nil {
+ rc.Close() // Close connection if unable to disable deadline
+ return nil, err
+ }
+
+ if opErr == nil {
+ err = nil
+ }
+ case rc.c.doneChan <- struct{}{}:
+ err = opErr
}
- return rc.readReplicationMessage()
+ return r, err
}
func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
@@ -312,32 +329,30 @@ func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
rows := rc.c.getRows(sql, nil)
if err := rc.c.lock(); err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, err
}
rows.unlockConn = true
err := rc.c.sendSimpleQuery(sql)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
}
- var t byte
- var r *msgReader
- t, r, err = rc.c.rxMsg()
+ msg, err := rc.c.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case rowDescription:
- rows.fields = rc.c.rxRowDescription(r)
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rows.fields = rc.c.rxRowDescription(msg)
// We don't have c.PgTypes here because we're a replication
// connection. This means the field descriptions will have
- // only Oids. Not much we can do about this.
+ // only OIDs. Not much we can do about this.
default:
- if e := rc.c.processContextFreeMsg(t, r); e != nil {
- rows.abort(e)
+ if e := rc.c.processContextFreeMsg(msg); e != nil {
+ rows.fatal(e)
return rows, e
}
}
@@ -354,7 +369,7 @@ func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
//
// NOTE: Because this is a replication mode connection, we don't have
// type names, so the field descriptions in the result will have only
-// Oids and no DataTypeName values
+// OIDs and no DataTypeName values
func (rc *ReplicationConn) IdentifySystem() (r *Rows, err error) {
return rc.sendReplicationModeQuery("IDENTIFY_SYSTEM")
}
@@ -369,7 +384,7 @@ func (rc *ReplicationConn) IdentifySystem() (r *Rows, err error) {
//
// NOTE: Because this is a replication mode connection, we don't have
// type names, so the field descriptions in the result will have only
-// Oids and no DataTypeName values
+// OIDs and no DataTypeName values
func (rc *ReplicationConn) TimelineHistory(timeline int) (r *Rows, err error) {
return rc.sendReplicationModeQuery(fmt.Sprintf("TIMELINE_HISTORY %d", timeline))
}
@@ -401,15 +416,18 @@ func (rc *ReplicationConn) StartReplication(slotName string, startLsn uint64, ti
return
}
+ ctx, cancelFn := context.WithTimeout(context.Background(), initialReplicationResponseTimeout)
+ defer cancelFn()
+
// The first replication message that comes back here will be (in a success case)
// a empty CopyBoth that is (apparently) sent as the confirmation that the replication has
// started. This call will either return nil, nil or if it returns an error
// that indicates the start replication command failed
var r *ReplicationMessage
- r, err = rc.WaitForReplicationMessage(initialReplicationResponseTimeout)
+ r, err = rc.WaitForReplicationMessage(ctx)
if err != nil && r != nil {
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unxpected replication message %v", r)
+ rc.c.log(LogLevelError, "Unexpected replication message", map[string]interface{}{"msg": r, "err": err})
}
}
@@ -422,6 +440,18 @@ func (rc *ReplicationConn) CreateReplicationSlot(slotName, outputPlugin string)
return
}
+// Create the replication slot, using the given name and output plugin, and return the consistent_point and snapshot_name values.
+func (rc *ReplicationConn) CreateReplicationSlotEx(slotName, outputPlugin string) (consistentPoint string, snapshotName string, err error) {
+ var dummy string
+ var rows *Rows
+ rows, err = rc.sendReplicationModeQuery(fmt.Sprintf("CREATE_REPLICATION_SLOT %s LOGICAL %s", slotName, outputPlugin))
+ defer rows.Close()
+ for rows.Next() {
+ rows.Scan(&dummy, &consistentPoint, &snapshotName, &dummy)
+ }
+ return
+}
+
// Drop the replication slot for the given name
func (rc *ReplicationConn) DropReplicationSlot(slotName string) (err error) {
_, err = rc.c.Exec(fmt.Sprintf("DROP_REPLICATION_SLOT %s", slotName))
diff --git a/vendor/github.com/jackc/pgx/replication_test.go b/vendor/github.com/jackc/pgx/replication_test.go
deleted file mode 100644
index 4f810c7..0000000
--- a/vendor/github.com/jackc/pgx/replication_test.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "github.com/jackc/pgx"
- "reflect"
- "strconv"
- "strings"
- "testing"
- "time"
-)
-
-// This function uses a postgresql 9.6 specific column
-func getConfirmedFlushLsnFor(t *testing.T, conn *pgx.Conn, slot string) string {
- // Fetch the restart LSN of the slot, to establish a starting point
- rows, err := conn.Query(fmt.Sprintf("select confirmed_flush_lsn from pg_replication_slots where slot_name='%s'", slot))
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- var restartLsn string
- for rows.Next() {
- rows.Scan(&restartLsn)
- }
- return restartLsn
-}
-
-// This battleship test (at least somewhat by necessity) does
-// several things all at once in a single run. It:
-// - Establishes a replication connection & slot
-// - Does a series of operations to create some known WAL entries
-// - Replicates the entries down, and checks that the rows it
-// created come down in order
-// - Sends a standby status message to update the server with the
-// wal position of the slot
-// - Checks the wal position of the slot on the server to make sure
-// the update succeeded
-func TestSimpleReplicationConnection(t *testing.T) {
- t.Parallel()
-
- var err error
-
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- conn := mustConnect(t, *replicationConnConfig)
- defer closeConn(t, conn)
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- err = replicationConn.CreateReplicationSlot("pgx_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
-
- // Do a simple change so we can get some wal data
- _, err = conn.Exec("create table if not exists replication_test (a integer)")
- if err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- err = replicationConn.StartReplication("pgx_test", 0, -1)
- if err != nil {
- t.Fatalf("Failed to start replication: %v", err)
- }
-
- var i int32
- var insertedTimes []int64
- for i < 5 {
- var ct pgx.CommandTag
- currentTime := time.Now().Unix()
- insertedTimes = append(insertedTimes, currentTime)
- ct, err = conn.Exec("insert into replication_test(a) values($1)", currentTime)
- if err != nil {
- t.Fatalf("Insert failed: %v", err)
- }
- t.Logf("Inserted %d rows", ct.RowsAffected())
- i++
- }
-
- i = 0
- var foundTimes []int64
- var foundCount int
- var maxWal uint64
- for {
- var message *pgx.ReplicationMessage
-
- message, err = replicationConn.WaitForReplicationMessage(time.Duration(1 * time.Second))
- if err != nil {
- if err != pgx.ErrNotificationTimeout {
- t.Fatalf("Replication failed: %v %s", err, reflect.TypeOf(err))
- }
- }
- if message != nil {
- if message.WalMessage != nil {
- // The waldata payload with the test_decoding plugin looks like:
- // public.replication_test: INSERT: a[integer]:2
- // What we wanna do here is check that once we find one of our inserted times,
- // that they occur in the wal stream in the order we executed them.
- walString := string(message.WalMessage.WalData)
- if strings.Contains(walString, "public.replication_test: INSERT") {
- stringParts := strings.Split(walString, ":")
- offset, err := strconv.ParseInt(stringParts[len(stringParts)-1], 10, 64)
- if err != nil {
- t.Fatalf("Failed to parse walString %s", walString)
- }
- if foundCount > 0 || offset == insertedTimes[0] {
- foundTimes = append(foundTimes, offset)
- foundCount++
- }
- }
- if message.WalMessage.WalStart > maxWal {
- maxWal = message.WalMessage.WalStart
- }
-
- }
- if message.ServerHeartbeat != nil {
- t.Logf("Got heartbeat: %s", message.ServerHeartbeat)
- }
- } else {
- t.Log("Timed out waiting for wal message")
- i++
- }
- if i > 3 {
- t.Log("Actual timeout")
- break
- }
- }
-
- if foundCount != len(insertedTimes) {
- t.Fatalf("Failed to find all inserted time values in WAL stream (found %d expected %d)", foundCount, len(insertedTimes))
- }
-
- for i := range insertedTimes {
- if foundTimes[i] != insertedTimes[i] {
- t.Fatalf("Found %d expected %d", foundTimes[i], insertedTimes[i])
- }
- }
-
- t.Logf("Found %d times, as expected", len(foundTimes))
-
- // Before closing our connection, let's send a standby status to update our wal
- // position, which should then be reflected if we fetch out our current wal position
- // for the slot
- status, err := pgx.NewStandbyStatus(maxWal)
- if err != nil {
- t.Errorf("Failed to create standby status %v", err)
- }
- replicationConn.SendStandbyStatus(status)
-
- restartLsn := getConfirmedFlushLsnFor(t, conn, "pgx_test")
- integerRestartLsn, _ := pgx.ParseLSN(restartLsn)
- if integerRestartLsn != maxWal {
- t.Fatalf("Wal offset update failed, expected %s found %s", pgx.FormatLSN(maxWal), restartLsn)
- }
-
- closeReplicationConn(t, replicationConn)
-
- replicationConn2 := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn2)
-
- err = replicationConn2.DropReplicationSlot("pgx_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-
- droppedLsn := getConfirmedFlushLsnFor(t, conn, "pgx_test")
- if droppedLsn != "" {
- t.Errorf("Got odd flush lsn %s for supposedly dropped slot", droppedLsn)
- }
-}
-
-func TestReplicationConn_DropReplicationSlot(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- err := replicationConn.CreateReplicationSlot("pgx_slot_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
- err = replicationConn.DropReplicationSlot("pgx_slot_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-
- // We re-create to ensure the drop worked.
- err = replicationConn.CreateReplicationSlot("pgx_slot_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
-
- // And finally we drop to ensure we don't leave dirty state
- err = replicationConn.DropReplicationSlot("pgx_slot_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-}
-
-func TestIdentifySystem(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn2 := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn2)
-
- r, err := replicationConn2.IdentifySystem()
- if err != nil {
- t.Error(err)
- }
- defer r.Close()
- for _, fd := range r.FieldDescriptions() {
- t.Logf("Field: %s of type %v", fd.Name, fd.DataType)
- }
-
- var rowCount int
- for r.Next() {
- rowCount++
- values, err := r.Values()
- if err != nil {
- t.Error(err)
- }
- t.Logf("Row values: %v", values)
- }
- if r.Err() != nil {
- t.Error(r.Err())
- }
-
- if rowCount == 0 {
- t.Errorf("Failed to find any rows: %d", rowCount)
- }
-}
-
-func getCurrentTimeline(t *testing.T, rc *pgx.ReplicationConn) int {
- r, err := rc.IdentifySystem()
- if err != nil {
- t.Error(err)
- }
- defer r.Close()
- for r.Next() {
- values, e := r.Values()
- if e != nil {
- t.Error(e)
- }
- timeline, e := strconv.Atoi(values[1].(string))
- if e != nil {
- t.Error(e)
- }
- return timeline
- }
- t.Fatal("Failed to read timeline")
- return -1
-}
-
-func TestGetTimelineHistory(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- timeline := getCurrentTimeline(t, replicationConn)
-
- r, err := replicationConn.TimelineHistory(timeline)
- if err != nil {
- t.Errorf("%#v", err)
- }
- defer r.Close()
-
- for _, fd := range r.FieldDescriptions() {
- t.Logf("Field: %s of type %v", fd.Name, fd.DataType)
- }
-
- var rowCount int
- for r.Next() {
- rowCount++
- values, err := r.Values()
- if err != nil {
- t.Error(err)
- }
- t.Logf("Row values: %v", values)
- }
- if r.Err() != nil {
- if strings.Contains(r.Err().Error(), "No such file or directory") {
- // This is normal, this means the timeline we're on has no
- // history, which is the common case in a test db that
- // has only one timeline
- return
- }
- t.Error(r.Err())
- }
-
- // If we have a timeline history (see above) there should have been
- // rows emitted
- if rowCount == 0 {
- t.Errorf("Failed to find any rows: %d", rowCount)
- }
-}
-
-func TestStandbyStatusParsing(t *testing.T) {
- // Let's push the boundary conditions of the standby status and ensure it errors correctly
- status, err := pgx.NewStandbyStatus(0, 1, 2, 3, 4)
- if err == nil {
- t.Errorf("Expected error from new standby status, got %v", status)
- }
-
- // And if you provide 3 args, ensure the right fields are set
- status, err = pgx.NewStandbyStatus(1, 2, 3)
- if err != nil {
- t.Errorf("Failed to create test status: %v", err)
- }
- if status.WalFlushPosition != 1 {
- t.Errorf("Unexpected flush position %d", status.WalFlushPosition)
- }
- if status.WalApplyPosition != 2 {
- t.Errorf("Unexpected apply position %d", status.WalApplyPosition)
- }
- if status.WalWritePosition != 3 {
- t.Errorf("Unexpected write position %d", status.WalWritePosition)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/sql_test.go b/vendor/github.com/jackc/pgx/sql_test.go
deleted file mode 100644
index dd03603..0000000
--- a/vendor/github.com/jackc/pgx/sql_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package pgx_test
-
-import (
- "strconv"
- "testing"
-
- "github.com/jackc/pgx"
-)
-
-func TestQueryArgs(t *testing.T) {
- var qa pgx.QueryArgs
-
- for i := 1; i < 512; i++ {
- expectedPlaceholder := "$" + strconv.Itoa(i)
- placeholder := qa.Append(i)
- if placeholder != expectedPlaceholder {
- t.Errorf(`Expected qa.Append to return "%s", but it returned "%s"`, expectedPlaceholder, placeholder)
- }
- }
-}
-
-func BenchmarkQueryArgs(b *testing.B) {
- for i := 0; i < b.N; i++ {
- qa := pgx.QueryArgs(make([]interface{}, 0, 16))
- qa.Append("foo1")
- qa.Append("foo2")
- qa.Append("foo3")
- qa.Append("foo4")
- qa.Append("foo5")
- qa.Append("foo6")
- qa.Append("foo7")
- qa.Append("foo8")
- qa.Append("foo9")
- qa.Append("foo10")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/stdlib/sql.go b/vendor/github.com/jackc/pgx/stdlib/sql.go
index 8c78cd3..2d4930e 100644
--- a/vendor/github.com/jackc/pgx/stdlib/sql.go
+++ b/vendor/github.com/jackc/pgx/stdlib/sql.go
@@ -14,154 +14,213 @@
// return err
// }
//
-// Or a normal pgx connection pool can be established and the database/sql
-// connection can be created through stdlib.OpenFromConnPool(). This allows
-// more control over the connection process (such as TLS), more control
-// over the connection pool, setting an AfterConnect hook, and using both
-// database/sql and pgx interfaces as needed.
+// A DriverConfig can be used to further configure the connection process. This
+// allows configuring TLS configuration, setting a custom dialer, logging, and
+// setting an AfterConnect hook.
//
-// connConfig := pgx.ConnConfig{
-// Host: "localhost",
-// User: "pgx_md5",
-// Password: "secret",
-// Database: "pgx_test",
-// }
+// driverConfig := stdlib.DriverConfig{
+// ConnConfig: pgx.ConnConfig{
+// Logger: logger,
+// },
+// AfterConnect: func(c *pgx.Conn) error {
+// // Ensure all connections have this temp table available
+// _, err := c.Exec("create temporary table foo(...)")
+// return err
+// },
+// }
//
-// config := pgx.ConnPoolConfig{ConnConfig: connConfig}
-// pool, err := pgx.NewConnPool(config)
-// if err != nil {
-// return err
-// }
+// stdlib.RegisterDriverConfig(&driverConfig)
//
-// db, err := stdlib.OpenFromConnPool(pool)
+// db, err := sql.Open("pgx", driverConfig.ConnectionString("postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test"))
// if err != nil {
-// t.Fatalf("Unable to create connection pool: %v", err)
+// return err
+// }
+//
+// pgx uses standard PostgreSQL positional parameters in queries. e.g. $1, $2.
+// It does not support named parameters.
+//
+// db.QueryRow("select * from users where id=$1", userID)
+//
+// AcquireConn and ReleaseConn acquire and release a *pgx.Conn from the standard
+// database/sql.DB connection pool. This allows operations that must be
+// performed on a single connection, but should not be run in a transaction or
+// to use pgx specific functionality.
+//
+// conn, err := stdlib.AcquireConn(db)
+// if err != nil {
+// return err
// }
+// defer stdlib.ReleaseConn(db, conn)
//
-// If the database/sql connection is established through
-// stdlib.OpenFromConnPool then access to a pgx *ConnPool can be regained
-// through db.Driver(). This allows writing a fast path for pgx while
-// preserving compatibility with other drivers and database
+// // do stuff with pgx.Conn
//
-// if driver, ok := db.Driver().(*stdlib.Driver); ok && driver.Pool != nil {
+// It also can be used to enable a fast path for pgx while preserving
+// compatibility with other drivers and database.
+//
+// conn, err := stdlib.AcquireConn(db)
+// if err == nil {
// // fast path with pgx
+// // ...
+// // release conn when done
+// stdlib.ReleaseConn(db, conn)
// } else {
// // normal path for other drivers and databases
// }
package stdlib
import (
+ "context"
"database/sql"
"database/sql/driver"
- "errors"
+ "encoding/binary"
"fmt"
"io"
+ "reflect"
+ "strings"
"sync"
- "github.com/jackc/pgx"
-)
+ "github.com/pkg/errors"
-var (
- openFromConnPoolCountMu sync.Mutex
- openFromConnPoolCount int
+ "github.com/jackc/pgx"
+ "github.com/jackc/pgx/pgtype"
)
// oids that map to intrinsic database/sql types. These will be allowed to be
// binary, anything else will be forced to text format
-var databaseSqlOids map[pgx.Oid]bool
+var databaseSqlOIDs map[pgtype.OID]bool
+
+var pgxDriver *Driver
+
+type ctxKey int
+
+var ctxKeyFakeTx ctxKey = 0
+
+var ErrNotPgx = errors.New("not pgx *sql.DB")
func init() {
- d := &Driver{}
- sql.Register("pgx", d)
-
- databaseSqlOids = make(map[pgx.Oid]bool)
- databaseSqlOids[pgx.BoolOid] = true
- databaseSqlOids[pgx.ByteaOid] = true
- databaseSqlOids[pgx.Int2Oid] = true
- databaseSqlOids[pgx.Int4Oid] = true
- databaseSqlOids[pgx.Int8Oid] = true
- databaseSqlOids[pgx.Float4Oid] = true
- databaseSqlOids[pgx.Float8Oid] = true
- databaseSqlOids[pgx.DateOid] = true
- databaseSqlOids[pgx.TimestampTzOid] = true
- databaseSqlOids[pgx.TimestampOid] = true
+ pgxDriver = &Driver{
+ configs: make(map[int64]*DriverConfig),
+ fakeTxConns: make(map[*pgx.Conn]*sql.Tx),
+ }
+ sql.Register("pgx", pgxDriver)
+
+ databaseSqlOIDs = make(map[pgtype.OID]bool)
+ databaseSqlOIDs[pgtype.BoolOID] = true
+ databaseSqlOIDs[pgtype.ByteaOID] = true
+ databaseSqlOIDs[pgtype.CIDOID] = true
+ databaseSqlOIDs[pgtype.DateOID] = true
+ databaseSqlOIDs[pgtype.Float4OID] = true
+ databaseSqlOIDs[pgtype.Float8OID] = true
+ databaseSqlOIDs[pgtype.Int2OID] = true
+ databaseSqlOIDs[pgtype.Int4OID] = true
+ databaseSqlOIDs[pgtype.Int8OID] = true
+ databaseSqlOIDs[pgtype.OIDOID] = true
+ databaseSqlOIDs[pgtype.TimestampOID] = true
+ databaseSqlOIDs[pgtype.TimestamptzOID] = true
+ databaseSqlOIDs[pgtype.XIDOID] = true
}
type Driver struct {
- Pool *pgx.ConnPool
+ configMutex sync.Mutex
+ configCount int64
+ configs map[int64]*DriverConfig
+
+ fakeTxMutex sync.Mutex
+ fakeTxConns map[*pgx.Conn]*sql.Tx
}
func (d *Driver) Open(name string) (driver.Conn, error) {
- if d.Pool != nil {
- conn, err := d.Pool.Acquire()
- if err != nil {
- return nil, err
- }
-
- return &Conn{conn: conn, pool: d.Pool}, nil
+ var connConfig pgx.ConnConfig
+ var afterConnect func(*pgx.Conn) error
+ if len(name) >= 9 && name[0] == 0 {
+ idBuf := []byte(name)[1:9]
+ id := int64(binary.BigEndian.Uint64(idBuf))
+ connConfig = d.configs[id].ConnConfig
+ afterConnect = d.configs[id].AfterConnect
+ name = name[9:]
}
- connConfig, err := pgx.ParseConnectionString(name)
+ parsedConfig, err := pgx.ParseConnectionString(name)
if err != nil {
return nil, err
}
+ connConfig = connConfig.Merge(parsedConfig)
conn, err := pgx.Connect(connConfig)
if err != nil {
return nil, err
}
- c := &Conn{conn: conn}
+ if afterConnect != nil {
+ err = afterConnect(conn)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ c := &Conn{conn: conn, driver: d, connConfig: connConfig}
return c, nil
}
-// OpenFromConnPool takes the existing *pgx.ConnPool pool and returns a *sql.DB
-// with pool as the backend. This enables full control over the connection
-// process and configuration while maintaining compatibility with the
-// database/sql interface. In addition, by calling Driver() on the returned
-// *sql.DB and typecasting to *stdlib.Driver a reference to the pgx.ConnPool can
-// be reaquired later. This allows fast paths targeting pgx to be used while
-// still maintaining compatibility with other databases and drivers.
-//
-// pool connection size must be at least 2.
-func OpenFromConnPool(pool *pgx.ConnPool) (*sql.DB, error) {
- d := &Driver{Pool: pool}
-
- openFromConnPoolCountMu.Lock()
- name := fmt.Sprintf("pgx-%d", openFromConnPoolCount)
- openFromConnPoolCount++
- openFromConnPoolCountMu.Unlock()
+type DriverConfig struct {
+ pgx.ConnConfig
+ AfterConnect func(*pgx.Conn) error // function to call on every new connection
+ driver *Driver
+ id int64
+}
- sql.Register(name, d)
- db, err := sql.Open(name, "")
- if err != nil {
- return nil, err
+// ConnectionString encodes the DriverConfig into the original connection
+// string. DriverConfig must be registered before calling ConnectionString.
+func (c *DriverConfig) ConnectionString(original string) string {
+ if c.driver == nil {
+ panic("DriverConfig must be registered before calling ConnectionString")
}
- // Presumably OpenFromConnPool is being used because the user wants to use
- // database/sql most of the time, but fast path with pgx some of the time.
- // Allow database/sql to use all the connections, but release 2 idle ones.
- // Don't have database/sql immediately release all idle connections because
- // that would mean that prepared statements would be lost (which kills
- // performance if the prepared statements constantly have to be reprepared)
- stat := pool.Stat()
+ buf := make([]byte, 9)
+ binary.BigEndian.PutUint64(buf[1:], uint64(c.id))
+ buf = append(buf, original...)
+ return string(buf)
+}
+
+func (d *Driver) registerDriverConfig(c *DriverConfig) {
+ d.configMutex.Lock()
- if stat.MaxConnections <= 2 {
- return nil, errors.New("pool connection size must be at least 3")
- }
- db.SetMaxIdleConns(stat.MaxConnections - 2)
- db.SetMaxOpenConns(stat.MaxConnections)
+ c.driver = d
+ c.id = d.configCount
+ d.configs[d.configCount] = c
+ d.configCount++
- return db, nil
+ d.configMutex.Unlock()
+}
+
+func (d *Driver) unregisterDriverConfig(c *DriverConfig) {
+ d.configMutex.Lock()
+ delete(d.configs, c.id)
+ d.configMutex.Unlock()
+}
+
+// RegisterDriverConfig registers a DriverConfig for use with Open.
+func RegisterDriverConfig(c *DriverConfig) {
+ pgxDriver.registerDriverConfig(c)
+}
+
+// UnregisterDriverConfig removes a DriverConfig registration.
+func UnregisterDriverConfig(c *DriverConfig) {
+ pgxDriver.unregisterDriverConfig(c)
}
type Conn struct {
- conn *pgx.Conn
- pool *pgx.ConnPool
- psCount int64 // Counter used for creating unique prepared statement names
+ conn *pgx.Conn
+ psCount int64 // Counter used for creating unique prepared statement names
+ driver *Driver
+ connConfig pgx.ConnConfig
}
func (c *Conn) Prepare(query string) (driver.Stmt, error) {
+ return c.PrepareContext(context.Background(), query)
+}
+
+func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
@@ -169,7 +228,7 @@ func (c *Conn) Prepare(query string) (driver.Stmt, error) {
name := fmt.Sprintf("pgx_%d", c.psCount)
c.psCount++
- ps, err := c.conn.Prepare(name, query)
+ ps, err := c.conn.PrepareEx(ctx, name, query, nil)
if err != nil {
return nil, err
}
@@ -180,25 +239,43 @@ func (c *Conn) Prepare(query string) (driver.Stmt, error) {
}
func (c *Conn) Close() error {
- err := c.conn.Close()
- if c.pool != nil {
- c.pool.Release(c.conn)
- }
-
- return err
+ return c.conn.Close()
}
func (c *Conn) Begin() (driver.Tx, error) {
+ return c.BeginTx(context.Background(), driver.TxOptions{})
+}
+
+func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
- _, err := c.conn.Exec("begin")
- if err != nil {
- return nil, err
+ if pconn, ok := ctx.Value(ctxKeyFakeTx).(**pgx.Conn); ok {
+ *pconn = c.conn
+ return fakeTx{}, nil
+ }
+
+ var pgxOpts pgx.TxOptions
+ switch sql.IsolationLevel(opts.Isolation) {
+ case sql.LevelDefault:
+ case sql.LevelReadUncommitted:
+ pgxOpts.IsoLevel = pgx.ReadUncommitted
+ case sql.LevelReadCommitted:
+ pgxOpts.IsoLevel = pgx.ReadCommitted
+ case sql.LevelSnapshot:
+ pgxOpts.IsoLevel = pgx.RepeatableRead
+ case sql.LevelSerializable:
+ pgxOpts.IsoLevel = pgx.Serializable
+ default:
+ return nil, errors.Errorf("unsupported isolation: %v", opts.Isolation)
+ }
+
+ if opts.ReadOnly {
+ pgxOpts.AccessMode = pgx.ReadOnly
}
- return &Tx{conn: c.conn}, nil
+ return c.conn.BeginEx(ctx, &pgxOpts)
}
func (c *Conn) Exec(query string, argsV []driver.Value) (driver.Result, error) {
@@ -211,19 +288,65 @@ func (c *Conn) Exec(query string, argsV []driver.Value) (driver.Result, error) {
return driver.RowsAffected(commandTag.RowsAffected()), err
}
+func (c *Conn) ExecContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Result, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := namedValueToInterface(argsV)
+
+ commandTag, err := c.conn.ExecEx(ctx, query, nil, args...)
+ return driver.RowsAffected(commandTag.RowsAffected()), err
+}
+
func (c *Conn) Query(query string, argsV []driver.Value) (driver.Rows, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
- ps, err := c.conn.Prepare("", query)
+ if !c.connConfig.PreferSimpleProtocol {
+ ps, err := c.conn.Prepare("", query)
+ if err != nil {
+ return nil, err
+ }
+
+ restrictBinaryToDatabaseSqlTypes(ps)
+ return c.queryPrepared("", argsV)
+ }
+
+ rows, err := c.conn.Query(query, valueToInterface(argsV)...)
if err != nil {
return nil, err
}
- restrictBinaryToDatabaseSqlTypes(ps)
+ // Preload first row because otherwise we won't know what columns are available when database/sql asks.
+ more := rows.Next()
+ return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
+}
- return c.queryPrepared("", argsV)
+func (c *Conn) QueryContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Rows, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ if !c.connConfig.PreferSimpleProtocol {
+ ps, err := c.conn.PrepareEx(ctx, "", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ restrictBinaryToDatabaseSqlTypes(ps)
+ return c.queryPreparedContext(ctx, "", argsV)
+ }
+
+ rows, err := c.conn.QueryEx(ctx, query, nil, namedValueToInterface(argsV)...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Preload first row because otherwise we won't know what columns are available when database/sql asks.
+ more := rows.Next()
+ return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
}
func (c *Conn) queryPrepared(name string, argsV []driver.Value) (driver.Rows, error) {
@@ -241,12 +364,35 @@ func (c *Conn) queryPrepared(name string, argsV []driver.Value) (driver.Rows, er
return &Rows{rows: rows}, nil
}
+func (c *Conn) queryPreparedContext(ctx context.Context, name string, argsV []driver.NamedValue) (driver.Rows, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := namedValueToInterface(argsV)
+
+ rows, err := c.conn.QueryEx(ctx, name, nil, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Rows{rows: rows}, nil
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ if !c.conn.IsAlive() {
+ return driver.ErrBadConn
+ }
+
+ return c.conn.Ping(ctx)
+}
+
// Anything that isn't a database/sql compatible type needs to be forced to
// text format so that pgx.Rows.Values doesn't decode it into a native type
// (e.g. []int32)
func restrictBinaryToDatabaseSqlTypes(ps *pgx.PreparedStatement) {
for i := range ps.FieldDescriptions {
- intrinsic, _ := databaseSqlOids[ps.FieldDescriptions[i].DataType]
+ intrinsic, _ := databaseSqlOIDs[ps.FieldDescriptions[i].DataType]
if !intrinsic {
ps.FieldDescriptions[i].FormatCode = pgx.TextFormatCode
}
@@ -263,20 +409,30 @@ func (s *Stmt) Close() error {
}
func (s *Stmt) NumInput() int {
- return len(s.ps.ParameterOids)
+ return len(s.ps.ParameterOIDs)
}
func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
return s.conn.Exec(s.ps.Name, argsV)
}
+func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
+ return s.conn.ExecContext(ctx, s.ps.Name, argsV)
+}
+
func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
return s.conn.queryPrepared(s.ps.Name, argsV)
}
-// TODO - rename to avoid alloc
+func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
+ return s.conn.queryPreparedContext(ctx, s.ps.Name, argsV)
+}
+
type Rows struct {
- rows *pgx.Rows
+ rows *pgx.Rows
+ values []interface{}
+ skipNext bool
+ skipNextMore bool
}
func (r *Rows) Columns() []string {
@@ -288,13 +444,79 @@ func (r *Rows) Columns() []string {
return names
}
+// ColumnTypeDatabaseTypeName return the database system type name.
+func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
+ return strings.ToUpper(r.rows.FieldDescriptions()[index].DataTypeName)
+}
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable length type. If the column is not a variable length type ok
+// should return false.
+func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
+ return r.rows.FieldDescriptions()[index].Length()
+}
+
+// ColumnTypePrecisionScale should return the precision and scale for decimal
+// types. If not applicable, ok should be false.
+func (r *Rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+ return r.rows.FieldDescriptions()[index].PrecisionScale()
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types into.
+func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
+ return r.rows.FieldDescriptions()[index].Type()
+}
+
func (r *Rows) Close() error {
r.rows.Close()
return nil
}
func (r *Rows) Next(dest []driver.Value) error {
- more := r.rows.Next()
+ if r.values == nil {
+ r.values = make([]interface{}, len(r.rows.FieldDescriptions()))
+ for i, fd := range r.rows.FieldDescriptions() {
+ switch fd.DataType {
+ case pgtype.BoolOID:
+ r.values[i] = &pgtype.Bool{}
+ case pgtype.ByteaOID:
+ r.values[i] = &pgtype.Bytea{}
+ case pgtype.CIDOID:
+ r.values[i] = &pgtype.CID{}
+ case pgtype.DateOID:
+ r.values[i] = &pgtype.Date{}
+ case pgtype.Float4OID:
+ r.values[i] = &pgtype.Float4{}
+ case pgtype.Float8OID:
+ r.values[i] = &pgtype.Float8{}
+ case pgtype.Int2OID:
+ r.values[i] = &pgtype.Int2{}
+ case pgtype.Int4OID:
+ r.values[i] = &pgtype.Int4{}
+ case pgtype.Int8OID:
+ r.values[i] = &pgtype.Int8{}
+ case pgtype.OIDOID:
+ r.values[i] = &pgtype.OIDValue{}
+ case pgtype.TimestampOID:
+ r.values[i] = &pgtype.Timestamp{}
+ case pgtype.TimestamptzOID:
+ r.values[i] = &pgtype.Timestamptz{}
+ case pgtype.XIDOID:
+ r.values[i] = &pgtype.XID{}
+ default:
+ r.values[i] = &pgtype.GenericText{}
+ }
+ }
+ }
+
+ var more bool
+ if r.skipNext {
+ more = r.skipNextMore
+ r.skipNext = false
+ } else {
+ more = r.rows.Next()
+ }
+
if !more {
if r.rows.Err() == nil {
return io.EOF
@@ -303,19 +525,16 @@ func (r *Rows) Next(dest []driver.Value) error {
}
}
- values, err := r.rows.Values()
+ err := r.rows.Scan(r.values...)
if err != nil {
return err
}
- if len(dest) < len(values) {
- fmt.Printf("%d: %#v\n", len(dest), dest)
- fmt.Printf("%d: %#v\n", len(values), values)
- return errors.New("expected more values than were received")
- }
-
- for i, v := range values {
- dest[i] = driver.Value(v)
+ for i, v := range r.values {
+ dest[i], err = v.(driver.Valuer).Value()
+ if err != nil {
+ return err
+ }
}
return nil
@@ -333,16 +552,58 @@ func valueToInterface(argsV []driver.Value) []interface{} {
return args
}
-type Tx struct {
- conn *pgx.Conn
+func namedValueToInterface(argsV []driver.NamedValue) []interface{} {
+ args := make([]interface{}, 0, len(argsV))
+ for _, v := range argsV {
+ if v.Value != nil {
+ args = append(args, v.Value.(interface{}))
+ } else {
+ args = append(args, nil)
+ }
+ }
+ return args
}
-func (t *Tx) Commit() error {
- _, err := t.conn.Exec("commit")
- return err
+type fakeTx struct{}
+
+func (fakeTx) Commit() error { return nil }
+
+func (fakeTx) Rollback() error { return nil }
+
+func AcquireConn(db *sql.DB) (*pgx.Conn, error) {
+ driver, ok := db.Driver().(*Driver)
+ if !ok {
+ return nil, ErrNotPgx
+ }
+
+ var conn *pgx.Conn
+ ctx := context.WithValue(context.Background(), ctxKeyFakeTx, &conn)
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ driver.fakeTxMutex.Lock()
+ driver.fakeTxConns[conn] = tx
+ driver.fakeTxMutex.Unlock()
+
+ return conn, nil
}
-func (t *Tx) Rollback() error {
- _, err := t.conn.Exec("rollback")
- return err
+func ReleaseConn(db *sql.DB, conn *pgx.Conn) error {
+ var tx *sql.Tx
+ var ok bool
+
+ driver := db.Driver().(*Driver)
+ driver.fakeTxMutex.Lock()
+ tx, ok = driver.fakeTxConns[conn]
+ if ok {
+ delete(driver.fakeTxConns, conn)
+ driver.fakeTxMutex.Unlock()
+ } else {
+ driver.fakeTxMutex.Unlock()
+ return errors.Errorf("can't release conn that is not acquired")
+ }
+
+ return tx.Rollback()
}
diff --git a/vendor/github.com/jackc/pgx/stdlib/sql_test.go b/vendor/github.com/jackc/pgx/stdlib/sql_test.go
deleted file mode 100644
index 1455ca1..0000000
--- a/vendor/github.com/jackc/pgx/stdlib/sql_test.go
+++ /dev/null
@@ -1,691 +0,0 @@
-package stdlib_test
-
-import (
- "bytes"
- "database/sql"
- "github.com/jackc/pgx"
- "github.com/jackc/pgx/stdlib"
- "sync"
- "testing"
-)
-
-func openDB(t *testing.T) *sql.DB {
- db, err := sql.Open("pgx", "postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test")
- if err != nil {
- t.Fatalf("sql.Open failed: %v", err)
- }
-
- return db
-}
-
-func closeDB(t *testing.T, db *sql.DB) {
- err := db.Close()
- if err != nil {
- t.Fatalf("db.Close unexpectedly failed: %v", err)
- }
-}
-
-// Do a simple query to ensure the connection is still usable
-func ensureConnValid(t *testing.T, db *sql.DB) {
- var sum, rowCount int32
-
- rows, err := db.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("db.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("db.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
-
-type preparer interface {
- Prepare(query string) (*sql.Stmt, error)
-}
-
-func prepareStmt(t *testing.T, p preparer, sql string) *sql.Stmt {
- stmt, err := p.Prepare(sql)
- if err != nil {
- t.Fatalf("%v Prepare unexpectedly failed: %v", p, err)
- }
-
- return stmt
-}
-
-func closeStmt(t *testing.T, stmt *sql.Stmt) {
- err := stmt.Close()
- if err != nil {
- t.Fatalf("stmt.Close unexpectedly failed: %v", err)
- }
-}
-
-func TestNormalLifeCycle(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- stmt := prepareStmt(t, db, "select 'foo', n from generate_series($1::int, $2::int) n")
- defer closeStmt(t, stmt)
-
- rows, err := stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestSqlOpenDoesNotHavePool(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool != nil {
- t.Fatal("Did not expect driver opened through database/sql to have Pool, but it did")
- }
-}
-
-func TestOpenFromConnPool(t *testing.T) {
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // Can get pgx.ConnPool from driver
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool == nil {
- t.Fatal("Expected driver opened through OpenFromConnPool to have Pool, but it did not")
- }
-
- // Normal sql/database still works
- var n int64
- err = db.QueryRow("select 1").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-}
-
-func TestOpenFromConnPoolRace(t *testing.T) {
- wg := &sync.WaitGroup{}
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- wg.Add(10)
- for i := 0; i < 10; i++ {
- go func() {
- defer wg.Done()
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // Can get pgx.ConnPool from driver
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool == nil {
- t.Fatal("Expected driver opened through OpenFromConnPool to have Pool, but it did not")
- }
- }()
- }
-
- wg.Wait()
-}
-
-func TestStmtExec(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- tx, err := db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- createStmt := prepareStmt(t, tx, "create temporary table t(a varchar not null)")
- _, err = createStmt.Exec()
- if err != nil {
- t.Fatalf("stmt.Exec unexpectedly failed: %v", err)
- }
- closeStmt(t, createStmt)
-
- insertStmt := prepareStmt(t, tx, "insert into t values($1::text)")
- result, err := insertStmt.Exec("foo")
- if err != nil {
- t.Fatalf("stmt.Exec unexpectedly failed: %v", err)
- }
-
- n, err := result.RowsAffected()
- if err != nil {
- t.Fatalf("result.RowsAffected unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1, received %d", n)
- }
- closeStmt(t, insertStmt)
-
- if err != nil {
- t.Fatalf("tx.Commit unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestQueryCloseRowsEarly(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- stmt := prepareStmt(t, db, "select 'foo', n from generate_series($1::int, $2::int) n")
- defer closeStmt(t, stmt)
-
- rows, err := stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- // Close rows immediately without having read them
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- // Run the query again to ensure the connection and statement are still ok
- rows, err = stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnExec(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Exec("create temporary table t(a varchar not null)")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- result, err := db.Exec("insert into t values('hey')")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- n, err := result.RowsAffected()
- if err != nil {
- t.Fatalf("result.RowsAffected unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1, received %d", n)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQuery(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- rows, err := db.Query("select 'foo', n from generate_series($1::int, $2::int) n", int32(1), int32(10))
- if err != nil {
- t.Fatalf("db.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-type testLog struct {
- lvl int
- msg string
- ctx []interface{}
-}
-
-type testLogger struct {
- logs []testLog
-}
-
-func (l *testLogger) Debug(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelDebug, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Info(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelInfo, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Warn(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelWarn, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Error(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelError, msg: msg, ctx: ctx})
-}
-
-func TestConnQueryLog(t *testing.T) {
- logger := &testLogger{}
-
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- Logger: logger,
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // clear logs from initial connection
- logger.logs = []testLog{}
-
- var n int64
- err = db.QueryRow("select 1").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- l := logger.logs[0]
- if l.msg != "Query" {
- t.Errorf("Expected to log Query, but got %v", l)
- }
-
- if !(l.ctx[0] == "sql" && l.ctx[1] == "select 1") {
- t.Errorf("Expected to log Query with sql 'select 1', but got %v", l)
- }
-}
-
-func TestConnQueryNull(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- rows, err := db.Query("select $1::int", nil)
- if err != nil {
- t.Fatalf("db.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var n sql.NullInt64
- if err := rows.Scan(&n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if n.Valid != false {
- t.Errorf("Expected n to be null, but it was %v", n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 1 {
- t.Fatalf("Expected to receive 11 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryRowByteSlice(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- expected := []byte{222, 173, 190, 239}
- var actual []byte
-
- err := db.QueryRow(`select E'\\xdeadbeef'::bytea`).Scan(&actual)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Fatalf("Expected %v, but got %v", expected, actual)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryFailure(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Query("select 'foo")
- if _, ok := err.(pgx.PgError); !ok {
- t.Fatalf("Expected db.Query to return pgx.PgError, but instead received: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-// Test type that pgx would handle natively in binary, but since it is not a
-// database/sql native type should be passed through as a string
-func TestConnQueryRowPgxBinary(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- sql := "select $1::int4[]"
- expected := "{1,2,3}"
- var actual string
-
- err := db.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryRowUnknownType(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- sql := "select $1::point"
- expected := "(1,2)"
- var actual string
-
- err := db.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryJSONIntoByteSlice(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- if !serverHasJSON(t, db) {
- t.Skip("Skipping due to server's lack of JSON type")
- }
-
- _, err := db.Exec(`
- create temporary table docs(
- body json not null
- );
-
- insert into docs(body) values('{"foo":"bar"}');
-`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- sql := `select * from docs`
- expected := []byte(`{"foo":"bar"}`)
- var actual []byte
-
- err = db.QueryRow(sql).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, string(expected), string(actual), sql)
- }
-
- _, err = db.Exec(`drop table docs`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnExecInsertByteSliceIntoJSON(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- if !serverHasJSON(t, db) {
- t.Skip("Skipping due to server's lack of JSON type")
- }
-
- _, err := db.Exec(`
- create temporary table docs(
- body json not null
- );
-`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- expected := []byte(`{"foo":"bar"}`)
-
- _, err = db.Exec(`insert into docs(body) values($1)`, expected)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- var actual []byte
- err = db.QueryRow(`select body from docs`).Scan(&actual)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Errorf(`Expected "%v", got "%v"`, string(expected), string(actual))
- }
-
- _, err = db.Exec(`drop table docs`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func serverHasJSON(t *testing.T, db *sql.DB) bool {
- var hasJSON bool
- err := db.QueryRow(`select exists(select 1 from pg_type where typname='json')`).Scan(&hasJSON)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
- return hasJSON
-}
-
-func TestTransactionLifeCycle(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Exec("create temporary table t(a varchar not null)")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- tx, err := db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- _, err = tx.Exec("insert into t values('hi')")
- if err != nil {
- t.Fatalf("tx.Exec unexpectedly failed: %v", err)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback unexpectedly failed: %v", err)
- }
-
- var n int64
- err = db.QueryRow("select count(*) from t").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow.Scan unexpectedly failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Expected 0 rows due to rollback, instead found %d", n)
- }
-
- tx, err = db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- _, err = tx.Exec("insert into t values('hi')")
- if err != nil {
- t.Fatalf("tx.Exec unexpectedly failed: %v", err)
- }
-
- err = tx.Commit()
- if err != nil {
- t.Fatalf("tx.Commit unexpectedly failed: %v", err)
- }
-
- err = db.QueryRow("select count(*) from t").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow.Scan unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1 rows due to rollback, instead found %d", n)
- }
-
- ensureConnValid(t, db)
-}
diff --git a/vendor/github.com/jackc/pgx/stress_test.go b/vendor/github.com/jackc/pgx/stress_test.go
deleted file mode 100644
index 150d13c..0000000
--- a/vendor/github.com/jackc/pgx/stress_test.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/jackc/fake"
- "github.com/jackc/pgx"
-)
-
-type execer interface {
- Exec(sql string, arguments ...interface{}) (commandTag pgx.CommandTag, err error)
-}
-type queryer interface {
- Query(sql string, args ...interface{}) (*pgx.Rows, error)
-}
-type queryRower interface {
- QueryRow(sql string, args ...interface{}) *pgx.Row
-}
-
-func TestStressConnPool(t *testing.T) {
- maxConnections := 8
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- setupStressDB(t, pool)
-
- actions := []struct {
- name string
- fn func(*pgx.ConnPool, int) error
- }{
- {"insertUnprepared", func(p *pgx.ConnPool, n int) error { return insertUnprepared(p, n) }},
- {"queryRowWithoutParams", func(p *pgx.ConnPool, n int) error { return queryRowWithoutParams(p, n) }},
- {"query", func(p *pgx.ConnPool, n int) error { return queryCloseEarly(p, n) }},
- {"queryCloseEarly", func(p *pgx.ConnPool, n int) error { return query(p, n) }},
- {"queryErrorWhileReturningRows", func(p *pgx.ConnPool, n int) error { return queryErrorWhileReturningRows(p, n) }},
- {"txInsertRollback", txInsertRollback},
- {"txInsertCommit", txInsertCommit},
- {"txMultipleQueries", txMultipleQueries},
- {"notify", notify},
- {"listenAndPoolUnlistens", listenAndPoolUnlistens},
- {"reset", func(p *pgx.ConnPool, n int) error { p.Reset(); return nil }},
- {"poolPrepareUseAndDeallocate", poolPrepareUseAndDeallocate},
- }
-
- var timer *time.Timer
- if testing.Short() {
- timer = time.NewTimer(5 * time.Second)
- } else {
- timer = time.NewTimer(60 * time.Second)
- }
- workerCount := 16
-
- workChan := make(chan int)
- doneChan := make(chan struct{})
- errChan := make(chan error)
-
- work := func() {
- for n := range workChan {
- action := actions[rand.Intn(len(actions))]
- err := action.fn(pool, n)
- if err != nil {
- errChan <- err
- break
- }
- }
- doneChan <- struct{}{}
- }
-
- for i := 0; i < workerCount; i++ {
- go work()
- }
-
- var stop bool
- for i := 0; !stop; i++ {
- select {
- case <-timer.C:
- stop = true
- case workChan <- i:
- case err := <-errChan:
- close(workChan)
- t.Fatal(err)
- }
- }
- close(workChan)
-
- for i := 0; i < workerCount; i++ {
- <-doneChan
- }
-}
-
-func TestStressTLSConnection(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- if testing.Short() {
- t.Skip("Skipping due to testing -short")
- }
-
- conn, err := pgx.Connect(*tlsConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer conn.Close()
-
- for i := 0; i < 50; i++ {
- sql := `select * from generate_series(1, $1)`
-
- rows, err := conn.Query(sql, 2000000)
- if err != nil {
- t.Fatal(err)
- }
-
- var n int32
- for rows.Next() {
- rows.Scan(&n)
- }
-
- if rows.Err() != nil {
- t.Fatalf("queryCount: %d, Row number: %d. %v", i, n, rows.Err())
- }
- }
-}
-
-func setupStressDB(t *testing.T, pool *pgx.ConnPool) {
- _, err := pool.Exec(`
- drop table if exists widgets;
- create table widgets(
- id serial primary key,
- name varchar not null,
- description text,
- creation_time timestamptz
- );
-`)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func insertUnprepared(e execer, actionNum int) error {
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err := e.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- return err
-}
-
-func queryRowWithoutParams(qr queryRower, actionNum int) error {
- var id int32
- var name, description string
- var creationTime time.Time
-
- sql := `select * from widgets order by random() limit 1`
-
- err := qr.QueryRow(sql).Scan(&id, &name, &description, &creationTime)
- if err == pgx.ErrNoRows {
- return nil
- }
- return err
-}
-
-func query(q queryer, actionNum int) error {
- sql := `select * from widgets order by random() limit $1`
-
- rows, err := q.Query(sql, 10)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- for rows.Next() {
- var id int32
- var name, description string
- var creationTime time.Time
- rows.Scan(&id, &name, &description, &creationTime)
- }
-
- return rows.Err()
-}
-
-func queryCloseEarly(q queryer, actionNum int) error {
- sql := `select * from generate_series(1,$1)`
-
- rows, err := q.Query(sql, 100)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- for i := 0; i < 10 && rows.Next(); i++ {
- var n int32
- rows.Scan(&n)
- }
- rows.Close()
-
- return rows.Err()
-}
-
-func queryErrorWhileReturningRows(q queryer, actionNum int) error {
- // This query should divide by 0 within the first number of rows
- sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
-
- rows, err := q.Query(sql)
- if err != nil {
- return nil
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- }
-
- if _, ok := rows.Err().(pgx.PgError); ok {
- return nil
- }
- return rows.Err()
-}
-
-func notify(pool *pgx.ConnPool, actionNum int) error {
- _, err := pool.Exec("notify stress")
- return err
-}
-
-func listenAndPoolUnlistens(pool *pgx.ConnPool, actionNum int) error {
- conn, err := pool.Acquire()
- if err != nil {
- return err
- }
- defer pool.Release(conn)
-
- err = conn.Listen("stress")
- if err != nil {
- return err
- }
-
- _, err = conn.WaitForNotification(100 * time.Millisecond)
- if err == pgx.ErrNotificationTimeout {
- return nil
- }
- return err
-}
-
-func poolPrepareUseAndDeallocate(pool *pgx.ConnPool, actionNum int) error {
- psName := fmt.Sprintf("poolPreparedStatement%d", actionNum)
-
- _, err := pool.Prepare(psName, "select $1::text")
- if err != nil {
- return err
- }
-
- var s string
- err = pool.QueryRow(psName, "hello").Scan(&s)
- if err != nil {
- return err
- }
-
- if s != "hello" {
- return fmt.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- return pool.Deallocate(psName)
-}
-
-func txInsertRollback(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
-
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err = tx.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- if err != nil {
- return err
- }
-
- return tx.Rollback()
-}
-
-func txInsertCommit(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
-
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err = tx.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- if err != nil {
- tx.Rollback()
- return err
- }
-
- return tx.Commit()
-}
-
-func txMultipleQueries(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- errExpectedTxDeath := errors.New("Expected tx death")
-
- actions := []struct {
- name string
- fn func() error
- }{
- {"insertUnprepared", func() error { return insertUnprepared(tx, actionNum) }},
- {"queryRowWithoutParams", func() error { return queryRowWithoutParams(tx, actionNum) }},
- {"query", func() error { return query(tx, actionNum) }},
- {"queryCloseEarly", func() error { return queryCloseEarly(tx, actionNum) }},
- {"queryErrorWhileReturningRows", func() error {
- err := queryErrorWhileReturningRows(tx, actionNum)
- if err != nil {
- return err
- }
- return errExpectedTxDeath
- }},
- }
-
- for i := 0; i < 20; i++ {
- action := actions[rand.Intn(len(actions))]
- err := action.fn()
- if err == errExpectedTxDeath {
- return nil
- } else if err != nil {
- return err
- }
- }
-
- return tx.Commit()
-}
diff --git a/vendor/github.com/jackc/pgx/tx.go b/vendor/github.com/jackc/pgx/tx.go
index deb6c01..81fcfa2 100644
--- a/vendor/github.com/jackc/pgx/tx.go
+++ b/vendor/github.com/jackc/pgx/tx.go
@@ -1,16 +1,38 @@
package pgx
import (
- "errors"
+ "bytes"
+ "context"
"fmt"
+ "time"
+
+ "github.com/pkg/errors"
)
+type TxIsoLevel string
+
// Transaction isolation levels
const (
- Serializable = "serializable"
- RepeatableRead = "repeatable read"
- ReadCommitted = "read committed"
- ReadUncommitted = "read uncommitted"
+ Serializable = TxIsoLevel("serializable")
+ RepeatableRead = TxIsoLevel("repeatable read")
+ ReadCommitted = TxIsoLevel("read committed")
+ ReadUncommitted = TxIsoLevel("read uncommitted")
+)
+
+type TxAccessMode string
+
+// Transaction access modes
+const (
+ ReadWrite = TxAccessMode("read write")
+ ReadOnly = TxAccessMode("read only")
+)
+
+type TxDeferrableMode string
+
+// Transaction deferrable modes
+const (
+ Deferrable = TxDeferrableMode("deferrable")
+ NotDeferrable = TxDeferrableMode("not deferrable")
)
const (
@@ -21,6 +43,32 @@ const (
TxStatusRollbackSuccess = 2
)
+type TxOptions struct {
+ IsoLevel TxIsoLevel
+ AccessMode TxAccessMode
+ DeferrableMode TxDeferrableMode
+}
+
+func (txOptions *TxOptions) beginSQL() string {
+ if txOptions == nil {
+ return "begin"
+ }
+
+ buf := &bytes.Buffer{}
+ buf.WriteString("begin")
+ if txOptions.IsoLevel != "" {
+ fmt.Fprintf(buf, " isolation level %s", txOptions.IsoLevel)
+ }
+ if txOptions.AccessMode != "" {
+ fmt.Fprintf(buf, " %s", txOptions.AccessMode)
+ }
+ if txOptions.DeferrableMode != "" {
+ fmt.Fprintf(buf, " %s", txOptions.DeferrableMode)
+ }
+
+ return buf.String()
+}
+
var ErrTxClosed = errors.New("tx is closed")
// ErrTxCommitRollback occurs when an error has occurred in a transaction and
@@ -28,34 +76,21 @@ var ErrTxClosed = errors.New("tx is closed")
// it is treated as ROLLBACK.
var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
-// Begin starts a transaction with the default isolation level for the current
-// connection. To use a specific isolation level see BeginIso.
+// Begin starts a transaction with the default transaction mode for the
+// current connection. To use a specific transaction mode see BeginEx.
func (c *Conn) Begin() (*Tx, error) {
- return c.begin("")
+ return c.BeginEx(context.Background(), nil)
}
-// BeginIso starts a transaction with isoLevel as the transaction isolation
-// level.
-//
-// Valid isolation levels (and their constants) are:
-// serializable (pgx.Serializable)
-// repeatable read (pgx.RepeatableRead)
-// read committed (pgx.ReadCommitted)
-// read uncommitted (pgx.ReadUncommitted)
-func (c *Conn) BeginIso(isoLevel string) (*Tx, error) {
- return c.begin(isoLevel)
-}
-
-func (c *Conn) begin(isoLevel string) (*Tx, error) {
- var beginSQL string
- if isoLevel == "" {
- beginSQL = "begin"
- } else {
- beginSQL = fmt.Sprintf("begin isolation level %s", isoLevel)
- }
-
- _, err := c.Exec(beginSQL)
+// BeginEx starts a transaction with txOptions determining the transaction
+// mode. Unlike database/sql, the context only affects the begin command. i.e.
+// there is no auto-rollback on context cancelation.
+func (c *Conn) BeginEx(ctx context.Context, txOptions *TxOptions) (*Tx, error) {
+ _, err := c.ExecEx(ctx, txOptions.beginSQL(), nil)
if err != nil {
+ // begin should never fail unless there is an underlying connection issue or
+ // a context timeout. In either case, the connection is possibly broken.
+ c.die(errors.New("failed to begin transaction"))
return nil, err
}
@@ -67,19 +102,24 @@ func (c *Conn) begin(isoLevel string) (*Tx, error) {
// All Tx methods return ErrTxClosed if Commit or Rollback has already been
// called on the Tx.
type Tx struct {
- conn *Conn
- afterClose func(*Tx)
- err error
- status int8
+ conn *Conn
+ connPool *ConnPool
+ err error
+ status int8
}
// Commit commits the transaction
func (tx *Tx) Commit() error {
+ return tx.CommitEx(context.Background())
+}
+
+// CommitEx commits the transaction with a context.
+func (tx *Tx) CommitEx(ctx context.Context) error {
if tx.status != TxStatusInProgress {
return ErrTxClosed
}
- commandTag, err := tx.conn.Exec("commit")
+ commandTag, err := tx.conn.ExecEx(ctx, "commit", nil)
if err == nil && commandTag == "COMMIT" {
tx.status = TxStatusCommitSuccess
} else if err == nil && commandTag == "ROLLBACK" {
@@ -88,11 +128,14 @@ func (tx *Tx) Commit() error {
} else {
tx.status = TxStatusCommitFailure
tx.err = err
+ // A commit failure leaves the connection in an undefined state
+ tx.conn.die(errors.New("commit failed"))
}
- if tx.afterClose != nil {
- tx.afterClose(tx)
+ if tx.connPool != nil {
+ tx.connPool.Release(tx.conn)
}
+
return tx.err
}
@@ -101,55 +144,74 @@ func (tx *Tx) Commit() error {
// defer tx.Rollback() is safe even if tx.Commit() will be called first in a
// non-error condition.
func (tx *Tx) Rollback() error {
+ ctx, _ := context.WithTimeout(context.Background(), 15*time.Second)
+ return tx.RollbackEx(ctx)
+}
+
+// RollbackEx is the context version of Rollback
+func (tx *Tx) RollbackEx(ctx context.Context) error {
if tx.status != TxStatusInProgress {
return ErrTxClosed
}
- _, tx.err = tx.conn.Exec("rollback")
+ _, tx.err = tx.conn.ExecEx(ctx, "rollback", nil)
if tx.err == nil {
tx.status = TxStatusRollbackSuccess
} else {
tx.status = TxStatusRollbackFailure
+ // A rollback failure leaves the connection in an undefined state
+ tx.conn.die(errors.New("rollback failed"))
}
- if tx.afterClose != nil {
- tx.afterClose(tx)
+ if tx.connPool != nil {
+ tx.connPool.Release(tx.conn)
}
+
return tx.err
}
// Exec delegates to the underlying *Conn
func (tx *Tx) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
+ return tx.ExecEx(context.Background(), sql, nil, arguments...)
+}
+
+// ExecEx delegates to the underlying *Conn
+func (tx *Tx) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
if tx.status != TxStatusInProgress {
return CommandTag(""), ErrTxClosed
}
- return tx.conn.Exec(sql, arguments...)
+ return tx.conn.ExecEx(ctx, sql, options, arguments...)
}
// Prepare delegates to the underlying *Conn
func (tx *Tx) Prepare(name, sql string) (*PreparedStatement, error) {
- return tx.PrepareEx(name, sql, nil)
+ return tx.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx delegates to the underlying *Conn
-func (tx *Tx) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
+func (tx *Tx) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
if tx.status != TxStatusInProgress {
return nil, ErrTxClosed
}
- return tx.conn.PrepareEx(name, sql, opts)
+ return tx.conn.PrepareEx(ctx, name, sql, opts)
}
// Query delegates to the underlying *Conn
func (tx *Tx) Query(sql string, args ...interface{}) (*Rows, error) {
+ return tx.QueryEx(context.Background(), sql, nil, args...)
+}
+
+// QueryEx delegates to the underlying *Conn
+func (tx *Tx) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (*Rows, error) {
if tx.status != TxStatusInProgress {
// Because checking for errors can be deferred to the *Rows, build one with the error
err := ErrTxClosed
return &Rows{closed: true, err: err}, err
}
- return tx.conn.Query(sql, args...)
+ return tx.conn.QueryEx(ctx, sql, options, args...)
}
// QueryRow delegates to the underlying *Conn
@@ -158,13 +220,10 @@ func (tx *Tx) QueryRow(sql string, args ...interface{}) *Row {
return (*Row)(rows)
}
-// Deprecated. Use CopyFrom instead. CopyTo delegates to the underlying *Conn
-func (tx *Tx) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
- if tx.status != TxStatusInProgress {
- return 0, ErrTxClosed
- }
-
- return tx.conn.CopyTo(tableName, columnNames, rowSrc)
+// QueryRowEx delegates to the underlying *Conn
+func (tx *Tx) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := tx.QueryEx(ctx, sql, options, args...)
+ return (*Row)(rows)
}
// CopyFrom delegates to the underlying *Conn
@@ -176,11 +235,6 @@ func (tx *Tx) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFr
return tx.conn.CopyFrom(tableName, columnNames, rowSrc)
}
-// Conn returns the *Conn this transaction is using.
-func (tx *Tx) Conn() *Conn {
- return tx.conn
-}
-
// Status returns the status of the transaction from the set of
// pgx.TxStatus* constants.
func (tx *Tx) Status() int8 {
@@ -191,17 +245,3 @@ func (tx *Tx) Status() int8 {
func (tx *Tx) Err() error {
return tx.err
}
-
-// AfterClose adds f to a LILO queue of functions that will be called when
-// the transaction is closed (either Commit or Rollback).
-func (tx *Tx) AfterClose(f func(*Tx)) {
- if tx.afterClose == nil {
- tx.afterClose = f
- } else {
- prevFn := tx.afterClose
- tx.afterClose = func(tx *Tx) {
- f(tx)
- prevFn(tx)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/tx_test.go b/vendor/github.com/jackc/pgx/tx_test.go
deleted file mode 100644
index 435521a..0000000
--- a/vendor/github.com/jackc/pgx/tx_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
- "time"
-)
-
-func TestTransactionSuccessfulCommit(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- _, err = tx.Exec("insert into foo(id) values (1)")
- if err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- err = tx.Commit()
- if err != nil {
- t.Fatalf("tx.Commit failed: %v", err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestTxCommitWhenTxBroken(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- if _, err := tx.Exec("insert into foo(id) values (1)"); err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- // Purposely break transaction
- if _, err := tx.Exec("syntax error"); err == nil {
- t.Fatal("Unexpected success")
- }
-
- err = tx.Commit()
- if err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestTxCommitSerializationFailure(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- pool.Exec(`drop table if exists tx_serializable_sums`)
- _, err := pool.Exec(`create table tx_serializable_sums(num integer);`)
- if err != nil {
- t.Fatalf("Unable to create temporary table: %v", err)
- }
- defer pool.Exec(`drop table tx_serializable_sums`)
-
- tx1, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("BeginIso failed: %v", err)
- }
- defer tx1.Rollback()
-
- tx2, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("BeginIso failed: %v", err)
- }
- defer tx2.Rollback()
-
- _, err = tx1.Exec(`insert into tx_serializable_sums(num) select sum(num) from tx_serializable_sums`)
- if err != nil {
- t.Fatalf("Exec failed: %v", err)
- }
-
- _, err = tx2.Exec(`insert into tx_serializable_sums(num) select sum(num) from tx_serializable_sums`)
- if err != nil {
- t.Fatalf("Exec failed: %v", err)
- }
-
- err = tx1.Commit()
- if err != nil {
- t.Fatalf("Commit failed: %v", err)
- }
-
- err = tx2.Commit()
- if pgErr, ok := err.(pgx.PgError); !ok || pgErr.Code != "40001" {
- t.Fatalf("Expected serialization error 40001, got %#v", err)
- }
-}
-
-func TestTransactionSuccessfulRollback(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- _, err = tx.Exec("insert into foo(id) values (1)")
- if err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestBeginIso(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- isoLevels := []string{pgx.Serializable, pgx.RepeatableRead, pgx.ReadCommitted, pgx.ReadUncommitted}
- for _, iso := range isoLevels {
- tx, err := conn.BeginIso(iso)
- if err != nil {
- t.Fatalf("conn.BeginIso failed: %v", err)
- }
-
- var level string
- conn.QueryRow("select current_setting('transaction_isolation')").Scan(&level)
- if level != iso {
- t.Errorf("Expected to be in isolation level %v but was %v", iso, level)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
- }
-}
-
-func TestTxAfterClose(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- var zeroTime, t1, t2 time.Time
- tx.AfterClose(func(tx *pgx.Tx) {
- t1 = time.Now()
- })
-
- tx.AfterClose(func(tx *pgx.Tx) {
- t2 = time.Now()
- })
-
- tx.Rollback()
-
- if t1 == zeroTime {
- t.Error("First Tx.AfterClose callback not called")
- }
-
- if t2 == zeroTime {
- t.Error("Second Tx.AfterClose callback not called")
- }
-
- if t1.Before(t2) {
- t.Errorf("AfterClose callbacks called out of order: %v, %v", t1, t2)
- }
-}
-
-func TestTxStatus(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusInProgress {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusInProgress, status)
- }
-
- if err := tx.Rollback(); err != nil {
- t.Fatal(err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusRollbackSuccess {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusRollbackSuccess, status)
- }
-}
-
-func TestTxErr(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- // Purposely break transaction
- if _, err := tx.Exec("syntax error"); err == nil {
- t.Fatal("Unexpected success")
- }
-
- if err := tx.Commit(); err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusCommitFailure {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusRollbackSuccess, status)
- }
-
- if err := tx.Err(); err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/value_reader.go b/vendor/github.com/jackc/pgx/value_reader.go
deleted file mode 100644
index a489754..0000000
--- a/vendor/github.com/jackc/pgx/value_reader.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package pgx
-
-import (
- "errors"
-)
-
-// ValueReader is used by the Scanner interface to decode values.
-type ValueReader struct {
- mr *msgReader
- fd *FieldDescription
- valueBytesRemaining int32
- err error
-}
-
-// Err returns any error that the ValueReader has experienced
-func (r *ValueReader) Err() error {
- return r.err
-}
-
-// Fatal tells r that a Fatal error has occurred
-func (r *ValueReader) Fatal(err error) {
- r.err = err
-}
-
-// Len returns the number of unread bytes
-func (r *ValueReader) Len() int32 {
- return r.valueBytesRemaining
-}
-
-// Type returns the *FieldDescription of the value
-func (r *ValueReader) Type() *FieldDescription {
- return r.fd
-}
-
-func (r *ValueReader) ReadByte() byte {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining--
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readByte()
-}
-
-func (r *ValueReader) ReadInt16() int16 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 2
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt16()
-}
-
-func (r *ValueReader) ReadUint16() uint16 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 2
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readUint16()
-}
-
-func (r *ValueReader) ReadInt32() int32 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 4
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt32()
-}
-
-func (r *ValueReader) ReadUint32() uint32 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 4
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readUint32()
-}
-
-func (r *ValueReader) ReadInt64() int64 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 8
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt64()
-}
-
-func (r *ValueReader) ReadOid() Oid {
- return Oid(r.ReadUint32())
-}
-
-// ReadString reads count bytes and returns as string
-func (r *ValueReader) ReadString(count int32) string {
- if r.err != nil {
- return ""
- }
-
- r.valueBytesRemaining -= count
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return ""
- }
-
- return r.mr.readString(count)
-}
-
-// ReadBytes reads count bytes and returns as []byte
-func (r *ValueReader) ReadBytes(count int32) []byte {
- if r.err != nil {
- return nil
- }
-
- if count < 0 {
- r.Fatal(errors.New("count must not be negative"))
- return nil
- }
-
- r.valueBytesRemaining -= count
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return nil
- }
-
- return r.mr.readBytes(count)
-}
diff --git a/vendor/github.com/jackc/pgx/values.go b/vendor/github.com/jackc/pgx/values.go
index a189e18..6a1c4f0 100644
--- a/vendor/github.com/jackc/pgx/values.go
+++ b/vendor/github.com/jackc/pgx/values.go
@@ -1,62 +1,15 @@
package pgx
import (
- "bytes"
"database/sql/driver"
- "encoding/json"
"fmt"
- "io"
"math"
- "net"
"reflect"
- "regexp"
- "strconv"
- "strings"
"time"
-)
-// PostgreSQL oids for common types
-const (
- BoolOid = 16
- ByteaOid = 17
- CharOid = 18
- NameOid = 19
- Int8Oid = 20
- Int2Oid = 21
- Int4Oid = 23
- TextOid = 25
- OidOid = 26
- TidOid = 27
- XidOid = 28
- CidOid = 29
- JsonOid = 114
- CidrOid = 650
- CidrArrayOid = 651
- Float4Oid = 700
- Float8Oid = 701
- UnknownOid = 705
- InetOid = 869
- BoolArrayOid = 1000
- Int2ArrayOid = 1005
- Int4ArrayOid = 1007
- TextArrayOid = 1009
- ByteaArrayOid = 1001
- VarcharArrayOid = 1015
- Int8ArrayOid = 1016
- Float4ArrayOid = 1021
- Float8ArrayOid = 1022
- AclItemOid = 1033
- AclItemArrayOid = 1034
- InetArrayOid = 1041
- VarcharOid = 1043
- DateOid = 1082
- TimestampOid = 1114
- TimestampArrayOid = 1115
- TimestampTzOid = 1184
- TimestampTzArrayOid = 1185
- RecordOid = 2249
- UuidOid = 2950
- JsonbOid = 3802
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgtype"
+ "github.com/pkg/errors"
)
// PostgreSQL format codes
@@ -65,61 +18,6 @@ const (
BinaryFormatCode = 1
)
-const maxUint = ^uint(0)
-const maxInt = int(maxUint >> 1)
-const minInt = -maxInt - 1
-
-// DefaultTypeFormats maps type names to their default requested format (text
-// or binary). In theory the Scanner interface should be the one to determine
-// the format of the returned values. However, the query has already been
-// executed by the time Scan is called so it has no chance to set the format.
-// So for types that should always be returned in binary the format should be
-// set here.
-var DefaultTypeFormats map[string]int16
-
-func init() {
- DefaultTypeFormats = map[string]int16{
- "_aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin)
- "_bool": BinaryFormatCode,
- "_bytea": BinaryFormatCode,
- "_cidr": BinaryFormatCode,
- "_float4": BinaryFormatCode,
- "_float8": BinaryFormatCode,
- "_inet": BinaryFormatCode,
- "_int2": BinaryFormatCode,
- "_int4": BinaryFormatCode,
- "_int8": BinaryFormatCode,
- "_text": BinaryFormatCode,
- "_timestamp": BinaryFormatCode,
- "_timestamptz": BinaryFormatCode,
- "_varchar": BinaryFormatCode,
- "aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin)
- "bool": BinaryFormatCode,
- "bytea": BinaryFormatCode,
- "char": BinaryFormatCode,
- "cid": BinaryFormatCode,
- "cidr": BinaryFormatCode,
- "date": BinaryFormatCode,
- "float4": BinaryFormatCode,
- "float8": BinaryFormatCode,
- "json": BinaryFormatCode,
- "jsonb": BinaryFormatCode,
- "inet": BinaryFormatCode,
- "int2": BinaryFormatCode,
- "int4": BinaryFormatCode,
- "int8": BinaryFormatCode,
- "name": BinaryFormatCode,
- "oid": BinaryFormatCode,
- "record": BinaryFormatCode,
- "text": BinaryFormatCode,
- "tid": BinaryFormatCode,
- "timestamp": BinaryFormatCode,
- "timestamptz": BinaryFormatCode,
- "varchar": BinaryFormatCode,
- "xid": BinaryFormatCode,
- }
-}
-
// SerializationError occurs on failure to encode or decode a value
type SerializationError string
@@ -127,3313 +25,235 @@ func (e SerializationError) Error() string {
return string(e)
}
-// Deprecated: Scanner is an interface used to decode values from the PostgreSQL
-// server. To allow types to support pgx and database/sql.Scan this interface
-// has been deprecated in favor of PgxScanner.
-type Scanner interface {
- // Scan MUST check r.Type().DataType (to check by OID) or
- // r.Type().DataTypeName (to check by name) to ensure that it is scanning an
- // expected column type. It also MUST check r.Type().FormatCode before
- // decoding. It should not assume that it was called on a data type or format
- // that it understands.
- Scan(r *ValueReader) error
-}
-
-// PgxScanner is an interface used to decode values from the PostgreSQL server.
-// It is used exactly the same as the Scanner interface. It simply has renamed
-// the method.
-type PgxScanner interface {
- // ScanPgx MUST check r.Type().DataType (to check by OID) or
- // r.Type().DataTypeName (to check by name) to ensure that it is scanning an
- // expected column type. It also MUST check r.Type().FormatCode before
- // decoding. It should not assume that it was called on a data type or format
- // that it understands.
- ScanPgx(r *ValueReader) error
-}
-
-// Encoder is an interface used to encode values for transmission to the
-// PostgreSQL server.
-type Encoder interface {
- // Encode writes the value to w.
- //
- // If the value is NULL an int32(-1) should be written.
- //
- // Encode MUST check oid to see if the parameter data type is compatible. If
- // this is not done, the PostgreSQL server may detect the error if the
- // expected data size or format of the encoded data does not match. But if
- // the encoded data is a valid representation of the data type PostgreSQL
- // expects such as date and int4, incorrect data may be stored.
- Encode(w *WriteBuf, oid Oid) error
-
- // FormatCode returns the format that the encoder writes the value. It must be
- // either pgx.TextFormatCode or pgx.BinaryFormatCode.
- FormatCode() int16
-}
-
-// NullFloat32 represents an float4 that may be null. NullFloat32 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullFloat32 struct {
- Float32 float32
- Valid bool // Valid is true if Float32 is not NULL
-}
-
-func (n *NullFloat32) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Float4Oid {
- return SerializationError(fmt.Sprintf("NullFloat32.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Float32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Float32 = decodeFloat4(vr)
- return vr.Err()
-}
-
-func (n NullFloat32) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullFloat32) Encode(w *WriteBuf, oid Oid) error {
- if oid != Float4Oid {
- return SerializationError(fmt.Sprintf("NullFloat32.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeFloat32(w, oid, n.Float32)
-}
-
-// NullFloat64 represents an float8 that may be null. NullFloat64 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullFloat64 struct {
- Float64 float64
- Valid bool // Valid is true if Float64 is not NULL
-}
-
-func (n *NullFloat64) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Float8Oid {
- return SerializationError(fmt.Sprintf("NullFloat64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Float64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Float64 = decodeFloat8(vr)
- return vr.Err()
-}
-
-func (n NullFloat64) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullFloat64) Encode(w *WriteBuf, oid Oid) error {
- if oid != Float8Oid {
- return SerializationError(fmt.Sprintf("NullFloat64.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeFloat64(w, oid, n.Float64)
-}
-
-// NullString represents an string that may be null. NullString implements the
-// Scanner Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullString struct {
- String string
- Valid bool // Valid is true if String is not NULL
-}
-
-func (n *NullString) Scan(vr *ValueReader) error {
- // Not checking oid as so we can scan anything into into a NullString - may revisit this decision later
-
- if vr.Len() == -1 {
- n.String, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.String = decodeText(vr)
- return vr.Err()
-}
-
-func (n NullString) FormatCode() int16 { return TextFormatCode }
-
-func (n NullString) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, n.String)
-}
-
-// AclItem is used for PostgreSQL's aclitem data type. A sample aclitem
-// might look like this:
-//
-// postgres=arwdDxt/postgres
-//
-// Note, however, that because the user/role name part of an aclitem is
-// an identifier, it follows all the usual formatting rules for SQL
-// identifiers: if it contains spaces and other special characters,
-// it should appear in double-quotes:
-//
-// postgres=arwdDxt/"role with spaces"
-//
-type AclItem string
-
-// NullAclItem represents a pgx.AclItem that may be null. NullAclItem implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullAclItem struct {
- AclItem AclItem
- Valid bool // Valid is true if AclItem is not NULL
-}
-
-func (n *NullAclItem) Scan(vr *ValueReader) error {
- if vr.Type().DataType != AclItemOid {
- return SerializationError(fmt.Sprintf("NullAclItem.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.AclItem, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.AclItem = AclItem(decodeText(vr))
- return vr.Err()
-}
-
-// Particularly important to return TextFormatCode, seeing as Postgres
-// only ever sends aclitem as text, not binary.
-func (n NullAclItem) FormatCode() int16 { return TextFormatCode }
-
-func (n NullAclItem) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, string(n.AclItem))
-}
-
-// Name is a type used for PostgreSQL's special 63-byte
-// name data type, used for identifiers like table names.
-// The pg_class.relname column is a good example of where the
-// name data type is used.
-//
-// Note that the underlying Go data type of pgx.Name is string,
-// so there is no way to enforce the 63-byte length. Inputting
-// a longer name into PostgreSQL will result in silent truncation
-// to 63 bytes.
-//
-// Also, if you have custom-compiled PostgreSQL and set
-// NAMEDATALEN to a different value, obviously that number of
-// bytes applies, rather than the default 63.
-type Name string
-
-// NullName represents a pgx.Name that may be null. NullName implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullName struct {
- Name Name
- Valid bool // Valid is true if Name is not NULL
-}
-
-func (n *NullName) Scan(vr *ValueReader) error {
- if vr.Type().DataType != NameOid {
- return SerializationError(fmt.Sprintf("NullName.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Name, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.Name = Name(decodeText(vr))
- return vr.Err()
-}
-
-func (n NullName) FormatCode() int16 { return TextFormatCode }
-
-func (n NullName) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, string(n.Name))
-}
-
-// The pgx.Char type is for PostgreSQL's special 8-bit-only
-// "char" type more akin to the C language's char type, or Go's byte type.
-// (Note that the name in PostgreSQL itself is "char", in double-quotes,
-// and not char.) It gets used a lot in PostgreSQL's system tables to hold
-// a single ASCII character value (eg pg_class.relkind).
-type Char byte
-
-// NullChar represents a pgx.Char that may be null. NullChar implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullChar struct {
- Char Char
- Valid bool // Valid is true if Char is not NULL
-}
-
-func (n *NullChar) Scan(vr *ValueReader) error {
- if vr.Type().DataType != CharOid {
- return SerializationError(fmt.Sprintf("NullChar.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Char, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Char = decodeChar(vr)
- return vr.Err()
-}
-
-func (n NullChar) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullChar) Encode(w *WriteBuf, oid Oid) error {
- if oid != CharOid {
- return SerializationError(fmt.Sprintf("NullChar.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeChar(w, oid, n.Char)
-}
-
-// NullInt16 represents a smallint that may be null. NullInt16 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullInt16 struct {
- Int16 int16
- Valid bool // Valid is true if Int16 is not NULL
-}
-
-func (n *NullInt16) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int2Oid {
- return SerializationError(fmt.Sprintf("NullInt16.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int16, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int16 = decodeInt2(vr)
- return vr.Err()
-}
-
-func (n NullInt16) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt16) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int2Oid {
- return SerializationError(fmt.Sprintf("NullInt16.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt16(w, oid, n.Int16)
-}
-
-// NullInt32 represents an integer that may be null. NullInt32 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullInt32 struct {
- Int32 int32
- Valid bool // Valid is true if Int32 is not NULL
-}
-
-func (n *NullInt32) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int4Oid {
- return SerializationError(fmt.Sprintf("NullInt32.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int32 = decodeInt4(vr)
- return vr.Err()
-}
-
-func (n NullInt32) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt32) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int4Oid {
- return SerializationError(fmt.Sprintf("NullInt32.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt32(w, oid, n.Int32)
-}
-
-// Oid (Object Identifier Type) is, according to https://www.postgresql.org/docs/current/static/datatype-oid.html,
-// used internally by PostgreSQL as a primary key for various system tables. It is currently implemented
-// as an unsigned four-byte integer. Its definition can be found in src/include/postgres_ext.h
-// in the PostgreSQL sources.
-type Oid uint32
-
-// NullOid represents a Command Identifier (Oid) that may be null. NullOid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullOid struct {
- Oid Oid
- Valid bool // Valid is true if Oid is not NULL
-}
-
-func (n *NullOid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != OidOid {
- return SerializationError(fmt.Sprintf("NullOid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Oid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Oid = decodeOid(vr)
- return vr.Err()
-}
-
-func (n NullOid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullOid) Encode(w *WriteBuf, oid Oid) error {
- if oid != OidOid {
- return SerializationError(fmt.Sprintf("NullOid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeOid(w, oid, n.Oid)
-}
-
-// Xid is PostgreSQL's Transaction ID type.
-//
-// In later versions of PostgreSQL, it is the type used for the backend_xid
-// and backend_xmin columns of the pg_stat_activity system view.
-//
-// Also, when one does
-//
-// select xmin, xmax, * from some_table;
-//
-// it is the data type of the xmin and xmax hidden system columns.
-//
-// It is currently implemented as an unsigned four byte integer.
-// Its definition can be found in src/include/postgres_ext.h as TransactionId
-// in the PostgreSQL sources.
-type Xid uint32
-
-// NullXid represents a Transaction ID (Xid) that may be null. NullXid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullXid struct {
- Xid Xid
- Valid bool // Valid is true if Xid is not NULL
-}
-
-func (n *NullXid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != XidOid {
- return SerializationError(fmt.Sprintf("NullXid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Xid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Xid = decodeXid(vr)
- return vr.Err()
-}
-
-func (n NullXid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullXid) Encode(w *WriteBuf, oid Oid) error {
- if oid != XidOid {
- return SerializationError(fmt.Sprintf("NullXid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeXid(w, oid, n.Xid)
-}
-
-// Cid is PostgreSQL's Command Identifier type.
-//
-// When one does
-//
-// select cmin, cmax, * from some_table;
-//
-// it is the data type of the cmin and cmax hidden system columns.
-//
-// It is currently implemented as an unsigned four byte integer.
-// Its definition can be found in src/include/c.h as CommandId
-// in the PostgreSQL sources.
-type Cid uint32
-
-// NullCid represents a Command Identifier (Cid) that may be null. NullCid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullCid struct {
- Cid Cid
- Valid bool // Valid is true if Cid is not NULL
-}
-
-func (n *NullCid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != CidOid {
- return SerializationError(fmt.Sprintf("NullCid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Cid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Cid = decodeCid(vr)
- return vr.Err()
-}
-
-func (n NullCid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullCid) Encode(w *WriteBuf, oid Oid) error {
- if oid != CidOid {
- return SerializationError(fmt.Sprintf("NullCid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeCid(w, oid, n.Cid)
-}
-
-// Tid is PostgreSQL's Tuple Identifier type.
-//
-// When one does
-//
-// select ctid, * from some_table;
-//
-// it is the data type of the ctid hidden system column.
-//
-// It is currently implemented as a pair unsigned two byte integers.
-// Its conversion functions can be found in src/backend/utils/adt/tid.c
-// in the PostgreSQL sources.
-type Tid struct {
- BlockNumber uint32
- OffsetNumber uint16
-}
-
-// NullTid represents a Tuple Identifier (Tid) that may be null. NullTid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullTid struct {
- Tid Tid
- Valid bool // Valid is true if Tid is not NULL
-}
-
-func (n *NullTid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != TidOid {
- return SerializationError(fmt.Sprintf("NullTid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Tid, n.Valid = Tid{BlockNumber: 0, OffsetNumber: 0}, false
- return nil
- }
- n.Valid = true
- n.Tid = decodeTid(vr)
- return vr.Err()
-}
-
-func (n NullTid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullTid) Encode(w *WriteBuf, oid Oid) error {
- if oid != TidOid {
- return SerializationError(fmt.Sprintf("NullTid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeTid(w, oid, n.Tid)
-}
-
-// NullInt64 represents an bigint that may be null. NullInt64 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullInt64 struct {
- Int64 int64
- Valid bool // Valid is true if Int64 is not NULL
-}
-
-func (n *NullInt64) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int8Oid {
- return SerializationError(fmt.Sprintf("NullInt64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int64 = decodeInt8(vr)
- return vr.Err()
-}
-
-func (n NullInt64) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt64) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int8Oid {
- return SerializationError(fmt.Sprintf("NullInt64.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt64(w, oid, n.Int64)
-}
-
-// NullBool represents an bool that may be null. NullBool implements the Scanner
-// and Encoder interfaces so it may be used both as an argument to Query[Row]
-// and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullBool struct {
- Bool bool
- Valid bool // Valid is true if Bool is not NULL
-}
-
-func (n *NullBool) Scan(vr *ValueReader) error {
- if vr.Type().DataType != BoolOid {
- return SerializationError(fmt.Sprintf("NullBool.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Bool, n.Valid = false, false
- return nil
- }
- n.Valid = true
- n.Bool = decodeBool(vr)
- return vr.Err()
-}
-
-func (n NullBool) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullBool) Encode(w *WriteBuf, oid Oid) error {
- if oid != BoolOid {
- return SerializationError(fmt.Sprintf("NullBool.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeBool(w, oid, n.Bool)
-}
-
-// NullTime represents an time.Time that may be null. NullTime implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan. It corresponds with the PostgreSQL
-// types timestamptz, timestamp, and date.
-//
-// If Valid is false then the value is NULL.
-type NullTime struct {
- Time time.Time
- Valid bool // Valid is true if Time is not NULL
-}
-
-func (n *NullTime) Scan(vr *ValueReader) error {
- oid := vr.Type().DataType
- if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid {
- return SerializationError(fmt.Sprintf("NullTime.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Time, n.Valid = time.Time{}, false
- return nil
- }
-
- n.Valid = true
- switch oid {
- case TimestampTzOid:
- n.Time = decodeTimestampTz(vr)
- case TimestampOid:
- n.Time = decodeTimestamp(vr)
- case DateOid:
- n.Time = decodeDate(vr)
- }
-
- return vr.Err()
-}
-
-func (n NullTime) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullTime) Encode(w *WriteBuf, oid Oid) error {
- if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid {
- return SerializationError(fmt.Sprintf("NullTime.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeTime(w, oid, n.Time)
-}
-
-// Hstore represents an hstore column. It does not support a null column or null
-// key values (use NullHstore for this). Hstore implements the Scanner and
-// Encoder interfaces so it may be used both as an argument to Query[Row] and a
-// destination for Scan.
-type Hstore map[string]string
-
-func (h *Hstore) Scan(vr *ValueReader) error {
- //oid for hstore not standardized, so we check its type name
- if vr.Type().DataTypeName != "hstore" {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into Hstore", vr.Type().DataTypeName)))
- return nil
- }
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null column into Hstore"))
- return nil
- }
-
- switch vr.Type().FormatCode {
- case TextFormatCode:
- m, err := parseHstoreToMap(vr.ReadString(vr.Len()))
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err)))
- return nil
- }
- hm := Hstore(m)
- *h = hm
- return nil
- case BinaryFormatCode:
- vr.Fatal(ProtocolError("Can't decode binary hstore"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-}
-
-func (h Hstore) FormatCode() int16 { return TextFormatCode }
-
-func (h Hstore) Encode(w *WriteBuf, oid Oid) error {
- var buf bytes.Buffer
-
- i := 0
- for k, v := range h {
- i++
- ks := strings.Replace(k, `\`, `\\`, -1)
- ks = strings.Replace(ks, `"`, `\"`, -1)
- vs := strings.Replace(v, `\`, `\\`, -1)
- vs = strings.Replace(vs, `"`, `\"`, -1)
- buf.WriteString(`"`)
- buf.WriteString(ks)
- buf.WriteString(`"=>"`)
- buf.WriteString(vs)
- buf.WriteString(`"`)
- if i < len(h) {
- buf.WriteString(", ")
- }
- }
- w.WriteInt32(int32(buf.Len()))
- w.WriteBytes(buf.Bytes())
- return nil
-}
-
-// NullHstore represents an hstore column that can be null or have null values
-// associated with its keys. NullHstore implements the Scanner and Encoder
-// interfaces so it may be used both as an argument to Query[Row] and a
-// destination for Scan.
-//
-// If Valid is false, then the value of the entire hstore column is NULL
-// If any of the NullString values in Store has Valid set to false, the key
-// appears in the hstore column, but its value is explicitly set to NULL.
-type NullHstore struct {
- Hstore map[string]NullString
- Valid bool
-}
-
-func (h *NullHstore) Scan(vr *ValueReader) error {
- //oid for hstore not standardized, so we check its type name
- if vr.Type().DataTypeName != "hstore" {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into NullHstore", vr.Type().DataTypeName)))
- return nil
- }
-
- if vr.Len() == -1 {
- h.Valid = false
- return nil
- }
-
- switch vr.Type().FormatCode {
- case TextFormatCode:
- store, err := parseHstoreToNullHstore(vr.ReadString(vr.Len()))
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err)))
- return nil
- }
- h.Valid = true
- h.Hstore = store
- return nil
- case BinaryFormatCode:
- vr.Fatal(ProtocolError("Can't decode binary hstore"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-}
-
-func (h NullHstore) FormatCode() int16 { return TextFormatCode }
-
-func (h NullHstore) Encode(w *WriteBuf, oid Oid) error {
- var buf bytes.Buffer
-
- if !h.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- i := 0
- for k, v := range h.Hstore {
- i++
- ks := strings.Replace(k, `\`, `\\`, -1)
- ks = strings.Replace(ks, `"`, `\"`, -1)
- if v.Valid {
- vs := strings.Replace(v.String, `\`, `\\`, -1)
- vs = strings.Replace(vs, `"`, `\"`, -1)
- buf.WriteString(fmt.Sprintf(`"%s"=>"%s"`, ks, vs))
- } else {
- buf.WriteString(fmt.Sprintf(`"%s"=>NULL`, ks))
- }
- if i < len(h.Hstore) {
- buf.WriteString(", ")
- }
- }
- w.WriteInt32(int32(buf.Len()))
- w.WriteBytes(buf.Bytes())
- return nil
-}
-
-// Encode encodes arg into wbuf as the type oid. This allows implementations
-// of the Encoder interface to delegate the actual work of encoding to the
-// built-in functionality.
-func Encode(wbuf *WriteBuf, oid Oid, arg interface{}) error {
+func convertSimpleArgument(ci *pgtype.ConnInfo, arg interface{}) (interface{}, error) {
if arg == nil {
- wbuf.WriteInt32(-1)
- return nil
+ return nil, nil
}
switch arg := arg.(type) {
- case Encoder:
- return arg.Encode(wbuf, oid)
case driver.Valuer:
- v, err := arg.Value()
+ return callValuerValue(arg)
+ case pgtype.TextEncoder:
+ buf, err := arg.EncodeText(ci, nil)
if err != nil {
- return err
+ return nil, err
}
- return Encode(wbuf, oid, v)
- case string:
- return encodeString(wbuf, oid, arg)
- case []AclItem:
- return encodeAclItemSlice(wbuf, oid, arg)
- case []byte:
- return encodeByteSlice(wbuf, oid, arg)
- case [][]byte:
- return encodeByteSliceSlice(wbuf, oid, arg)
- }
-
- refVal := reflect.ValueOf(arg)
-
- if refVal.Kind() == reflect.Ptr {
- if refVal.IsNil() {
- wbuf.WriteInt32(-1)
- return nil
+ if buf == nil {
+ return nil, nil
}
- arg = refVal.Elem().Interface()
- return Encode(wbuf, oid, arg)
- }
-
- if oid == JsonOid {
- return encodeJSON(wbuf, oid, arg)
- }
- if oid == JsonbOid {
- return encodeJSONB(wbuf, oid, arg)
- }
-
- switch arg := arg.(type) {
- case []string:
- return encodeStringSlice(wbuf, oid, arg)
+ return string(buf), nil
+ case int64:
+ return arg, nil
+ case float64:
+ return arg, nil
case bool:
- return encodeBool(wbuf, oid, arg)
- case []bool:
- return encodeBoolSlice(wbuf, oid, arg)
- case int:
- return encodeInt(wbuf, oid, arg)
- case uint:
- return encodeUInt(wbuf, oid, arg)
- case Char:
- return encodeChar(wbuf, oid, arg)
- case AclItem:
- // The aclitem data type goes over the wire using the same format as string,
- // so just cast to string and use encodeString
- return encodeString(wbuf, oid, string(arg))
- case Name:
- // The name data type goes over the wire using the same format as string,
- // so just cast to string and use encodeString
- return encodeString(wbuf, oid, string(arg))
+ return arg, nil
+ case time.Time:
+ return arg, nil
+ case string:
+ return arg, nil
+ case []byte:
+ return arg, nil
case int8:
- return encodeInt8(wbuf, oid, arg)
- case uint8:
- return encodeUInt8(wbuf, oid, arg)
+ return int64(arg), nil
case int16:
- return encodeInt16(wbuf, oid, arg)
- case []int16:
- return encodeInt16Slice(wbuf, oid, arg)
- case uint16:
- return encodeUInt16(wbuf, oid, arg)
- case []uint16:
- return encodeUInt16Slice(wbuf, oid, arg)
+ return int64(arg), nil
case int32:
- return encodeInt32(wbuf, oid, arg)
- case []int32:
- return encodeInt32Slice(wbuf, oid, arg)
+ return int64(arg), nil
+ case int:
+ return int64(arg), nil
+ case uint8:
+ return int64(arg), nil
+ case uint16:
+ return int64(arg), nil
case uint32:
- return encodeUInt32(wbuf, oid, arg)
- case []uint32:
- return encodeUInt32Slice(wbuf, oid, arg)
- case int64:
- return encodeInt64(wbuf, oid, arg)
- case []int64:
- return encodeInt64Slice(wbuf, oid, arg)
+ return int64(arg), nil
case uint64:
- return encodeUInt64(wbuf, oid, arg)
- case []uint64:
- return encodeUInt64Slice(wbuf, oid, arg)
- case float32:
- return encodeFloat32(wbuf, oid, arg)
- case []float32:
- return encodeFloat32Slice(wbuf, oid, arg)
- case float64:
- return encodeFloat64(wbuf, oid, arg)
- case []float64:
- return encodeFloat64Slice(wbuf, oid, arg)
- case time.Time:
- return encodeTime(wbuf, oid, arg)
- case []time.Time:
- return encodeTimeSlice(wbuf, oid, arg)
- case net.IP:
- return encodeIP(wbuf, oid, arg)
- case []net.IP:
- return encodeIPSlice(wbuf, oid, arg)
- case net.IPNet:
- return encodeIPNet(wbuf, oid, arg)
- case []net.IPNet:
- return encodeIPNetSlice(wbuf, oid, arg)
- case Oid:
- return encodeOid(wbuf, oid, arg)
- case Xid:
- return encodeXid(wbuf, oid, arg)
- case Cid:
- return encodeCid(wbuf, oid, arg)
- default:
- if strippedArg, ok := stripNamedType(&refVal); ok {
- return Encode(wbuf, oid, strippedArg)
- }
- return SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
- }
-}
-
-func stripNamedType(val *reflect.Value) (interface{}, bool) {
- switch val.Kind() {
- case reflect.Int:
- return int(val.Int()), true
- case reflect.Int8:
- return int8(val.Int()), true
- case reflect.Int16:
- return int16(val.Int()), true
- case reflect.Int32:
- return int32(val.Int()), true
- case reflect.Int64:
- return int64(val.Int()), true
- case reflect.Uint:
- return uint(val.Uint()), true
- case reflect.Uint8:
- return uint8(val.Uint()), true
- case reflect.Uint16:
- return uint16(val.Uint()), true
- case reflect.Uint32:
- return uint32(val.Uint()), true
- case reflect.Uint64:
- return uint64(val.Uint()), true
- case reflect.String:
- return val.String(), true
- }
-
- return nil, false
-}
-
-// Decode decodes from vr into d. d must be a pointer. This allows
-// implementations of the Decoder interface to delegate the actual work of
-// decoding to the built-in functionality.
-func Decode(vr *ValueReader, d interface{}) error {
- switch v := d.(type) {
- case *bool:
- *v = decodeBool(vr)
- case *int:
- n := decodeInt(vr)
- if n < int64(minInt) {
- return fmt.Errorf("%d is less than minimum value for int", n)
- } else if n > int64(maxInt) {
- return fmt.Errorf("%d is greater than maximum value for int", n)
- }
- *v = int(n)
- case *int8:
- n := decodeInt(vr)
- if n < math.MinInt8 {
- return fmt.Errorf("%d is less than minimum value for int8", n)
- } else if n > math.MaxInt8 {
- return fmt.Errorf("%d is greater than maximum value for int8", n)
- }
- *v = int8(n)
- case *int16:
- n := decodeInt(vr)
- if n < math.MinInt16 {
- return fmt.Errorf("%d is less than minimum value for int16", n)
- } else if n > math.MaxInt16 {
- return fmt.Errorf("%d is greater than maximum value for int16", n)
- }
- *v = int16(n)
- case *int32:
- n := decodeInt(vr)
- if n < math.MinInt32 {
- return fmt.Errorf("%d is less than minimum value for int32", n)
- } else if n > math.MaxInt32 {
- return fmt.Errorf("%d is greater than maximum value for int32", n)
- }
- *v = int32(n)
- case *int64:
- n := decodeInt(vr)
- if n < math.MinInt64 {
- return fmt.Errorf("%d is less than minimum value for int64", n)
- } else if n > math.MaxInt64 {
- return fmt.Errorf("%d is greater than maximum value for int64", n)
- }
- *v = int64(n)
- case *uint:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint8", n)
- } else if maxInt == math.MaxInt32 && n > math.MaxUint32 {
- return fmt.Errorf("%d is greater than maximum value for uint", n)
- }
- *v = uint(n)
- case *uint8:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint8", n)
- } else if n > math.MaxUint8 {
- return fmt.Errorf("%d is greater than maximum value for uint8", n)
- }
- *v = uint8(n)
- case *uint16:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint16", n)
- } else if n > math.MaxUint16 {
- return fmt.Errorf("%d is greater than maximum value for uint16", n)
- }
- *v = uint16(n)
- case *uint32:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint32", n)
- } else if n > math.MaxUint32 {
- return fmt.Errorf("%d is greater than maximum value for uint32", n)
- }
- *v = uint32(n)
- case *uint64:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint64", n)
- }
- *v = uint64(n)
- case *Char:
- *v = decodeChar(vr)
- case *AclItem:
- // aclitem goes over the wire just like text
- *v = AclItem(decodeText(vr))
- case *Name:
- // name goes over the wire just like text
- *v = Name(decodeText(vr))
- case *Oid:
- *v = decodeOid(vr)
- case *Xid:
- *v = decodeXid(vr)
- case *Tid:
- *v = decodeTid(vr)
- case *Cid:
- *v = decodeCid(vr)
- case *string:
- *v = decodeText(vr)
- case *float32:
- *v = decodeFloat4(vr)
- case *float64:
- *v = decodeFloat8(vr)
- case *[]AclItem:
- *v = decodeAclItemArray(vr)
- case *[]bool:
- *v = decodeBoolArray(vr)
- case *[]int16:
- *v = decodeInt2Array(vr)
- case *[]uint16:
- *v = decodeInt2ArrayToUInt(vr)
- case *[]int32:
- *v = decodeInt4Array(vr)
- case *[]uint32:
- *v = decodeInt4ArrayToUInt(vr)
- case *[]int64:
- *v = decodeInt8Array(vr)
- case *[]uint64:
- *v = decodeInt8ArrayToUInt(vr)
- case *[]float32:
- *v = decodeFloat4Array(vr)
- case *[]float64:
- *v = decodeFloat8Array(vr)
- case *[]string:
- *v = decodeTextArray(vr)
- case *[]time.Time:
- *v = decodeTimestampArray(vr)
- case *[][]byte:
- *v = decodeByteaArray(vr)
- case *[]interface{}:
- *v = decodeRecord(vr)
- case *time.Time:
- switch vr.Type().DataType {
- case DateOid:
- *v = decodeDate(vr)
- case TimestampTzOid:
- *v = decodeTimestampTz(vr)
- case TimestampOid:
- *v = decodeTimestamp(vr)
- default:
- return fmt.Errorf("Can't convert OID %v to time.Time", vr.Type().DataType)
- }
- case *net.IP:
- ipnet := decodeInet(vr)
- if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount {
- return fmt.Errorf("Cannot decode netmask into *net.IP")
- }
- *v = ipnet.IP
- case *[]net.IP:
- ipnets := decodeInetArray(vr)
- ips := make([]net.IP, len(ipnets))
- for i, ipnet := range ipnets {
- if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount {
- return fmt.Errorf("Cannot decode netmask into *net.IP")
- }
- ips[i] = ipnet.IP
- }
- *v = ips
- case *net.IPNet:
- *v = decodeInet(vr)
- case *[]net.IPNet:
- *v = decodeInetArray(vr)
- default:
- if v := reflect.ValueOf(d); v.Kind() == reflect.Ptr {
- el := v.Elem()
- switch el.Kind() {
- // if d is a pointer to pointer, strip the pointer and try again
- case reflect.Ptr:
- // -1 is a null value
- if vr.Len() == -1 {
- if !el.IsNil() {
- // if the destination pointer is not nil, nil it out
- el.Set(reflect.Zero(el.Type()))
- }
- return nil
- }
- if el.IsNil() {
- // allocate destination
- el.Set(reflect.New(el.Type().Elem()))
- }
- d = el.Interface()
- return Decode(vr, d)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n := decodeInt(vr)
- if el.OverflowInt(n) {
- return fmt.Errorf("Scan cannot decode %d into %T", n, d)
- }
- el.SetInt(n)
- return nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for %T", n, d)
- }
- if el.OverflowUint(uint64(n)) {
- return fmt.Errorf("Scan cannot decode %d into %T", n, d)
- }
- el.SetUint(uint64(n))
- return nil
- case reflect.String:
- el.SetString(decodeText(vr))
- return nil
- }
+ if arg > math.MaxInt64 {
+ return nil, errors.Errorf("arg too big for int64: %v", arg)
}
- return fmt.Errorf("Scan cannot decode into %T", d)
- }
-
- return nil
-}
-
-func decodeBool(vr *ValueReader) bool {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into bool"))
- return false
- }
-
- if vr.Type().DataType != BoolOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into bool", vr.Type().DataType)))
- return false
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return false
- }
-
- if vr.Len() != 1 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an bool: %d", vr.Len())))
- return false
- }
-
- b := vr.ReadByte()
- return b != 0
-}
-
-func encodeBool(w *WriteBuf, oid Oid, value bool) error {
- if oid != BoolOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "bool", oid)
- }
-
- w.WriteInt32(1)
-
- var n byte
- if value {
- n = 1
- }
-
- w.WriteByte(n)
-
- return nil
-}
-
-func decodeInt(vr *ValueReader) int64 {
- switch vr.Type().DataType {
- case Int2Oid:
- return int64(decodeInt2(vr))
- case Int4Oid:
- return int64(decodeInt4(vr))
- case Int8Oid:
- return int64(decodeInt8(vr))
- }
-
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into any integer type", vr.Type().DataType)))
- return 0
-}
-
-func decodeInt8(vr *ValueReader) int64 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int64"))
- return 0
- }
-
- if vr.Type().DataType != Int8Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int8", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt64()
-}
-
-func decodeChar(vr *ValueReader) Char {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into char"))
- return Char(0)
- }
-
- if vr.Type().DataType != CharOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into char", vr.Type().DataType)))
- return Char(0)
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Char(0)
- }
-
- if vr.Len() != 1 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a char: %d", vr.Len())))
- return Char(0)
- }
-
- return Char(vr.ReadByte())
-}
-
-func decodeInt2(vr *ValueReader) int16 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int16"))
- return 0
- }
-
- if vr.Type().DataType != Int2Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int16", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 2 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt16()
-}
-
-func encodeInt(w *WriteBuf, oid Oid, value int) error {
- switch oid {
- case Int2Oid:
- if value < math.MinInt16 {
- return fmt.Errorf("%d is less than min pg:int2", value)
- } else if value > math.MaxInt16 {
- return fmt.Errorf("%d is greater than max pg:int2", value)
- }
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- if value < math.MinInt32 {
- return fmt.Errorf("%d is less than min pg:int4", value)
- } else if value > math.MaxInt32 {
- return fmt.Errorf("%d is greater than max pg:int4", value)
- }
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- if int64(value) <= int64(math.MaxInt64) {
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- } else {
- return fmt.Errorf("%d is larger than max int64 %d", value, int64(math.MaxInt64))
- }
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int8", oid)
- }
-
- return nil
-}
-
-func encodeUInt(w *WriteBuf, oid Oid, value uint) error {
- switch oid {
- case Int2Oid:
- if value > math.MaxInt16 {
- return fmt.Errorf("%d is greater than max pg:int2", value)
- }
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- if value > math.MaxInt32 {
- return fmt.Errorf("%d is greater than max pg:int4", value)
- }
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- //****** Changed value to int64(value) and math.MaxInt64 to int64(math.MaxInt64)
- if int64(value) > int64(math.MaxInt64) {
- return fmt.Errorf("%d is greater than max pg:int8", value)
- }
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
-
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint8", oid)
- }
-
- return nil
-}
-
-func encodeChar(w *WriteBuf, oid Oid, value Char) error {
- w.WriteInt32(1)
- w.WriteByte(byte(value))
- return nil
-}
-
-func encodeInt8(w *WriteBuf, oid Oid, value int8) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int8", oid)
- }
-
- return nil
-}
-
-func encodeUInt8(w *WriteBuf, oid Oid, value uint8) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint8", oid)
- }
-
- return nil
-}
-
-func encodeInt16(w *WriteBuf, oid Oid, value int16) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(value)
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int16", oid)
- }
-
- return nil
-}
-
-func encodeUInt16(w *WriteBuf, oid Oid, value uint16) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int16", oid)
- }
-
- return nil
-}
-
-func encodeInt32(w *WriteBuf, oid Oid, value int32) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(value)
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int32", oid)
- }
-
- return nil
-}
-
-func encodeUInt32(w *WriteBuf, oid Oid, value uint32) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint32", oid)
- }
-
- return nil
-}
-
-func encodeInt64(w *WriteBuf, oid Oid, value int64) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(value)
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int64", oid)
- }
-
- return nil
-}
-
-func encodeUInt64(w *WriteBuf, oid Oid, value uint64) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
-
- if value <= math.MaxInt64 {
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- } else {
- return fmt.Errorf("%d is greater than max int64 %d", value, int64(math.MaxInt64))
- }
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint64", oid)
- }
-
- return nil
-}
-
-func decodeInt4(vr *ValueReader) int32 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int32"))
- return 0
- }
-
- if vr.Type().DataType != Int4Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int32", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt32()
-}
-
-func decodeOid(vr *ValueReader) Oid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Oid"))
- return Oid(0)
- }
-
- if vr.Type().DataType != OidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Oid", vr.Type().DataType)))
- return Oid(0)
- }
-
- // Oid needs to decode text format because it is used in loadPgTypes
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Oid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Oid(0)
+ return int64(arg), nil
+ case uint:
+ if uint64(arg) > math.MaxInt64 {
+ return nil, errors.Errorf("arg too big for int64: %v", arg)
}
- return Oid(vr.ReadInt32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Oid(0)
- }
-}
-
-func encodeOid(w *WriteBuf, oid Oid, value Oid) error {
- if oid != OidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Oid", oid)
- }
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
-}
-
-func decodeXid(vr *ValueReader) Xid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Xid"))
- return Xid(0)
+ return int64(arg), nil
+ case float32:
+ return float64(arg), nil
}
- if vr.Type().DataType != XidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Xid", vr.Type().DataType)))
- return Xid(0)
- }
+ refVal := reflect.ValueOf(arg)
- // Unlikely Xid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Xid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Xid(0)
+ if refVal.Kind() == reflect.Ptr {
+ if refVal.IsNil() {
+ return nil, nil
}
- return Xid(vr.ReadUint32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Xid(0)
+ arg = refVal.Elem().Interface()
+ return convertSimpleArgument(ci, arg)
}
-}
-func encodeXid(w *WriteBuf, oid Oid, value Xid) error {
- if oid != XidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Xid", oid)
+ if strippedArg, ok := stripNamedType(&refVal); ok {
+ return convertSimpleArgument(ci, strippedArg)
}
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
+ return nil, SerializationError(fmt.Sprintf("Cannot encode %T in simple protocol - %T must implement driver.Valuer, pgtype.TextEncoder, or be a native type", arg, arg))
}
-func decodeCid(vr *ValueReader) Cid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Cid"))
- return Cid(0)
- }
-
- if vr.Type().DataType != CidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Cid", vr.Type().DataType)))
- return Cid(0)
+func encodePreparedStatementArgument(ci *pgtype.ConnInfo, buf []byte, oid pgtype.OID, arg interface{}) ([]byte, error) {
+ if arg == nil {
+ return pgio.AppendInt32(buf, -1), nil
}
- // Unlikely Cid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
+ switch arg := arg.(type) {
+ case pgtype.BinaryEncoder:
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := arg.EncodeBinary(ci, buf)
if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Cid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Cid(0)
- }
- return Cid(vr.ReadUint32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Cid(0)
- }
-}
-
-func encodeCid(w *WriteBuf, oid Oid, value Cid) error {
- if oid != CidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Cid", oid)
- }
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
-}
-
-// Note that we do not match negative numbers, because neither the
-// BlockNumber nor OffsetNumber of a Tid can be negative.
-var tidRegexp *regexp.Regexp = regexp.MustCompile(`^\((\d*),(\d*)\)$`)
-
-func decodeTid(vr *ValueReader) Tid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Tid"))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-
- if vr.Type().DataType != TidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Tid", vr.Type().DataType)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-
- // Unlikely Tid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
-
- match := tidRegexp.FindStringSubmatch(s)
- if match == nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
+ return nil, err
}
-
- blockNumber, err := strconv.ParseUint(s, 10, 16)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid BlockNumber part of a Tid: %v", s)))
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
-
- offsetNumber, err := strconv.ParseUint(s, 10, 16)
+ return buf, nil
+ case pgtype.TextEncoder:
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := arg.EncodeText(ci, buf)
if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid offsetNumber part of a Tid: %v", s)))
- }
- return Tid{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber)}
- case BinaryFormatCode:
- if vr.Len() != 6 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
- return Tid{BlockNumber: vr.ReadUint32(), OffsetNumber: vr.ReadUint16()}
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-}
-
-func encodeTid(w *WriteBuf, oid Oid, value Tid) error {
- if oid != TidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Tid", oid)
- }
-
- w.WriteInt32(6)
- w.WriteUint32(value.BlockNumber)
- w.WriteUint16(value.OffsetNumber)
-
- return nil
-}
-
-func decodeFloat4(vr *ValueReader) float32 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into float32"))
- return 0
- }
-
- if vr.Type().DataType != Float4Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float32", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4: %d", vr.Len())))
- return 0
- }
-
- i := vr.ReadInt32()
- return math.Float32frombits(uint32(i))
-}
-
-func encodeFloat32(w *WriteBuf, oid Oid, value float32) error {
- switch oid {
- case Float4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(math.Float32bits(value)))
- case Float8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(float64(value))))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "float32", oid)
- }
-
- return nil
-}
-
-func decodeFloat8(vr *ValueReader) float64 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into float64"))
- return 0
- }
-
- if vr.Type().DataType != Float8Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float64", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float8: %d", vr.Len())))
- return 0
- }
-
- i := vr.ReadInt64()
- return math.Float64frombits(uint64(i))
-}
-
-func encodeFloat64(w *WriteBuf, oid Oid, value float64) error {
- switch oid {
- case Float8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(value)))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "float64", oid)
- }
-
- return nil
-}
-
-func decodeText(vr *ValueReader) string {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into string"))
- return ""
- }
-
- return vr.ReadString(vr.Len())
-}
-
-func encodeString(w *WriteBuf, oid Oid, value string) error {
- w.WriteInt32(int32(len(value)))
- w.WriteBytes([]byte(value))
- return nil
-}
-
-func decodeBytea(vr *ValueReader) []byte {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != ByteaOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []byte", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- return vr.ReadBytes(vr.Len())
-}
-
-func encodeByteSlice(w *WriteBuf, oid Oid, value []byte) error {
- w.WriteInt32(int32(len(value)))
- w.WriteBytes(value)
-
- return nil
-}
-
-func decodeJSON(vr *ValueReader, d interface{}) error {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != JsonOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into json", vr.Type().DataType)))
- }
-
- bytes := vr.ReadBytes(vr.Len())
- err := json.Unmarshal(bytes, d)
- if err != nil {
- vr.Fatal(err)
- }
- return err
-}
-
-func encodeJSON(w *WriteBuf, oid Oid, value interface{}) error {
- if oid != JsonOid {
- return fmt.Errorf("cannot encode JSON into oid %v", oid)
- }
-
- s, err := json.Marshal(value)
- if err != nil {
- return fmt.Errorf("Failed to encode json from type: %T", value)
- }
-
- w.WriteInt32(int32(len(s)))
- w.WriteBytes(s)
-
- return nil
-}
-
-func decodeJSONB(vr *ValueReader, d interface{}) error {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != JsonbOid {
- err := ProtocolError(fmt.Sprintf("Cannot decode oid %v into jsonb", vr.Type().DataType))
- vr.Fatal(err)
- return err
- }
-
- bytes := vr.ReadBytes(vr.Len())
- if vr.Type().FormatCode == BinaryFormatCode {
- if bytes[0] != 1 {
- err := ProtocolError(fmt.Sprintf("Unknown jsonb format byte: %x", bytes[0]))
- vr.Fatal(err)
- return err
- }
- bytes = bytes[1:]
- }
-
- err := json.Unmarshal(bytes, d)
- if err != nil {
- vr.Fatal(err)
- }
- return err
-}
-
-func encodeJSONB(w *WriteBuf, oid Oid, value interface{}) error {
- if oid != JsonbOid {
- return fmt.Errorf("cannot encode JSON into oid %v", oid)
- }
-
- s, err := json.Marshal(value)
- if err != nil {
- return fmt.Errorf("Failed to encode json from type: %T", value)
- }
-
- w.WriteInt32(int32(len(s) + 1))
- w.WriteByte(1) // JSONB format header
- w.WriteBytes(s)
-
- return nil
-}
-
-func decodeDate(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into time.Time"))
- return zeroTime
- }
-
- if vr.Type().DataType != DateOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an date: %d", vr.Len())))
- }
- dayOffset := vr.ReadInt32()
- return time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.Local)
-}
-
-func encodeTime(w *WriteBuf, oid Oid, value time.Time) error {
- switch oid {
- case DateOid:
- tUnix := time.Date(value.Year(), value.Month(), value.Day(), 0, 0, 0, 0, time.UTC).Unix()
- dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
-
- secSinceDateEpoch := tUnix - dateEpoch
- daysSinceDateEpoch := secSinceDateEpoch / 86400
-
- w.WriteInt32(4)
- w.WriteInt32(int32(daysSinceDateEpoch))
-
- return nil
- case TimestampTzOid, TimestampOid:
- microsecSinceUnixEpoch := value.Unix()*1000000 + int64(value.Nanosecond())/1000
- microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
-
- w.WriteInt32(8)
- w.WriteInt64(microsecSinceY2K)
-
- return nil
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "time.Time", oid)
- }
-}
-
-const microsecFromUnixEpochToY2K = 946684800 * 1000000
-
-func decodeTimestampTz(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into time.Time"))
- return zeroTime
- }
-
- if vr.Type().DataType != TimestampTzOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamptz: %d", vr.Len())))
- return zeroTime
- }
-
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
-}
-
-func decodeTimestamp(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into timestamp"))
- return zeroTime
- }
-
- if vr.Type().DataType != TimestampOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamp: %d", vr.Len())))
- return zeroTime
- }
-
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
-}
-
-func decodeInet(vr *ValueReader) net.IPNet {
- var zero net.IPNet
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into net.IPNet"))
- return zero
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zero
- }
-
- pgType := vr.Type()
- if pgType.DataType != InetOid && pgType.DataType != CidrOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into %s", pgType.DataType, pgType.Name)))
- return zero
- }
- if vr.Len() != 8 && vr.Len() != 20 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a %s: %d", pgType.Name, vr.Len())))
- return zero
- }
-
- vr.ReadByte() // ignore family
- bits := vr.ReadByte()
- vr.ReadByte() // ignore is_cidr
- addressLength := vr.ReadByte()
-
- var ipnet net.IPNet
- ipnet.IP = vr.ReadBytes(int32(addressLength))
- ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
-
- return ipnet
-}
-
-func encodeIPNet(w *WriteBuf, oid Oid, value net.IPNet) error {
- if oid != InetOid && oid != CidrOid {
- return fmt.Errorf("cannot encode %s into oid %v", "net.IPNet", oid)
- }
-
- var size int32
- var family byte
- switch len(value.IP) {
- case net.IPv4len:
- size = 8
- family = *w.conn.pgsqlAfInet
- case net.IPv6len:
- size = 20
- family = *w.conn.pgsqlAfInet6
- default:
- return fmt.Errorf("Unexpected IP length: %v", len(value.IP))
- }
-
- w.WriteInt32(size)
- w.WriteByte(family)
- ones, _ := value.Mask.Size()
- w.WriteByte(byte(ones))
- w.WriteByte(0) // is_cidr is ignored on server
- w.WriteByte(byte(len(value.IP)))
- w.WriteBytes(value.IP)
-
- return nil
-}
-
-func encodeIP(w *WriteBuf, oid Oid, value net.IP) error {
- if oid != InetOid && oid != CidrOid {
- return fmt.Errorf("cannot encode %s into oid %v", "net.IP", oid)
- }
-
- var ipnet net.IPNet
- ipnet.IP = value
- bitCount := len(value) * 8
- ipnet.Mask = net.CIDRMask(bitCount, bitCount)
- return encodeIPNet(w, oid, ipnet)
-}
-
-func decodeRecord(vr *ValueReader) []interface{} {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- if vr.Type().DataType != RecordOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []interface{}", vr.Type().DataType)))
- return nil
- }
-
- valueCount := vr.ReadInt32()
- record := make([]interface{}, 0, int(valueCount))
-
- for i := int32(0); i < valueCount; i++ {
- fd := FieldDescription{FormatCode: BinaryFormatCode}
- fieldVR := ValueReader{mr: vr.mr, fd: &fd}
- fd.DataType = vr.ReadOid()
- fieldVR.valueBytesRemaining = vr.ReadInt32()
- vr.valueBytesRemaining -= fieldVR.valueBytesRemaining
-
- switch fd.DataType {
- case BoolOid:
- record = append(record, decodeBool(&fieldVR))
- case ByteaOid:
- record = append(record, decodeBytea(&fieldVR))
- case Int8Oid:
- record = append(record, decodeInt8(&fieldVR))
- case Int2Oid:
- record = append(record, decodeInt2(&fieldVR))
- case Int4Oid:
- record = append(record, decodeInt4(&fieldVR))
- case OidOid:
- record = append(record, decodeOid(&fieldVR))
- case Float4Oid:
- record = append(record, decodeFloat4(&fieldVR))
- case Float8Oid:
- record = append(record, decodeFloat8(&fieldVR))
- case DateOid:
- record = append(record, decodeDate(&fieldVR))
- case TimestampTzOid:
- record = append(record, decodeTimestampTz(&fieldVR))
- case TimestampOid:
- record = append(record, decodeTimestamp(&fieldVR))
- case InetOid, CidrOid:
- record = append(record, decodeInet(&fieldVR))
- case TextOid, VarcharOid, UnknownOid:
- record = append(record, decodeText(&fieldVR))
- default:
- vr.Fatal(fmt.Errorf("decodeRecord cannot decode oid %d", fd.DataType))
- return nil
- }
-
- // Consume any remaining data
- if fieldVR.Len() > 0 {
- fieldVR.ReadBytes(fieldVR.Len())
- }
-
- if fieldVR.Err() != nil {
- vr.Fatal(fieldVR.Err())
- return nil
- }
- }
-
- return record
-}
-
-func decode1dArrayHeader(vr *ValueReader) (length int32, err error) {
- numDims := vr.ReadInt32()
- if numDims > 1 {
- return 0, ProtocolError(fmt.Sprintf("Expected array to have 0 or 1 dimension, but it had %v", numDims))
- }
-
- vr.ReadInt32() // 0 if no nulls / 1 if there is one or more nulls -- but we don't care
- vr.ReadInt32() // element oid
-
- if numDims == 0 {
- return 0, nil
- }
-
- length = vr.ReadInt32()
-
- idxFirstElem := vr.ReadInt32()
- if idxFirstElem != 1 {
- return 0, ProtocolError(fmt.Sprintf("Expected array's first element to start a index 1, but it is %d", idxFirstElem))
- }
-
- return length, nil
-}
-
-func decodeBoolArray(vr *ValueReader) []bool {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != BoolArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []bool", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]bool, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 1:
- if vr.ReadByte() == 1 {
- a[i] = true
- }
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an bool element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeBoolSlice(w *WriteBuf, oid Oid, slice []bool) error {
- if oid != BoolArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]bool", oid)
- }
-
- encodeArrayHeader(w, BoolOid, len(slice), 5)
- for _, v := range slice {
- w.WriteInt32(1)
- var b byte
- if v {
- b = 1
- }
- w.WriteByte(b)
- }
-
- return nil
-}
-
-func decodeByteaArray(vr *ValueReader) [][]byte {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != ByteaArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into [][]byte", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([][]byte, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- a[i] = vr.ReadBytes(elSize)
- }
- }
-
- return a
-}
-
-func encodeByteSliceSlice(w *WriteBuf, oid Oid, value [][]byte) error {
- if oid != ByteaArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[][]byte", oid)
- }
-
- size := 20 // array header size
- for _, el := range value {
- size += 4 + len(el)
- }
-
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(ByteaOid)) // type of elements
- w.WriteInt32(int32(len(value))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, el := range value {
- encodeByteSlice(w, ByteaOid, el)
- }
-
- return nil
-}
-
-func decodeInt2Array(vr *ValueReader) []int16 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int2ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int16", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int16, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 2:
- a[i] = vr.ReadInt16()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt2ArrayToUInt(vr *ValueReader) []uint16 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int2ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint16", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint16, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 2:
- tmp := vr.ReadInt16()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint16", tmp)))
- return nil
- }
- a[i] = uint16(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt16Slice(w *WriteBuf, oid Oid, slice []int16) error {
- if oid != Int2ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int16", oid)
- }
-
- encodeArrayHeader(w, Int2Oid, len(slice), 6)
- for _, v := range slice {
- w.WriteInt32(2)
- w.WriteInt16(v)
- }
-
- return nil
-}
-
-func encodeUInt16Slice(w *WriteBuf, oid Oid, slice []uint16) error {
- if oid != Int2ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint16", oid)
- }
-
- encodeArrayHeader(w, Int2Oid, len(slice), 6)
- for _, v := range slice {
- if v <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(v))
- } else {
- return fmt.Errorf("%d is greater than max smallint %d", v, math.MaxInt16)
- }
- }
-
- return nil
-}
-
-func decodeInt4Array(vr *ValueReader) []int32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- a[i] = vr.ReadInt32()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt4ArrayToUInt(vr *ValueReader) []uint32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- tmp := vr.ReadInt32()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint32", tmp)))
- return nil
- }
- a[i] = uint32(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt32Slice(w *WriteBuf, oid Oid, slice []int32) error {
- if oid != Int4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int32", oid)
- }
-
- encodeArrayHeader(w, Int4Oid, len(slice), 8)
- for _, v := range slice {
- w.WriteInt32(4)
- w.WriteInt32(v)
- }
-
- return nil
-}
-
-func encodeUInt32Slice(w *WriteBuf, oid Oid, slice []uint32) error {
- if oid != Int4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint32", oid)
- }
-
- encodeArrayHeader(w, Int4Oid, len(slice), 8)
- for _, v := range slice {
- if v <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(v))
- } else {
- return fmt.Errorf("%d is greater than max integer %d", v, math.MaxInt32)
- }
- }
-
- return nil
-}
-
-func decodeInt8Array(vr *ValueReader) []int64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- a[i] = vr.ReadInt64()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt8ArrayToUInt(vr *ValueReader) []uint64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- tmp := vr.ReadInt64()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint64", tmp)))
- return nil
- }
- a[i] = uint64(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt64Slice(w *WriteBuf, oid Oid, slice []int64) error {
- if oid != Int8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int64", oid)
- }
-
- encodeArrayHeader(w, Int8Oid, len(slice), 12)
- for _, v := range slice {
- w.WriteInt32(8)
- w.WriteInt64(v)
- }
-
- return nil
-}
-
-func encodeUInt64Slice(w *WriteBuf, oid Oid, slice []uint64) error {
- if oid != Int8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint64", oid)
- }
-
- encodeArrayHeader(w, Int8Oid, len(slice), 12)
- for _, v := range slice {
- if v <= math.MaxInt64 {
- w.WriteInt32(8)
- w.WriteInt64(int64(v))
- } else {
- return fmt.Errorf("%d is greater than max bigint %d", v, int64(math.MaxInt64))
- }
- }
-
- return nil
-}
-
-func decodeFloat4Array(vr *ValueReader) []float32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Float4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]float32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- n := vr.ReadInt32()
- a[i] = math.Float32frombits(uint32(n))
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4 element: %d", elSize)))
- return nil
+ return nil, err
}
- }
-
- return a
-}
-
-func encodeFloat32Slice(w *WriteBuf, oid Oid, slice []float32) error {
- if oid != Float4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]float32", oid)
- }
-
- encodeArrayHeader(w, Float4Oid, len(slice), 8)
- for _, v := range slice {
- w.WriteInt32(4)
- w.WriteInt32(int32(math.Float32bits(v)))
- }
-
- return nil
-}
-
-func decodeFloat8Array(vr *ValueReader) []float64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Float8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]float64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- n := vr.ReadInt64()
- a[i] = math.Float64frombits(uint64(n))
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4 element: %d", elSize)))
- return nil
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
+ return buf, nil
+ case string:
+ buf = pgio.AppendInt32(buf, int32(len(arg)))
+ buf = append(buf, arg...)
+ return buf, nil
}
- return a
-}
-
-func encodeFloat64Slice(w *WriteBuf, oid Oid, slice []float64) error {
- if oid != Float8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]float64", oid)
- }
-
- encodeArrayHeader(w, Float8Oid, len(slice), 12)
- for _, v := range slice {
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(v)))
- }
-
- return nil
-}
-
-func decodeTextArray(vr *ValueReader) []string {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != TextArrayOid && vr.Type().DataType != VarcharArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []string", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
+ refVal := reflect.ValueOf(arg)
- a := make([]string, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- if elSize == -1 {
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
+ if refVal.Kind() == reflect.Ptr {
+ if refVal.IsNil() {
+ return pgio.AppendInt32(buf, -1), nil
}
-
- a[i] = vr.ReadString(elSize)
+ arg = refVal.Elem().Interface()
+ return encodePreparedStatementArgument(ci, buf, oid, arg)
}
- return a
-}
-
-// escapeAclItem escapes an AclItem before it is added to
-// its aclitem[] string representation. The PostgreSQL aclitem
-// datatype itself can need escapes because it follows the
-// formatting rules of SQL identifiers. Think of this function
-// as escaping the escapes, so that PostgreSQL's array parser
-// will do the right thing.
-func escapeAclItem(acl string) (string, error) {
- var escapedAclItem bytes.Buffer
- reader := strings.NewReader(acl)
- for {
- rn, _, err := reader.ReadRune()
+ if dt, ok := ci.DataTypeForOID(oid); ok {
+ value := dt.Value
+ err := value.Set(arg)
if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error.
- return escapedAclItem.String(), nil
+ {
+ if arg, ok := arg.(driver.Valuer); ok {
+ v, err := callValuerValue(arg)
+ if err != nil {
+ return nil, err
+ }
+ return encodePreparedStatementArgument(ci, buf, oid, v)
+ }
}
- // This error was not expected
- return "", err
- }
- if needsEscape(rn) {
- escapedAclItem.WriteRune('\\')
- }
- escapedAclItem.WriteRune(rn)
- }
-}
-
-// needsEscape determines whether or not a rune needs escaping
-// before being placed in the textual representation of an
-// aclitem[] array.
-func needsEscape(rn rune) bool {
- return rn == '\\' || rn == ',' || rn == '"' || rn == '}'
-}
-
-// encodeAclItemSlice encodes a slice of AclItems in
-// their textual represention for PostgreSQL.
-func encodeAclItemSlice(w *WriteBuf, oid Oid, aclitems []AclItem) error {
- strs := make([]string, len(aclitems))
- var escapedAclItem string
- var err error
- for i := range strs {
- escapedAclItem, err = escapeAclItem(string(aclitems[i]))
- if err != nil {
- return err
- }
- strs[i] = string(escapedAclItem)
- }
-
- var buf bytes.Buffer
- buf.WriteRune('{')
- buf.WriteString(strings.Join(strs, ","))
- buf.WriteRune('}')
- str := buf.String()
- w.WriteInt32(int32(len(str)))
- w.WriteBytes([]byte(str))
- return nil
-}
-// parseAclItemArray parses the textual representation
-// of the aclitem[] type. The textual representation is chosen because
-// Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin).
-// See https://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-// for formatting notes.
-func parseAclItemArray(arr string) ([]AclItem, error) {
- reader := strings.NewReader(arr)
- // Difficult to guess a performant initial capacity for a slice of
- // aclitems, but let's go with 5.
- aclItems := make([]AclItem, 0, 5)
- // A single value
- aclItem := AclItem("")
- for {
- // Grab the first/next/last rune to see if we are dealing with a
- // quoted value, an unquoted value, or the end of the string.
- rn, _, err := reader.ReadRune()
- if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error.
- return aclItems, nil
- }
- // This error was not expected
return nil, err
}
- if rn == '"' {
- // Discard the opening quote of the quoted value.
- aclItem, err = parseQuotedAclItem(reader)
- } else {
- // We have just read the first rune of an unquoted (bare) value;
- // put it back so that ParseBareValue can read it.
- err := reader.UnreadRune()
- if err != nil {
- return nil, err
- }
- aclItem, err = parseBareAclItem(reader)
- }
-
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := value.(pgtype.BinaryEncoder).EncodeBinary(ci, buf)
if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error..
- aclItems = append(aclItems, aclItem)
- return aclItems, nil
- }
- // This error was not expected.
return nil, err
}
- aclItems = append(aclItems, aclItem)
- }
-}
-
-// parseBareAclItem parses a bare (unquoted) aclitem from reader
-func parseBareAclItem(reader *strings.Reader) (AclItem, error) {
- var aclItem bytes.Buffer
- for {
- rn, _, err := reader.ReadRune()
- if err != nil {
- // Return the read value in case the error is a harmless io.EOF.
- // (io.EOF marks the end of a bare aclitem at the end of a string)
- return AclItem(aclItem.String()), err
- }
- if rn == ',' {
- // A comma marks the end of a bare aclitem.
- return AclItem(aclItem.String()), nil
- } else {
- aclItem.WriteRune(rn)
- }
- }
-}
-
-// parseQuotedAclItem parses an aclitem which is in double quotes from reader
-func parseQuotedAclItem(reader *strings.Reader) (AclItem, error) {
- var aclItem bytes.Buffer
- for {
- rn, escaped, err := readPossiblyEscapedRune(reader)
- if err != nil {
- if err == io.EOF {
- // Even when it is the last value, the final rune of
- // a quoted aclitem should be the final closing quote, not io.EOF.
- return AclItem(""), fmt.Errorf("unexpected end of quoted value")
- }
- // Return the read aclitem in case the error is a harmless io.EOF,
- // which will be determined by the caller.
- return AclItem(aclItem.String()), err
- }
- if !escaped && rn == '"' {
- // An unescaped double quote marks the end of a quoted value.
- // The next rune should either be a comma or the end of the string.
- rn, _, err := reader.ReadRune()
- if err != nil {
- // Return the read value in case the error is a harmless io.EOF,
- // which will be determined by the caller.
- return AclItem(aclItem.String()), err
- }
- if rn != ',' {
- return AclItem(""), fmt.Errorf("unexpected rune after quoted value")
- }
- return AclItem(aclItem.String()), nil
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
- aclItem.WriteRune(rn)
+ return buf, nil
}
-}
-// Returns the next rune from r, unless it is a backslash;
-// in that case, it returns the rune after the backslash. The second
-// return value tells us whether or not the rune was
-// preceeded by a backslash (escaped).
-func readPossiblyEscapedRune(reader *strings.Reader) (rune, bool, error) {
- rn, _, err := reader.ReadRune()
- if err != nil {
- return 0, false, err
- }
- if rn == '\\' {
- // Discard the backslash and read the next rune.
- rn, _, err = reader.ReadRune()
+ if arg, ok := arg.(driver.Valuer); ok {
+ v, err := callValuerValue(arg)
if err != nil {
- return 0, false, err
- }
- return rn, true, nil
- }
- return rn, false, nil
-}
-
-func decodeAclItemArray(vr *ValueReader) []AclItem {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into []AclItem"))
- return nil
- }
-
- str := vr.ReadString(vr.Len())
-
- // Short-circuit empty array.
- if str == "{}" {
- return []AclItem{}
- }
-
- // Remove the '{' at the front and the '}' at the end,
- // so that parseAclItemArray doesn't have to deal with them.
- str = str[1 : len(str)-1]
- aclItems, err := parseAclItemArray(str)
- if err != nil {
- vr.Fatal(ProtocolError(err.Error()))
- return nil
- }
- return aclItems
-}
-
-func encodeStringSlice(w *WriteBuf, oid Oid, slice []string) error {
- var elOid Oid
- switch oid {
- case VarcharArrayOid:
- elOid = VarcharOid
- case TextArrayOid:
- elOid = TextOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]string", oid)
- }
-
- var totalStringSize int
- for _, v := range slice {
- totalStringSize += len(v)
- }
-
- size := 20 + len(slice)*4 + totalStringSize
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, v := range slice {
- w.WriteInt32(int32(len(v)))
- w.WriteBytes([]byte(v))
- }
-
- return nil
-}
-
-func decodeTimestampArray(vr *ValueReader) []time.Time {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != TimestampArrayOid && vr.Type().DataType != TimestampTzArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []time.Time", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]time.Time, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- a[i] = time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an time.Time element: %d", elSize)))
- return nil
+ return nil, err
}
+ return encodePreparedStatementArgument(ci, buf, oid, v)
}
- return a
-}
-
-func encodeTimeSlice(w *WriteBuf, oid Oid, slice []time.Time) error {
- var elOid Oid
- switch oid {
- case TimestampArrayOid:
- elOid = TimestampOid
- case TimestampTzArrayOid:
- elOid = TimestampTzOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]time.Time", oid)
- }
-
- encodeArrayHeader(w, int(elOid), len(slice), 12)
- for _, t := range slice {
- w.WriteInt32(8)
- microsecSinceUnixEpoch := t.Unix()*1000000 + int64(t.Nanosecond())/1000
- microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
- w.WriteInt64(microsecSinceY2K)
+ if strippedArg, ok := stripNamedType(&refVal); ok {
+ return encodePreparedStatementArgument(ci, buf, oid, strippedArg)
}
-
- return nil
+ return nil, SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
}
-func decodeInetArray(vr *ValueReader) []net.IPNet {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != InetArrayOid && vr.Type().DataType != CidrArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []net.IP", vr.Type().DataType)))
- return nil
+// chooseParameterFormatCode determines the correct format code for an
+// argument to a prepared statement. It defaults to TextFormatCode if no
+// determination can be made.
+func chooseParameterFormatCode(ci *pgtype.ConnInfo, oid pgtype.OID, arg interface{}) int16 {
+ switch arg.(type) {
+ case pgtype.BinaryEncoder:
+ return BinaryFormatCode
+ case string, *string, pgtype.TextEncoder:
+ return TextFormatCode
}
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
+ if dt, ok := ci.DataTypeForOID(oid); ok {
+ if _, ok := dt.Value.(pgtype.BinaryEncoder); ok {
+ if arg, ok := arg.(driver.Valuer); ok {
+ if err := dt.Value.Set(arg); err != nil {
+ if value, err := callValuerValue(arg); err == nil {
+ if _, ok := value.(string); ok {
+ return TextFormatCode
+ }
+ }
+ }
+ }
- a := make([]net.IPNet, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- if elSize == -1 {
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
+ return BinaryFormatCode
}
-
- vr.ReadByte() // ignore family
- bits := vr.ReadByte()
- vr.ReadByte() // ignore is_cidr
- addressLength := vr.ReadByte()
-
- var ipnet net.IPNet
- ipnet.IP = vr.ReadBytes(int32(addressLength))
- ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
-
- a[i] = ipnet
- }
-
- return a
-}
-
-func encodeIPNetSlice(w *WriteBuf, oid Oid, slice []net.IPNet) error {
- var elOid Oid
- switch oid {
- case InetArrayOid:
- elOid = InetOid
- case CidrArrayOid:
- elOid = CidrOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid)
- }
-
- size := int32(20) // array header size
- for _, ipnet := range slice {
- size += 4 + 4 + int32(len(ipnet.IP)) // size of element + inet/cidr metadata + IP bytes
- }
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, ipnet := range slice {
- encodeIPNet(w, elOid, ipnet)
}
- return nil
+ return TextFormatCode
}
-func encodeIPSlice(w *WriteBuf, oid Oid, slice []net.IP) error {
- var elOid Oid
- switch oid {
- case InetArrayOid:
- elOid = InetOid
- case CidrArrayOid:
- elOid = CidrOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid)
- }
-
- size := int32(20) // array header size
- for _, ip := range slice {
- size += 4 + 4 + int32(len(ip)) // size of element + inet/cidr metadata + IP bytes
- }
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, ip := range slice {
- encodeIP(w, elOid, ip)
+func stripNamedType(val *reflect.Value) (interface{}, bool) {
+ switch val.Kind() {
+ case reflect.Int:
+ convVal := int(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int8:
+ convVal := int8(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int16:
+ convVal := int16(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int32:
+ convVal := int32(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int64:
+ convVal := int64(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint:
+ convVal := uint(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint8:
+ convVal := uint8(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint16:
+ convVal := uint16(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint32:
+ convVal := uint32(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint64:
+ convVal := uint64(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.String:
+ convVal := val.String()
+ return convVal, reflect.TypeOf(convVal) != val.Type()
}
- return nil
-}
-
-func encodeArrayHeader(w *WriteBuf, oid, length, sizePerItem int) {
- w.WriteInt32(int32(20 + length*sizePerItem))
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(oid)) // type of elements
- w.WriteInt32(int32(length)) // number of elements
- w.WriteInt32(1) // index of first element
+ return nil, false
}
diff --git a/vendor/github.com/jackc/pgx/values_test.go b/vendor/github.com/jackc/pgx/values_test.go
deleted file mode 100644
index 42d5bd3..0000000
--- a/vendor/github.com/jackc/pgx/values_test.go
+++ /dev/null
@@ -1,1183 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "net"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestDateTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- dates := []time.Time{
- time.Date(1, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1000, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1600, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1700, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1800, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local),
- time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2001, 1, 2, 0, 0, 0, 0, time.Local),
- time.Date(2004, 2, 29, 0, 0, 0, 0, time.Local),
- time.Date(2013, 7, 4, 0, 0, 0, 0, time.Local),
- time.Date(2013, 12, 25, 0, 0, 0, 0, time.Local),
- time.Date(2029, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2081, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2096, 2, 29, 0, 0, 0, 0, time.Local),
- time.Date(2550, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(9999, 12, 31, 0, 0, 0, 0, time.Local),
- }
-
- for _, actualDate := range dates {
- var d time.Time
-
- err := conn.QueryRow("select $1::date", actualDate).Scan(&d)
- if err != nil {
- t.Fatalf("Unexpected failure on QueryRow Scan: %v", err)
- }
- if !actualDate.Equal(d) {
- t.Errorf("Did not transcode date successfully: %v is not %v", d, actualDate)
- }
- }
-}
-
-func TestTimestampTzTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- inputTime := time.Date(2013, 1, 2, 3, 4, 5, 6000, time.Local)
-
- var outputTime time.Time
-
- err := conn.QueryRow("select $1::timestamptz", inputTime).Scan(&outputTime)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if !inputTime.Equal(outputTime) {
- t.Errorf("Did not transcode time successfully: %v is not %v", outputTime, inputTime)
- }
-
- err = conn.QueryRow("select $1::timestamptz", inputTime).Scan(&outputTime)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if !inputTime.Equal(outputTime) {
- t.Errorf("Did not transcode time successfully: %v is not %v", outputTime, inputTime)
- }
-}
-
-func TestJsonAndJsonbTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
-
- for _, format := range []int16{pgx.TextFormatCode, pgx.BinaryFormatCode} {
- pgtype := conn.PgTypes[oid]
- pgtype.DefaultFormat = format
- conn.PgTypes[oid] = pgtype
-
- typename := conn.PgTypes[oid].Name
-
- testJsonString(t, conn, typename, format)
- testJsonStringPointer(t, conn, typename, format)
- testJsonSingleLevelStringMap(t, conn, typename, format)
- testJsonNestedMap(t, conn, typename, format)
- testJsonStringArray(t, conn, typename, format)
- testJsonInt64Array(t, conn, typename, format)
- testJsonInt16ArrayFailureDueToOverflow(t, conn, typename, format)
- testJsonStruct(t, conn, typename, format)
- }
- }
-}
-
-func testJsonString(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := `{"key": "value"}`
- expectedOutput := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(expectedOutput, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, expectedOutput, output)
- return
- }
-}
-
-func testJsonStringPointer(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := `{"key": "value"}`
- expectedOutput := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, &input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(expectedOutput, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, expectedOutput, output)
- return
- }
-}
-
-func testJsonSingleLevelStringMap(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, input, output)
- return
- }
-}
-
-func testJsonNestedMap(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := map[string]interface{}{
- "name": "Uncanny",
- "stats": map[string]interface{}{"hp": float64(107), "maxhp": float64(150)},
- "inventory": []interface{}{"phone", "key"},
- }
- var output map[string]interface{}
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode map[string]interface{} successfully: %v is not %v", typename, format, input, output)
- return
- }
-}
-
-func testJsonStringArray(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []string{"foo", "bar", "baz"}
- var output []string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode []string successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func testJsonInt64Array(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []int64{1, 2, 234432}
- var output []int64
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode []int64 successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func testJsonInt16ArrayFailureDueToOverflow(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []int{1, 2, 234432}
- var output []int16
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err == nil || err.Error() != "can't scan into dest[0]: json: cannot unmarshal number 234432 into Go value of type int16" {
- t.Errorf("%s %d: Expected *json.UnmarkalTypeError, but got %v", typename, format, err)
- }
-}
-
-func testJsonStruct(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- type person struct {
- Name string `json:"name"`
- Age int `json:"age"`
- }
-
- input := person{
- Name: "John",
- Age: 42,
- }
-
- var output person
-
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode struct successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func mustParseCIDR(t *testing.T, s string) net.IPNet {
- _, ipnet, err := net.ParseCIDR(s)
- if err != nil {
- t.Fatal(err)
- }
-
- return *ipnet
-}
-
-func TestStringToNotTextTypeTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- input := "01086ee0-4963-4e35-9116-30c173a8d0bd"
-
- var output string
- err := conn.QueryRow("select $1::uuid", input).Scan(&output)
- if err != nil {
- t.Fatal(err)
- }
- if input != output {
- t.Errorf("uuid: Did not transcode string successfully: %s is not %s", input, output)
- }
-
- err = conn.QueryRow("select $1::uuid", &input).Scan(&output)
- if err != nil {
- t.Fatal(err)
- }
- if input != output {
- t.Errorf("uuid: Did not transcode pointer to string successfully: %s is not %s", input, output)
- }
-}
-
-func TestInetCidrTranscodeIPNet(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value net.IPNet
- }{
- {"select $1::inet", mustParseCIDR(t, "0.0.0.0/32")},
- {"select $1::inet", mustParseCIDR(t, "127.0.0.1/32")},
- {"select $1::inet", mustParseCIDR(t, "12.34.56.0/32")},
- {"select $1::inet", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::inet", mustParseCIDR(t, "255.0.0.0/8")},
- {"select $1::inet", mustParseCIDR(t, "255.255.255.255/32")},
- {"select $1::inet", mustParseCIDR(t, "::/128")},
- {"select $1::inet", mustParseCIDR(t, "::/0")},
- {"select $1::inet", mustParseCIDR(t, "::1/128")},
- {"select $1::inet", mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128")},
- {"select $1::cidr", mustParseCIDR(t, "0.0.0.0/32")},
- {"select $1::cidr", mustParseCIDR(t, "127.0.0.1/32")},
- {"select $1::cidr", mustParseCIDR(t, "12.34.56.0/32")},
- {"select $1::cidr", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::cidr", mustParseCIDR(t, "255.0.0.0/8")},
- {"select $1::cidr", mustParseCIDR(t, "255.255.255.255/32")},
- {"select $1::cidr", mustParseCIDR(t, "::/128")},
- {"select $1::cidr", mustParseCIDR(t, "::/0")},
- {"select $1::cidr", mustParseCIDR(t, "::1/128")},
- {"select $1::cidr", mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128")},
- }
-
- for i, tt := range tests {
- var actual net.IPNet
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if actual.String() != tt.value.String() {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrTranscodeIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value net.IP
- }{
- {"select $1::inet", net.ParseIP("0.0.0.0")},
- {"select $1::inet", net.ParseIP("127.0.0.1")},
- {"select $1::inet", net.ParseIP("12.34.56.0")},
- {"select $1::inet", net.ParseIP("255.255.255.255")},
- {"select $1::inet", net.ParseIP("::1")},
- {"select $1::inet", net.ParseIP("2607:f8b0:4009:80b::200e")},
- {"select $1::cidr", net.ParseIP("0.0.0.0")},
- {"select $1::cidr", net.ParseIP("127.0.0.1")},
- {"select $1::cidr", net.ParseIP("12.34.56.0")},
- {"select $1::cidr", net.ParseIP("255.255.255.255")},
- {"select $1::cidr", net.ParseIP("::1")},
- {"select $1::cidr", net.ParseIP("2607:f8b0:4009:80b::200e")},
- }
-
- for i, tt := range tests {
- var actual net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !actual.Equal(tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failTests := []struct {
- sql string
- value net.IPNet
- }{
- {"select $1::inet", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::cidr", mustParseCIDR(t, "192.168.1.0/24")},
- }
- for i, tt := range failTests {
- var actual net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if !strings.Contains(err.Error(), "Cannot decode netmask") {
- t.Errorf("%d. Expected failure cannot decode netmask, but got: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrArrayTranscodeIPNet(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value []net.IPNet
- }{
- {
- "select $1::inet[]",
- []net.IPNet{
- mustParseCIDR(t, "0.0.0.0/32"),
- mustParseCIDR(t, "127.0.0.1/32"),
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- mustParseCIDR(t, "255.0.0.0/8"),
- mustParseCIDR(t, "255.255.255.255/32"),
- mustParseCIDR(t, "::/128"),
- mustParseCIDR(t, "::/0"),
- mustParseCIDR(t, "::1/128"),
- mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IPNet{
- mustParseCIDR(t, "0.0.0.0/32"),
- mustParseCIDR(t, "127.0.0.1/32"),
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- mustParseCIDR(t, "255.0.0.0/8"),
- mustParseCIDR(t, "255.255.255.255/32"),
- mustParseCIDR(t, "::/128"),
- mustParseCIDR(t, "::/0"),
- mustParseCIDR(t, "::1/128"),
- mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128"),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []net.IPNet
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrArrayTranscodeIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value []net.IP
- }{
- {
- "select $1::inet[]",
- []net.IP{
- net.ParseIP("0.0.0.0"),
- net.ParseIP("127.0.0.1"),
- net.ParseIP("12.34.56.0"),
- net.ParseIP("255.255.255.255"),
- net.ParseIP("2607:f8b0:4009:80b::200e"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IP{
- net.ParseIP("0.0.0.0"),
- net.ParseIP("127.0.0.1"),
- net.ParseIP("12.34.56.0"),
- net.ParseIP("255.255.255.255"),
- net.ParseIP("2607:f8b0:4009:80b::200e"),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failTests := []struct {
- sql string
- value []net.IPNet
- }{
- {
- "select $1::inet[]",
- []net.IPNet{
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IPNet{
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- },
- },
- }
-
- for i, tt := range failTests {
- var actual []net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err == nil || !strings.Contains(err.Error(), "Cannot decode netmask") {
- t.Errorf("%d. Expected failure cannot decode netmask, but got: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrTranscodeWithJustIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value string
- }{
- {"select $1::inet", "0.0.0.0/32"},
- {"select $1::inet", "127.0.0.1/32"},
- {"select $1::inet", "12.34.56.0/32"},
- {"select $1::inet", "255.255.255.255/32"},
- {"select $1::inet", "::/128"},
- {"select $1::inet", "2607:f8b0:4009:80b::200e/128"},
- {"select $1::cidr", "0.0.0.0/32"},
- {"select $1::cidr", "127.0.0.1/32"},
- {"select $1::cidr", "12.34.56.0/32"},
- {"select $1::cidr", "255.255.255.255/32"},
- {"select $1::cidr", "::/128"},
- {"select $1::cidr", "2607:f8b0:4009:80b::200e/128"},
- }
-
- for i, tt := range tests {
- expected := mustParseCIDR(t, tt.value)
- var actual net.IPNet
-
- err := conn.QueryRow(tt.sql, expected.IP).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if actual.String() != expected.String() {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestNullX(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s pgx.NullString
- i16 pgx.NullInt16
- i32 pgx.NullInt32
- c pgx.NullChar
- a pgx.NullAclItem
- n pgx.NullName
- oid pgx.NullOid
- xid pgx.NullXid
- cid pgx.NullCid
- tid pgx.NullTid
- i64 pgx.NullInt64
- f32 pgx.NullFloat32
- f64 pgx.NullFloat64
- b pgx.NullBool
- t pgx.NullTime
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{pgx.NullString{String: "foo", Valid: true}}, []interface{}{&actual.s}, allTypes{s: pgx.NullString{String: "foo", Valid: true}}},
- {"select $1::text", []interface{}{pgx.NullString{String: "foo", Valid: false}}, []interface{}{&actual.s}, allTypes{s: pgx.NullString{String: "", Valid: false}}},
- {"select $1::int2", []interface{}{pgx.NullInt16{Int16: 1, Valid: true}}, []interface{}{&actual.i16}, allTypes{i16: pgx.NullInt16{Int16: 1, Valid: true}}},
- {"select $1::int2", []interface{}{pgx.NullInt16{Int16: 1, Valid: false}}, []interface{}{&actual.i16}, allTypes{i16: pgx.NullInt16{Int16: 0, Valid: false}}},
- {"select $1::int4", []interface{}{pgx.NullInt32{Int32: 1, Valid: true}}, []interface{}{&actual.i32}, allTypes{i32: pgx.NullInt32{Int32: 1, Valid: true}}},
- {"select $1::int4", []interface{}{pgx.NullInt32{Int32: 1, Valid: false}}, []interface{}{&actual.i32}, allTypes{i32: pgx.NullInt32{Int32: 0, Valid: false}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 1, Valid: true}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 1, Valid: true}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 1, Valid: false}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 0, Valid: false}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 4294967295, Valid: true}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 4294967295, Valid: true}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 1, Valid: true}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 1, Valid: true}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 1, Valid: false}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 0, Valid: false}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 4294967295, Valid: true}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 4294967295, Valid: true}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 1, Valid: true}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 1, Valid: true}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 1, Valid: false}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 0, Valid: false}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 255, Valid: true}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 255, Valid: true}}},
- {"select $1::name", []interface{}{pgx.NullName{Name: "foo", Valid: true}}, []interface{}{&actual.n}, allTypes{n: pgx.NullName{Name: "foo", Valid: true}}},
- {"select $1::name", []interface{}{pgx.NullName{Name: "foo", Valid: false}}, []interface{}{&actual.n}, allTypes{n: pgx.NullName{Name: "", Valid: false}}},
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: true}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: true}}},
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: false}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: "", Valid: false}}},
- // A tricky (and valid) aclitem can still be used, especially with Go's useful backticks
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 1, Valid: true}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 1, Valid: true}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 1, Valid: false}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 0, Valid: false}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 4294967295, Valid: true}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 4294967295, Valid: true}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: true}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: true}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: false}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 0, OffsetNumber: 0}, Valid: false}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 4294967295, OffsetNumber: 65535}, Valid: true}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 4294967295, OffsetNumber: 65535}, Valid: true}}},
- {"select $1::int8", []interface{}{pgx.NullInt64{Int64: 1, Valid: true}}, []interface{}{&actual.i64}, allTypes{i64: pgx.NullInt64{Int64: 1, Valid: true}}},
- {"select $1::int8", []interface{}{pgx.NullInt64{Int64: 1, Valid: false}}, []interface{}{&actual.i64}, allTypes{i64: pgx.NullInt64{Int64: 0, Valid: false}}},
- {"select $1::float4", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: true}}, []interface{}{&actual.f32}, allTypes{f32: pgx.NullFloat32{Float32: 1.23, Valid: true}}},
- {"select $1::float4", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: false}}, []interface{}{&actual.f32}, allTypes{f32: pgx.NullFloat32{Float32: 0, Valid: false}}},
- {"select $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.f64}, allTypes{f64: pgx.NullFloat64{Float64: 1.23, Valid: true}}},
- {"select $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: false}}, []interface{}{&actual.f64}, allTypes{f64: pgx.NullFloat64{Float64: 0, Valid: false}}},
- {"select $1::bool", []interface{}{pgx.NullBool{Bool: true, Valid: true}}, []interface{}{&actual.b}, allTypes{b: pgx.NullBool{Bool: true, Valid: true}}},
- {"select $1::bool", []interface{}{pgx.NullBool{Bool: true, Valid: false}}, []interface{}{&actual.b}, allTypes{b: pgx.NullBool{Bool: false, Valid: false}}},
- {"select $1::timestamptz", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}},
- {"select $1::timestamptz", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select $1::timestamp", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}},
- {"select $1::timestamp", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select $1::date", []interface{}{pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}}},
- {"select $1::date", []interface{}{pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select 42::int4, $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.i32, &actual.f64}, allTypes{i32: pgx.NullInt32{Int32: 42, Valid: true}, f64: pgx.NullFloat64{Float64: 1.23, Valid: true}}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func assertAclItemSlicesEqual(t *testing.T, query, scan []pgx.AclItem) {
- if !reflect.DeepEqual(query, scan) {
- t.Errorf("failed to encode aclitem[]\n EXPECTED: %d %v\n ACTUAL: %d %v", len(query), query, len(scan), scan)
- }
-}
-
-func TestAclArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::aclitem[]"
- var scan []pgx.AclItem
-
- tests := []struct {
- query []pgx.AclItem
- }{
- {
- []pgx.AclItem{},
- },
- {
- []pgx.AclItem{"=r/postgres"},
- },
- {
- []pgx.AclItem{"=r/postgres", "postgres=arwdDxt/postgres"},
- },
- {
- []pgx.AclItem{"=r/postgres", "postgres=arwdDxt/postgres", `postgres=arwdDxt/" tricky, ' } "" \ test user "`},
- },
- }
- for i, tt := range tests {
- err := conn.QueryRow(sql, tt.query).Scan(&scan)
- if err != nil {
- // t.Errorf(`%d. error reading array: %v`, i, err)
- t.Errorf(`%d. error reading array: %v query: %s`, i, err, tt.query)
- if pgerr, ok := err.(pgx.PgError); ok {
- t.Errorf(`%d. error reading array (detail): %s`, i, pgerr.Detail)
- }
- continue
- }
- assertAclItemSlicesEqual(t, tt.query, scan)
- ensureConnValid(t, conn)
- }
-}
-
-func TestArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- query interface{}
- scan interface{}
- assert func(*testing.T, interface{}, interface{})
- }{
- {
- "select $1::bool[]", []bool{true, false, true}, &[]bool{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]bool))) {
- t.Errorf("failed to encode bool[]")
- }
- },
- },
- {
- "select $1::smallint[]", []int16{2, 4, 484, 32767}, &[]int16{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int16))) {
- t.Errorf("failed to encode smallint[]")
- }
- },
- },
- {
- "select $1::smallint[]", []uint16{2, 4, 484, 32767}, &[]uint16{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint16))) {
- t.Errorf("failed to encode smallint[]")
- }
- },
- },
- {
- "select $1::int[]", []int32{2, 4, 484}, &[]int32{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int32))) {
- t.Errorf("failed to encode int[]")
- }
- },
- },
- {
- "select $1::int[]", []uint32{2, 4, 484, 2147483647}, &[]uint32{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint32))) {
- t.Errorf("failed to encode int[]")
- }
- },
- },
- {
- "select $1::bigint[]", []int64{2, 4, 484, 9223372036854775807}, &[]int64{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int64))) {
- t.Errorf("failed to encode bigint[]")
- }
- },
- },
- {
- "select $1::bigint[]", []uint64{2, 4, 484, 9223372036854775807}, &[]uint64{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint64))) {
- t.Errorf("failed to encode bigint[]")
- }
- },
- },
- {
- "select $1::text[]", []string{"it's", "over", "9000!"}, &[]string{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]string))) {
- t.Errorf("failed to encode text[]")
- }
- },
- },
- {
- "select $1::timestamp[]", []time.Time{time.Unix(323232, 0), time.Unix(3239949334, 00)}, &[]time.Time{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]time.Time))) {
- t.Errorf("failed to encode time.Time[] to timestamp[]")
- }
- },
- },
- {
- "select $1::timestamptz[]", []time.Time{time.Unix(323232, 0), time.Unix(3239949334, 00)}, &[]time.Time{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]time.Time))) {
- t.Errorf("failed to encode time.Time[] to timestamptz[]")
- }
- },
- },
- {
- "select $1::bytea[]", [][]byte{{0, 1, 2, 3}, {4, 5, 6, 7}}, &[][]byte{},
- func(t *testing.T, query, scan interface{}) {
- queryBytesSliceSlice := query.([][]byte)
- scanBytesSliceSlice := *(scan.(*[][]byte))
- if len(queryBytesSliceSlice) != len(scanBytesSliceSlice) {
- t.Errorf("failed to encode byte[][] to bytea[]: expected %d to equal %d", len(queryBytesSliceSlice), len(scanBytesSliceSlice))
- }
- for i := range queryBytesSliceSlice {
- qb := queryBytesSliceSlice[i]
- sb := scanBytesSliceSlice[i]
- if !bytes.Equal(qb, sb) {
- t.Errorf("failed to encode byte[][] to bytea[]: expected %v to equal %v", qb, sb)
- }
- }
- },
- },
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.query).Scan(tt.scan)
- if err != nil {
- t.Errorf(`%d. error reading array: %v`, i, err)
- continue
- }
- tt.assert(t, tt.query, tt.scan)
- ensureConnValid(t, conn)
- }
-}
-
-type shortScanner struct{}
-
-func (*shortScanner) Scan(r *pgx.ValueReader) error {
- r.ReadByte()
- return nil
-}
-
-func TestShortScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 'ab', 'cd' union select 'cd', 'ef'")
- if err != nil {
- t.Error(err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var s1, s2 shortScanner
- err = rows.Scan(&s1, &s2)
- if err != nil {
- t.Error(err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestEmptyArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var val []string
-
- err := conn.QueryRow("select array[]::text[]").Scan(&val)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- if len(val) != 0 {
- t.Errorf("Expected 0 values, got %d", len(val))
- }
-
- var n, m int32
-
- err = conn.QueryRow("select 1::integer, array[]::text[], 42::integer").Scan(&n, &val, &m)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- if len(val) != 0 {
- t.Errorf("Expected 0 values, got %d", len(val))
- }
- if n != 1 {
- t.Errorf("Expected n to be 1, but it was %d", n)
- }
- if m != 42 {
- t.Errorf("Expected n to be 42, but it was %d", n)
- }
-
- rows, err := conn.Query("select 1::integer, array['test']::text[] union select 2::integer, array[]::text[] union select 3::integer, array['test']::text[]")
- if err != nil {
- t.Errorf(`error retrieving rows with array: %v`, err)
- }
- defer rows.Close()
-
- for rows.Next() {
- err = rows.Scan(&n, &val)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestNullXMismatch(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s pgx.NullString
- i16 pgx.NullInt16
- i32 pgx.NullInt32
- i64 pgx.NullInt64
- f32 pgx.NullFloat32
- f64 pgx.NullFloat64
- b pgx.NullBool
- t pgx.NullTime
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- err string
- }{
- {"select $1::date", []interface{}{pgx.NullString{String: "foo", Valid: true}}, []interface{}{&actual.s}, "invalid input syntax for type date"},
- {"select $1::date", []interface{}{pgx.NullInt16{Int16: 1, Valid: true}}, []interface{}{&actual.i16}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullInt32{Int32: 1, Valid: true}}, []interface{}{&actual.i32}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullInt64{Int64: 1, Valid: true}}, []interface{}{&actual.i64}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: true}}, []interface{}{&actual.f32}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.f64}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullBool{Bool: true, Valid: true}}, []interface{}{&actual.b}, "cannot encode into OID 1082"},
- {"select $1::int4", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, "cannot encode into OID 23"},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err == nil || !strings.Contains(err.Error(), tt.err) {
- t.Errorf(`%d. Expected error to contain "%s", but it didn't: %v`, i, tt.err, err)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestPointerPointer(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s *string
- i16 *int16
- i32 *int32
- i64 *int64
- f32 *float32
- f64 *float64
- b *bool
- t *time.Time
- }
-
- var actual, zero, expected allTypes
-
- {
- s := "foo"
- expected.s = &s
- i16 := int16(1)
- expected.i16 = &i16
- i32 := int32(1)
- expected.i32 = &i32
- i64 := int64(1)
- expected.i64 = &i64
- f32 := float32(1.23)
- expected.f32 = &f32
- f64 := float64(1.23)
- expected.f64 = &f64
- b := true
- expected.b = &b
- t := time.Unix(123, 5000)
- expected.t = &t
- }
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{expected.s}, []interface{}{&actual.s}, allTypes{s: expected.s}},
- {"select $1::text", []interface{}{zero.s}, []interface{}{&actual.s}, allTypes{}},
- {"select $1::int2", []interface{}{expected.i16}, []interface{}{&actual.i16}, allTypes{i16: expected.i16}},
- {"select $1::int2", []interface{}{zero.i16}, []interface{}{&actual.i16}, allTypes{}},
- {"select $1::int4", []interface{}{expected.i32}, []interface{}{&actual.i32}, allTypes{i32: expected.i32}},
- {"select $1::int4", []interface{}{zero.i32}, []interface{}{&actual.i32}, allTypes{}},
- {"select $1::int8", []interface{}{expected.i64}, []interface{}{&actual.i64}, allTypes{i64: expected.i64}},
- {"select $1::int8", []interface{}{zero.i64}, []interface{}{&actual.i64}, allTypes{}},
- {"select $1::float4", []interface{}{expected.f32}, []interface{}{&actual.f32}, allTypes{f32: expected.f32}},
- {"select $1::float4", []interface{}{zero.f32}, []interface{}{&actual.f32}, allTypes{}},
- {"select $1::float8", []interface{}{expected.f64}, []interface{}{&actual.f64}, allTypes{f64: expected.f64}},
- {"select $1::float8", []interface{}{zero.f64}, []interface{}{&actual.f64}, allTypes{}},
- {"select $1::bool", []interface{}{expected.b}, []interface{}{&actual.b}, allTypes{b: expected.b}},
- {"select $1::bool", []interface{}{zero.b}, []interface{}{&actual.b}, allTypes{}},
- {"select $1::timestamptz", []interface{}{expected.t}, []interface{}{&actual.t}, allTypes{t: expected.t}},
- {"select $1::timestamptz", []interface{}{zero.t}, []interface{}{&actual.t}, allTypes{}},
- {"select $1::timestamp", []interface{}{expected.t}, []interface{}{&actual.t}, allTypes{t: expected.t}},
- {"select $1::timestamp", []interface{}{zero.t}, []interface{}{&actual.t}, allTypes{}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestPointerPointerNonZero(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- f := "foo"
- dest := &f
-
- err := conn.QueryRow("select $1::text", nil).Scan(&dest)
- if err != nil {
- t.Errorf("Unexpected failure scanning: %v", err)
- }
- if dest != nil {
- t.Errorf("Expected dest to be nil, got %#v", dest)
- }
-}
-
-func TestEncodeTypeRename(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type _int int
- inInt := _int(3)
- var outInt _int
-
- type _int8 int8
- inInt8 := _int8(3)
- var outInt8 _int8
-
- type _int16 int16
- inInt16 := _int16(3)
- var outInt16 _int16
-
- type _int32 int32
- inInt32 := _int32(4)
- var outInt32 _int32
-
- type _int64 int64
- inInt64 := _int64(5)
- var outInt64 _int64
-
- type _uint uint
- inUint := _uint(6)
- var outUint _uint
-
- type _uint8 uint8
- inUint8 := _uint8(7)
- var outUint8 _uint8
-
- type _uint16 uint16
- inUint16 := _uint16(8)
- var outUint16 _uint16
-
- type _uint32 uint32
- inUint32 := _uint32(9)
- var outUint32 _uint32
-
- type _uint64 uint64
- inUint64 := _uint64(10)
- var outUint64 _uint64
-
- type _string string
- inString := _string("foo")
- var outString _string
-
- err := conn.QueryRow("select $1::int, $2::int, $3::int2, $4::int4, $5::int8, $6::int, $7::int, $8::int, $9::int, $10::int, $11::text",
- inInt, inInt8, inInt16, inInt32, inInt64, inUint, inUint8, inUint16, inUint32, inUint64, inString,
- ).Scan(&outInt, &outInt8, &outInt16, &outInt32, &outInt64, &outUint, &outUint8, &outUint16, &outUint32, &outUint64, &outString)
- if err != nil {
- t.Fatalf("Failed with type rename: %v", err)
- }
-
- if inInt != outInt {
- t.Errorf("int rename: expected %v, got %v", inInt, outInt)
- }
-
- if inInt8 != outInt8 {
- t.Errorf("int8 rename: expected %v, got %v", inInt8, outInt8)
- }
-
- if inInt16 != outInt16 {
- t.Errorf("int16 rename: expected %v, got %v", inInt16, outInt16)
- }
-
- if inInt32 != outInt32 {
- t.Errorf("int32 rename: expected %v, got %v", inInt32, outInt32)
- }
-
- if inInt64 != outInt64 {
- t.Errorf("int64 rename: expected %v, got %v", inInt64, outInt64)
- }
-
- if inUint != outUint {
- t.Errorf("uint rename: expected %v, got %v", inUint, outUint)
- }
-
- if inUint8 != outUint8 {
- t.Errorf("uint8 rename: expected %v, got %v", inUint8, outUint8)
- }
-
- if inUint16 != outUint16 {
- t.Errorf("uint16 rename: expected %v, got %v", inUint16, outUint16)
- }
-
- if inUint32 != outUint32 {
- t.Errorf("uint32 rename: expected %v, got %v", inUint32, outUint32)
- }
-
- if inUint64 != outUint64 {
- t.Errorf("uint64 rename: expected %v, got %v", inUint64, outUint64)
- }
-
- if inString != outString {
- t.Errorf("string rename: expected %v, got %v", inString, outString)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestRowDecode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- expected []interface{}
- }{
- {
- "select row(1, 'cat', '2015-01-01 08:12:42-00'::timestamptz)",
- []interface{}{
- int32(1),
- "cat",
- time.Date(2015, 1, 1, 8, 12, 42, 0, time.UTC).Local(),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []interface{}
-
- err := conn.QueryRow(tt.sql).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
index 5c1bb3c..d2d1258 100644
--- a/vendor/github.com/jmoiron/sqlx/README.md
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -66,10 +66,12 @@ usage.
package main
import (
- _ "github.com/lib/pq"
"database/sql"
- "github.com/jmoiron/sqlx"
+ "fmt"
"log"
+
+ _ "github.com/lib/pq"
+ "github.com/jmoiron/sqlx"
)
var schema = `
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
index 10f7bdf..b81e6fc 100644
--- a/vendor/github.com/jmoiron/sqlx/bind.go
+++ b/vendor/github.com/jmoiron/sqlx/bind.go
@@ -21,7 +21,7 @@ const (
// BindType returns the bindtype for a given database given a drivername.
func BindType(driverName string) int {
switch driverName {
- case "postgres", "pgx":
+ case "postgres", "pgx", "pq-timeouts":
return DOLLAR
case "mysql":
return QUESTION
diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go
index dd899d3..69eb954 100644
--- a/vendor/github.com/jmoiron/sqlx/named.go
+++ b/vendor/github.com/jmoiron/sqlx/named.go
@@ -163,16 +163,18 @@ func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{
v = v.Elem()
}
- fields := m.TraversalsByName(v.Type(), names)
- for i, t := range fields {
+ err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error {
if len(t) == 0 {
- return arglist, fmt.Errorf("could not find name %s in %#v", names[i], arg)
+ return fmt.Errorf("could not find name %s in %#v", names[i], arg)
}
+
val := reflectx.FieldByIndexesReadOnly(v, t)
arglist = append(arglist, val.Interface())
- }
- return arglist, nil
+ return nil
+ })
+
+ return arglist, err
}
// like bindArgs, but for maps.
diff --git a/vendor/github.com/jmoiron/sqlx/named_context_test.go b/vendor/github.com/jmoiron/sqlx/named_context_test.go
deleted file mode 100644
index 87e94ac..0000000
--- a/vendor/github.com/jmoiron/sqlx/named_context_test.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// +build go1.8
-
-package sqlx
-
-import (
- "context"
- "database/sql"
- "testing"
-)
-
-func TestNamedContextQueries(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- test := Test{t}
- var ns *NamedStmt
- var err error
-
- ctx := context.Background()
-
- // Check that invalid preparations fail
- ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first:name")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- ns, err = db.PrepareNamedContext(ctx, "invalid sql")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- // Check closing works as anticipated
- ns, err = db.PrepareNamedContext(ctx, "SELECT * FROM person WHERE first_name=:first_name")
- test.Error(err)
- err = ns.Close()
- test.Error(err)
-
- ns, err = db.PrepareNamedContext(ctx, `
- SELECT first_name, last_name, email
- FROM person WHERE first_name=:first_name AND email=:email`)
- test.Error(err)
-
- // test Queryx w/ uses Query
- p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
-
- rows, err := ns.QueryxContext(ctx, p)
- test.Error(err)
- for rows.Next() {
- var p2 Person
- rows.StructScan(&p2)
- if p.FirstName != p2.FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
- }
- if p.LastName != p2.LastName {
- t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
- }
- if p.Email != p2.Email {
- t.Errorf("got %s, expected %s", p.Email, p2.Email)
- }
- }
-
- // test Select
- people := make([]Person, 0, 5)
- err = ns.SelectContext(ctx, &people, p)
- test.Error(err)
-
- if len(people) != 1 {
- t.Errorf("got %d results, expected %d", len(people), 1)
- }
- if p.FirstName != people[0].FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
- }
- if p.LastName != people[0].LastName {
- t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
- }
- if p.Email != people[0].Email {
- t.Errorf("got %s, expected %s", p.Email, people[0].Email)
- }
-
- // test Exec
- ns, err = db.PrepareNamedContext(ctx, `
- INSERT INTO person (first_name, last_name, email)
- VALUES (:first_name, :last_name, :email)`)
- test.Error(err)
-
- js := Person{
- FirstName: "Julien",
- LastName: "Savea",
- Email: "jsavea@ab.co.nz",
- }
- _, err = ns.ExecContext(ctx, js)
- test.Error(err)
-
- // Make sure we can pull him out again
- p2 := Person{}
- db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
- if p2.Email != js.Email {
- t.Errorf("expected %s, got %s", js.Email, p2.Email)
- }
-
- // test Txn NamedStmts
- tx := db.MustBeginTx(ctx, nil)
- txns := tx.NamedStmtContext(ctx, ns)
-
- // We're going to add Steven in this txn
- sl := Person{
- FirstName: "Steven",
- LastName: "Luatua",
- Email: "sluatua@ab.co.nz",
- }
-
- _, err = txns.ExecContext(ctx, sl)
- test.Error(err)
- // then rollback...
- tx.Rollback()
- // looking for Steven after a rollback should fail
- err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- if err != sql.ErrNoRows {
- t.Errorf("expected no rows error, got %v", err)
- }
-
- // now do the same, but commit
- tx = db.MustBeginTx(ctx, nil)
- txns = tx.NamedStmtContext(ctx, ns)
- _, err = txns.ExecContext(ctx, sl)
- test.Error(err)
- tx.Commit()
-
- // looking for Steven after a Commit should succeed
- err = db.GetContext(ctx, &p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- test.Error(err)
- if p2.Email != sl.Email {
- t.Errorf("expected %s, got %s", sl.Email, p2.Email)
- }
-
- })
-}
diff --git a/vendor/github.com/jmoiron/sqlx/named_test.go b/vendor/github.com/jmoiron/sqlx/named_test.go
deleted file mode 100644
index d3459a8..0000000
--- a/vendor/github.com/jmoiron/sqlx/named_test.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package sqlx
-
-import (
- "database/sql"
- "testing"
-)
-
-func TestCompileQuery(t *testing.T) {
- table := []struct {
- Q, R, D, N string
- V []string
- }{
- // basic test for named parameters, invalid char ',' terminating
- {
- Q: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
- R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
- D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
- N: `INSERT INTO foo (a,b,c,d) VALUES (:name, :age, :first, :last)`,
- V: []string{"name", "age", "first", "last"},
- },
- // This query tests a named parameter ending the string as well as numbers
- {
- Q: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
- R: `SELECT * FROM a WHERE first_name=? AND last_name=?`,
- D: `SELECT * FROM a WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT * FROM a WHERE first_name=:name1 AND last_name=:name2`,
- V: []string{"name1", "name2"},
- },
- {
- Q: `SELECT "::foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
- R: `SELECT ":foo" FROM a WHERE first_name=? AND last_name=?`,
- D: `SELECT ":foo" FROM a WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT ":foo" FROM a WHERE first_name=:name1 AND last_name=:name2`,
- V: []string{"name1", "name2"},
- },
- {
- Q: `SELECT 'a::b::c' || first_name, '::::ABC::_::' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
- R: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=? AND last_name=?`,
- D: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=$1 AND last_name=$2`,
- N: `SELECT 'a:b:c' || first_name, '::ABC:_:' FROM person WHERE first_name=:first_name AND last_name=:last_name`,
- V: []string{"first_name", "last_name"},
- },
- /* This unicode awareness test sadly fails, because of our byte-wise worldview.
- * We could certainly iterate by Rune instead, though it's a great deal slower,
- * it's probably the RightWay(tm)
- {
- Q: `INSERT INTO foo (a,b,c,d) VALUES (:あ, :b, :キコ, :名前)`,
- R: `INSERT INTO foo (a,b,c,d) VALUES (?, ?, ?, ?)`,
- D: `INSERT INTO foo (a,b,c,d) VALUES ($1, $2, $3, $4)`,
- N: []string{"name", "age", "first", "last"},
- },
- */
- }
-
- for _, test := range table {
- qr, names, err := compileNamedQuery([]byte(test.Q), QUESTION)
- if err != nil {
- t.Error(err)
- }
- if qr != test.R {
- t.Errorf("expected %s, got %s", test.R, qr)
- }
- if len(names) != len(test.V) {
- t.Errorf("expected %#v, got %#v", test.V, names)
- } else {
- for i, name := range names {
- if name != test.V[i] {
- t.Errorf("expected %dth name to be %s, got %s", i+1, test.V[i], name)
- }
- }
- }
- qd, _, _ := compileNamedQuery([]byte(test.Q), DOLLAR)
- if qd != test.D {
- t.Errorf("\nexpected: `%s`\ngot: `%s`", test.D, qd)
- }
-
- qq, _, _ := compileNamedQuery([]byte(test.Q), NAMED)
- if qq != test.N {
- t.Errorf("\nexpected: `%s`\ngot: `%s`\n(len: %d vs %d)", test.N, qq, len(test.N), len(qq))
- }
- }
-}
-
-type Test struct {
- t *testing.T
-}
-
-func (t Test) Error(err error, msg ...interface{}) {
- if err != nil {
- if len(msg) == 0 {
- t.t.Error(err)
- } else {
- t.t.Error(msg...)
- }
- }
-}
-
-func (t Test) Errorf(err error, format string, args ...interface{}) {
- if err != nil {
- t.t.Errorf(format, args...)
- }
-}
-
-func TestNamedQueries(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- test := Test{t}
- var ns *NamedStmt
- var err error
-
- // Check that invalid preparations fail
- ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first:name")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- ns, err = db.PrepareNamed("invalid sql")
- if err == nil {
- t.Error("Expected an error with invalid prepared statement.")
- }
-
- // Check closing works as anticipated
- ns, err = db.PrepareNamed("SELECT * FROM person WHERE first_name=:first_name")
- test.Error(err)
- err = ns.Close()
- test.Error(err)
-
- ns, err = db.PrepareNamed(`
- SELECT first_name, last_name, email
- FROM person WHERE first_name=:first_name AND email=:email`)
- test.Error(err)
-
- // test Queryx w/ uses Query
- p := Person{FirstName: "Jason", LastName: "Moiron", Email: "jmoiron@jmoiron.net"}
-
- rows, err := ns.Queryx(p)
- test.Error(err)
- for rows.Next() {
- var p2 Person
- rows.StructScan(&p2)
- if p.FirstName != p2.FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, p2.FirstName)
- }
- if p.LastName != p2.LastName {
- t.Errorf("got %s, expected %s", p.LastName, p2.LastName)
- }
- if p.Email != p2.Email {
- t.Errorf("got %s, expected %s", p.Email, p2.Email)
- }
- }
-
- // test Select
- people := make([]Person, 0, 5)
- err = ns.Select(&people, p)
- test.Error(err)
-
- if len(people) != 1 {
- t.Errorf("got %d results, expected %d", len(people), 1)
- }
- if p.FirstName != people[0].FirstName {
- t.Errorf("got %s, expected %s", p.FirstName, people[0].FirstName)
- }
- if p.LastName != people[0].LastName {
- t.Errorf("got %s, expected %s", p.LastName, people[0].LastName)
- }
- if p.Email != people[0].Email {
- t.Errorf("got %s, expected %s", p.Email, people[0].Email)
- }
-
- // test Exec
- ns, err = db.PrepareNamed(`
- INSERT INTO person (first_name, last_name, email)
- VALUES (:first_name, :last_name, :email)`)
- test.Error(err)
-
- js := Person{
- FirstName: "Julien",
- LastName: "Savea",
- Email: "jsavea@ab.co.nz",
- }
- _, err = ns.Exec(js)
- test.Error(err)
-
- // Make sure we can pull him out again
- p2 := Person{}
- db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), js.Email)
- if p2.Email != js.Email {
- t.Errorf("expected %s, got %s", js.Email, p2.Email)
- }
-
- // test Txn NamedStmts
- tx := db.MustBegin()
- txns := tx.NamedStmt(ns)
-
- // We're going to add Steven in this txn
- sl := Person{
- FirstName: "Steven",
- LastName: "Luatua",
- Email: "sluatua@ab.co.nz",
- }
-
- _, err = txns.Exec(sl)
- test.Error(err)
- // then rollback...
- tx.Rollback()
- // looking for Steven after a rollback should fail
- err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- if err != sql.ErrNoRows {
- t.Errorf("expected no rows error, got %v", err)
- }
-
- // now do the same, but commit
- tx = db.MustBegin()
- txns = tx.NamedStmt(ns)
- _, err = txns.Exec(sl)
- test.Error(err)
- tx.Commit()
-
- // looking for Steven after a Commit should succeed
- err = db.Get(&p2, db.Rebind("SELECT * FROM person WHERE email=?"), sl.Email)
- test.Error(err)
- if p2.Email != sl.Email {
- t.Errorf("expected %s, got %s", sl.Email, p2.Email)
- }
-
- })
-}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
index f2802b8..73c21eb 100644
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -166,20 +166,39 @@ func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value {
// traversals for each mapped name. Panics if t is not a struct or Indirectable
// to a struct. Returns empty int slice for each name not found.
func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int {
+ r := make([][]int, 0, len(names))
+ m.TraversalsByNameFunc(t, names, func(_ int, i []int) error {
+ if i == nil {
+ r = append(r, []int{})
+ } else {
+ r = append(r, i)
+ }
+
+ return nil
+ })
+ return r
+}
+
+// TraversalsByNameFunc traverses the mapped names and calls fn with the index of
+// each name and the struct traversal represented by that name. Panics if t is not
+// a struct or Indirectable to a struct. Returns the first error returned by fn or nil.
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
t = Deref(t)
mustBe(t, reflect.Struct)
tm := m.TypeMap(t)
-
- r := make([][]int, 0, len(names))
- for _, name := range names {
+ for i, name := range names {
fi, ok := tm.Names[name]
if !ok {
- r = append(r, []int{})
+ if err := fn(i, nil); err != nil {
+ return err
+ }
} else {
- r = append(r, fi.Index)
+ if err := fn(i, fi.Index); err != nil {
+ return err
+ }
}
}
- return r
+ return nil
}
// FieldByIndexes returns a value for the field given by the struct traversal
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
deleted file mode 100644
index b702f9c..0000000
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect_test.go
+++ /dev/null
@@ -1,905 +0,0 @@
-package reflectx
-
-import (
- "reflect"
- "strings"
- "testing"
-)
-
-func ival(v reflect.Value) int {
- return v.Interface().(int)
-}
-
-func TestBasic(t *testing.T) {
- type Foo struct {
- A int
- B int
- C int
- }
-
- f := Foo{1, 2, 3}
- fv := reflect.ValueOf(f)
- m := NewMapperFunc("", func(s string) string { return s })
-
- v := m.FieldByName(fv, "A")
- if ival(v) != f.A {
- t.Errorf("Expecting %d, got %d", ival(v), f.A)
- }
- v = m.FieldByName(fv, "B")
- if ival(v) != f.B {
- t.Errorf("Expecting %d, got %d", f.B, ival(v))
- }
- v = m.FieldByName(fv, "C")
- if ival(v) != f.C {
- t.Errorf("Expecting %d, got %d", f.C, ival(v))
- }
-}
-
-func TestBasicEmbedded(t *testing.T) {
- type Foo struct {
- A int
- }
-
- type Bar struct {
- Foo // `db:""` is implied for an embedded struct
- B int
- C int `db:"-"`
- }
-
- type Baz struct {
- A int
- Bar `db:"Bar"`
- }
-
- m := NewMapperFunc("db", func(s string) string { return s })
-
- z := Baz{}
- z.A = 1
- z.B = 2
- z.C = 4
- z.Bar.Foo.A = 3
-
- zv := reflect.ValueOf(z)
- fields := m.TypeMap(reflect.TypeOf(z))
-
- if len(fields.Index) != 5 {
- t.Errorf("Expecting 5 fields")
- }
-
- // for _, fi := range fields.Index {
- // log.Println(fi)
- // }
-
- v := m.FieldByName(zv, "A")
- if ival(v) != z.A {
- t.Errorf("Expecting %d, got %d", z.A, ival(v))
- }
- v = m.FieldByName(zv, "Bar.B")
- if ival(v) != z.Bar.B {
- t.Errorf("Expecting %d, got %d", z.Bar.B, ival(v))
- }
- v = m.FieldByName(zv, "Bar.A")
- if ival(v) != z.Bar.Foo.A {
- t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
- }
- v = m.FieldByName(zv, "Bar.C")
- if _, ok := v.Interface().(int); ok {
- t.Errorf("Expecting Bar.C to not exist")
- }
-
- fi := fields.GetByPath("Bar.C")
- if fi != nil {
- t.Errorf("Bar.C should not exist")
- }
-}
-
-func TestEmbeddedSimple(t *testing.T) {
- type UUID [16]byte
- type MyID struct {
- UUID
- }
- type Item struct {
- ID MyID
- }
- z := Item{}
-
- m := NewMapper("db")
- m.TypeMap(reflect.TypeOf(z))
-}
-
-func TestBasicEmbeddedWithTags(t *testing.T) {
- type Foo struct {
- A int `db:"a"`
- }
-
- type Bar struct {
- Foo // `db:""` is implied for an embedded struct
- B int `db:"b"`
- }
-
- type Baz struct {
- A int `db:"a"`
- Bar // `db:""` is implied for an embedded struct
- }
-
- m := NewMapper("db")
-
- z := Baz{}
- z.A = 1
- z.B = 2
- z.Bar.Foo.A = 3
-
- zv := reflect.ValueOf(z)
- fields := m.TypeMap(reflect.TypeOf(z))
-
- if len(fields.Index) != 5 {
- t.Errorf("Expecting 5 fields")
- }
-
- // for _, fi := range fields.index {
- // log.Println(fi)
- // }
-
- v := m.FieldByName(zv, "a")
- if ival(v) != z.Bar.Foo.A { // the dominant field
- t.Errorf("Expecting %d, got %d", z.Bar.Foo.A, ival(v))
- }
- v = m.FieldByName(zv, "b")
- if ival(v) != z.B {
- t.Errorf("Expecting %d, got %d", z.B, ival(v))
- }
-}
-
-func TestFlatTags(t *testing.T) {
- m := NewMapper("db")
-
- type Asset struct {
- Title string `db:"title"`
- }
- type Post struct {
- Author string `db:"author,required"`
- Asset Asset `db:""`
- }
- // Post columns: (author title)
-
- post := Post{Author: "Joe", Asset: Asset{Title: "Hello"}}
- pv := reflect.ValueOf(post)
-
- v := m.FieldByName(pv, "author")
- if v.Interface().(string) != post.Author {
- t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
- }
- v = m.FieldByName(pv, "title")
- if v.Interface().(string) != post.Asset.Title {
- t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
- }
-}
-
-func TestNestedStruct(t *testing.T) {
- m := NewMapper("db")
-
- type Details struct {
- Active bool `db:"active"`
- }
- type Asset struct {
- Title string `db:"title"`
- Details Details `db:"details"`
- }
- type Post struct {
- Author string `db:"author,required"`
- Asset `db:"asset"`
- }
- // Post columns: (author asset.title asset.details.active)
-
- post := Post{
- Author: "Joe",
- Asset: Asset{Title: "Hello", Details: Details{Active: true}},
- }
- pv := reflect.ValueOf(post)
-
- v := m.FieldByName(pv, "author")
- if v.Interface().(string) != post.Author {
- t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
- }
- v = m.FieldByName(pv, "title")
- if _, ok := v.Interface().(string); ok {
- t.Errorf("Expecting field to not exist")
- }
- v = m.FieldByName(pv, "asset.title")
- if v.Interface().(string) != post.Asset.Title {
- t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
- }
- v = m.FieldByName(pv, "asset.details.active")
- if v.Interface().(bool) != post.Asset.Details.Active {
- t.Errorf("Expecting %v, got %v", post.Asset.Details.Active, v.Interface().(bool))
- }
-}
-
-func TestInlineStruct(t *testing.T) {
- m := NewMapperTagFunc("db", strings.ToLower, nil)
-
- type Employee struct {
- Name string
- ID int
- }
- type Boss Employee
- type person struct {
- Employee `db:"employee"`
- Boss `db:"boss"`
- }
- // employees columns: (employee.name employee.id boss.name boss.id)
-
- em := person{Employee: Employee{Name: "Joe", ID: 2}, Boss: Boss{Name: "Dick", ID: 1}}
- ev := reflect.ValueOf(em)
-
- fields := m.TypeMap(reflect.TypeOf(em))
- if len(fields.Index) != 6 {
- t.Errorf("Expecting 6 fields")
- }
-
- v := m.FieldByName(ev, "employee.name")
- if v.Interface().(string) != em.Employee.Name {
- t.Errorf("Expecting %s, got %s", em.Employee.Name, v.Interface().(string))
- }
- v = m.FieldByName(ev, "boss.id")
- if ival(v) != em.Boss.ID {
- t.Errorf("Expecting %v, got %v", em.Boss.ID, ival(v))
- }
-}
-
-func TestRecursiveStruct(t *testing.T) {
- type Person struct {
- Parent *Person
- }
- m := NewMapperFunc("db", strings.ToLower)
- var p *Person
- m.TypeMap(reflect.TypeOf(p))
-}
-
-func TestFieldsEmbedded(t *testing.T) {
- m := NewMapper("db")
-
- type Person struct {
- Name string `db:"name,size=64"`
- }
- type Place struct {
- Name string `db:"name"`
- }
- type Article struct {
- Title string `db:"title"`
- }
- type PP struct {
- Person `db:"person,required"`
- Place `db:",someflag"`
- Article `db:",required"`
- }
- // PP columns: (person.name name title)
-
- pp := PP{}
- pp.Person.Name = "Peter"
- pp.Place.Name = "Toronto"
- pp.Article.Title = "Best city ever"
-
- fields := m.TypeMap(reflect.TypeOf(pp))
- // for i, f := range fields {
- // log.Println(i, f)
- // }
-
- ppv := reflect.ValueOf(pp)
-
- v := m.FieldByName(ppv, "person.name")
- if v.Interface().(string) != pp.Person.Name {
- t.Errorf("Expecting %s, got %s", pp.Person.Name, v.Interface().(string))
- }
-
- v = m.FieldByName(ppv, "name")
- if v.Interface().(string) != pp.Place.Name {
- t.Errorf("Expecting %s, got %s", pp.Place.Name, v.Interface().(string))
- }
-
- v = m.FieldByName(ppv, "title")
- if v.Interface().(string) != pp.Article.Title {
- t.Errorf("Expecting %s, got %s", pp.Article.Title, v.Interface().(string))
- }
-
- fi := fields.GetByPath("person")
- if _, ok := fi.Options["required"]; !ok {
- t.Errorf("Expecting required option to be set")
- }
- if !fi.Embedded {
- t.Errorf("Expecting field to be embedded")
- }
- if len(fi.Index) != 1 || fi.Index[0] != 0 {
- t.Errorf("Expecting index to be [0]")
- }
-
- fi = fields.GetByPath("person.name")
- if fi == nil {
- t.Errorf("Expecting person.name to exist")
- }
- if fi.Path != "person.name" {
- t.Errorf("Expecting %s, got %s", "person.name", fi.Path)
- }
- if fi.Options["size"] != "64" {
- t.Errorf("Expecting %s, got %s", "64", fi.Options["size"])
- }
-
- fi = fields.GetByTraversal([]int{1, 0})
- if fi == nil {
- t.Errorf("Expecting traveral to exist")
- }
- if fi.Path != "name" {
- t.Errorf("Expecting %s, got %s", "name", fi.Path)
- }
-
- fi = fields.GetByTraversal([]int{2})
- if fi == nil {
- t.Errorf("Expecting traversal to exist")
- }
- if _, ok := fi.Options["required"]; !ok {
- t.Errorf("Expecting required option to be set")
- }
-
- trs := m.TraversalsByName(reflect.TypeOf(pp), []string{"person.name", "name", "title"})
- if !reflect.DeepEqual(trs, [][]int{{0, 0}, {1, 0}, {2, 0}}) {
- t.Errorf("Expecting traversal: %v", trs)
- }
-}
-
-func TestPtrFields(t *testing.T) {
- m := NewMapperTagFunc("db", strings.ToLower, nil)
- type Asset struct {
- Title string
- }
- type Post struct {
- *Asset `db:"asset"`
- Author string
- }
-
- post := &Post{Author: "Joe", Asset: &Asset{Title: "Hiyo"}}
- pv := reflect.ValueOf(post)
-
- fields := m.TypeMap(reflect.TypeOf(post))
- if len(fields.Index) != 3 {
- t.Errorf("Expecting 3 fields")
- }
-
- v := m.FieldByName(pv, "asset.title")
- if v.Interface().(string) != post.Asset.Title {
- t.Errorf("Expecting %s, got %s", post.Asset.Title, v.Interface().(string))
- }
- v = m.FieldByName(pv, "author")
- if v.Interface().(string) != post.Author {
- t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
- }
-}
-
-func TestNamedPtrFields(t *testing.T) {
- m := NewMapperTagFunc("db", strings.ToLower, nil)
-
- type User struct {
- Name string
- }
-
- type Asset struct {
- Title string
-
- Owner *User `db:"owner"`
- }
- type Post struct {
- Author string
-
- Asset1 *Asset `db:"asset1"`
- Asset2 *Asset `db:"asset2"`
- }
-
- post := &Post{Author: "Joe", Asset1: &Asset{Title: "Hiyo", Owner: &User{"Username"}}} // Let Asset2 be nil
- pv := reflect.ValueOf(post)
-
- fields := m.TypeMap(reflect.TypeOf(post))
- if len(fields.Index) != 9 {
- t.Errorf("Expecting 9 fields")
- }
-
- v := m.FieldByName(pv, "asset1.title")
- if v.Interface().(string) != post.Asset1.Title {
- t.Errorf("Expecting %s, got %s", post.Asset1.Title, v.Interface().(string))
- }
- v = m.FieldByName(pv, "asset1.owner.name")
- if v.Interface().(string) != post.Asset1.Owner.Name {
- t.Errorf("Expecting %s, got %s", post.Asset1.Owner.Name, v.Interface().(string))
- }
- v = m.FieldByName(pv, "asset2.title")
- if v.Interface().(string) != post.Asset2.Title {
- t.Errorf("Expecting %s, got %s", post.Asset2.Title, v.Interface().(string))
- }
- v = m.FieldByName(pv, "asset2.owner.name")
- if v.Interface().(string) != post.Asset2.Owner.Name {
- t.Errorf("Expecting %s, got %s", post.Asset2.Owner.Name, v.Interface().(string))
- }
- v = m.FieldByName(pv, "author")
- if v.Interface().(string) != post.Author {
- t.Errorf("Expecting %s, got %s", post.Author, v.Interface().(string))
- }
-}
-
-func TestFieldMap(t *testing.T) {
- type Foo struct {
- A int
- B int
- C int
- }
-
- f := Foo{1, 2, 3}
- m := NewMapperFunc("db", strings.ToLower)
-
- fm := m.FieldMap(reflect.ValueOf(f))
-
- if len(fm) != 3 {
- t.Errorf("Expecting %d keys, got %d", 3, len(fm))
- }
- if fm["a"].Interface().(int) != 1 {
- t.Errorf("Expecting %d, got %d", 1, ival(fm["a"]))
- }
- if fm["b"].Interface().(int) != 2 {
- t.Errorf("Expecting %d, got %d", 2, ival(fm["b"]))
- }
- if fm["c"].Interface().(int) != 3 {
- t.Errorf("Expecting %d, got %d", 3, ival(fm["c"]))
- }
-}
-
-func TestTagNameMapping(t *testing.T) {
- type Strategy struct {
- StrategyID string `protobuf:"bytes,1,opt,name=strategy_id" json:"strategy_id,omitempty"`
- StrategyName string
- }
-
- m := NewMapperTagFunc("json", strings.ToUpper, func(value string) string {
- if strings.Contains(value, ",") {
- return strings.Split(value, ",")[0]
- }
- return value
- })
- strategy := Strategy{"1", "Alpah"}
- mapping := m.TypeMap(reflect.TypeOf(strategy))
-
- for _, key := range []string{"strategy_id", "STRATEGYNAME"} {
- if fi := mapping.GetByPath(key); fi == nil {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-}
-
-func TestMapping(t *testing.T) {
- type Person struct {
- ID int
- Name string
- WearsGlasses bool `db:"wears_glasses"`
- }
-
- m := NewMapperFunc("db", strings.ToLower)
- p := Person{1, "Jason", true}
- mapping := m.TypeMap(reflect.TypeOf(p))
-
- for _, key := range []string{"id", "name", "wears_glasses"} {
- if fi := mapping.GetByPath(key); fi == nil {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-
- type SportsPerson struct {
- Weight int
- Age int
- Person
- }
- s := SportsPerson{Weight: 100, Age: 30, Person: p}
- mapping = m.TypeMap(reflect.TypeOf(s))
- for _, key := range []string{"id", "name", "wears_glasses", "weight", "age"} {
- if fi := mapping.GetByPath(key); fi == nil {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-
- type RugbyPlayer struct {
- Position int
- IsIntense bool `db:"is_intense"`
- IsAllBlack bool `db:"-"`
- SportsPerson
- }
- r := RugbyPlayer{12, true, false, s}
- mapping = m.TypeMap(reflect.TypeOf(r))
- for _, key := range []string{"id", "name", "wears_glasses", "weight", "age", "position", "is_intense"} {
- if fi := mapping.GetByPath(key); fi == nil {
- t.Errorf("Expecting to find key %s in mapping but did not.", key)
- }
- }
-
- if fi := mapping.GetByPath("isallblack"); fi != nil {
- t.Errorf("Expecting to ignore `IsAllBlack` field")
- }
-}
-
-func TestGetByTraversal(t *testing.T) {
- type C struct {
- C0 int
- C1 int
- }
- type B struct {
- B0 string
- B1 *C
- }
- type A struct {
- A0 int
- A1 B
- }
-
- testCases := []struct {
- Index []int
- ExpectedName string
- ExpectNil bool
- }{
- {
- Index: []int{0},
- ExpectedName: "A0",
- },
- {
- Index: []int{1, 0},
- ExpectedName: "B0",
- },
- {
- Index: []int{1, 1, 1},
- ExpectedName: "C1",
- },
- {
- Index: []int{3, 4, 5},
- ExpectNil: true,
- },
- {
- Index: []int{},
- ExpectNil: true,
- },
- {
- Index: nil,
- ExpectNil: true,
- },
- }
-
- m := NewMapperFunc("db", func(n string) string { return n })
- tm := m.TypeMap(reflect.TypeOf(A{}))
-
- for i, tc := range testCases {
- fi := tm.GetByTraversal(tc.Index)
- if tc.ExpectNil {
- if fi != nil {
- t.Errorf("%d: expected nil, got %v", i, fi)
- }
- continue
- }
-
- if fi == nil {
- t.Errorf("%d: expected %s, got nil", i, tc.ExpectedName)
- continue
- }
-
- if fi.Name != tc.ExpectedName {
- t.Errorf("%d: expected %s, got %s", i, tc.ExpectedName, fi.Name)
- }
- }
-}
-
-// TestMapperMethodsByName tests Mapper methods FieldByName and TraversalsByName
-func TestMapperMethodsByName(t *testing.T) {
- type C struct {
- C0 string
- C1 int
- }
- type B struct {
- B0 *C `db:"B0"`
- B1 C `db:"B1"`
- B2 string `db:"B2"`
- }
- type A struct {
- A0 *B `db:"A0"`
- B `db:"A1"`
- A2 int
- a3 int
- }
-
- val := &A{
- A0: &B{
- B0: &C{C0: "0", C1: 1},
- B1: C{C0: "2", C1: 3},
- B2: "4",
- },
- B: B{
- B0: nil,
- B1: C{C0: "5", C1: 6},
- B2: "7",
- },
- A2: 8,
- }
-
- testCases := []struct {
- Name string
- ExpectInvalid bool
- ExpectedValue interface{}
- ExpectedIndexes []int
- }{
- {
- Name: "A0.B0.C0",
- ExpectedValue: "0",
- ExpectedIndexes: []int{0, 0, 0},
- },
- {
- Name: "A0.B0.C1",
- ExpectedValue: 1,
- ExpectedIndexes: []int{0, 0, 1},
- },
- {
- Name: "A0.B1.C0",
- ExpectedValue: "2",
- ExpectedIndexes: []int{0, 1, 0},
- },
- {
- Name: "A0.B1.C1",
- ExpectedValue: 3,
- ExpectedIndexes: []int{0, 1, 1},
- },
- {
- Name: "A0.B2",
- ExpectedValue: "4",
- ExpectedIndexes: []int{0, 2},
- },
- {
- Name: "A1.B0.C0",
- ExpectedValue: "",
- ExpectedIndexes: []int{1, 0, 0},
- },
- {
- Name: "A1.B0.C1",
- ExpectedValue: 0,
- ExpectedIndexes: []int{1, 0, 1},
- },
- {
- Name: "A1.B1.C0",
- ExpectedValue: "5",
- ExpectedIndexes: []int{1, 1, 0},
- },
- {
- Name: "A1.B1.C1",
- ExpectedValue: 6,
- ExpectedIndexes: []int{1, 1, 1},
- },
- {
- Name: "A1.B2",
- ExpectedValue: "7",
- ExpectedIndexes: []int{1, 2},
- },
- {
- Name: "A2",
- ExpectedValue: 8,
- ExpectedIndexes: []int{2},
- },
- {
- Name: "XYZ",
- ExpectInvalid: true,
- ExpectedIndexes: []int{},
- },
- {
- Name: "a3",
- ExpectInvalid: true,
- ExpectedIndexes: []int{},
- },
- }
-
- // build the names array from the test cases
- names := make([]string, len(testCases))
- for i, tc := range testCases {
- names[i] = tc.Name
- }
- m := NewMapperFunc("db", func(n string) string { return n })
- v := reflect.ValueOf(val)
- values := m.FieldsByName(v, names)
- if len(values) != len(testCases) {
- t.Errorf("expected %d values, got %d", len(testCases), len(values))
- t.FailNow()
- }
- indexes := m.TraversalsByName(v.Type(), names)
- if len(indexes) != len(testCases) {
- t.Errorf("expected %d traversals, got %d", len(testCases), len(indexes))
- t.FailNow()
- }
- for i, val := range values {
- tc := testCases[i]
- traversal := indexes[i]
- if !reflect.DeepEqual(tc.ExpectedIndexes, traversal) {
- t.Errorf("expected %v, got %v", tc.ExpectedIndexes, traversal)
- t.FailNow()
- }
- val = reflect.Indirect(val)
- if tc.ExpectInvalid {
- if val.IsValid() {
- t.Errorf("%d: expected zero value, got %v", i, val)
- }
- continue
- }
- if !val.IsValid() {
- t.Errorf("%d: expected valid value, got %v", i, val)
- continue
- }
- actualValue := reflect.Indirect(val).Interface()
- if !reflect.DeepEqual(tc.ExpectedValue, actualValue) {
- t.Errorf("%d: expected %v, got %v", i, tc.ExpectedValue, actualValue)
- }
- }
-}
-
-func TestFieldByIndexes(t *testing.T) {
- type C struct {
- C0 bool
- C1 string
- C2 int
- C3 map[string]int
- }
- type B struct {
- B1 C
- B2 *C
- }
- type A struct {
- A1 B
- A2 *B
- }
- testCases := []struct {
- value interface{}
- indexes []int
- expectedValue interface{}
- readOnly bool
- }{
- {
- value: A{
- A1: B{B1: C{C0: true}},
- },
- indexes: []int{0, 0, 0},
- expectedValue: true,
- readOnly: true,
- },
- {
- value: A{
- A2: &B{B2: &C{C1: "answer"}},
- },
- indexes: []int{1, 1, 1},
- expectedValue: "answer",
- readOnly: true,
- },
- {
- value: &A{},
- indexes: []int{1, 1, 3},
- expectedValue: map[string]int{},
- },
- }
-
- for i, tc := range testCases {
- checkResults := func(v reflect.Value) {
- if tc.expectedValue == nil {
- if !v.IsNil() {
- t.Errorf("%d: expected nil, actual %v", i, v.Interface())
- }
- } else {
- if !reflect.DeepEqual(tc.expectedValue, v.Interface()) {
- t.Errorf("%d: expected %v, actual %v", i, tc.expectedValue, v.Interface())
- }
- }
- }
-
- checkResults(FieldByIndexes(reflect.ValueOf(tc.value), tc.indexes))
- if tc.readOnly {
- checkResults(FieldByIndexesReadOnly(reflect.ValueOf(tc.value), tc.indexes))
- }
- }
-}
-
-func TestMustBe(t *testing.T) {
- typ := reflect.TypeOf(E1{})
- mustBe(typ, reflect.Struct)
-
- defer func() {
- if r := recover(); r != nil {
- valueErr, ok := r.(*reflect.ValueError)
- if !ok {
- t.Errorf("unexpected Method: %s", valueErr.Method)
- t.Error("expected panic with *reflect.ValueError")
- return
- }
- if valueErr.Method != "github.com/jmoiron/sqlx/reflectx.TestMustBe" {
- }
- if valueErr.Kind != reflect.String {
- t.Errorf("unexpected Kind: %s", valueErr.Kind)
- }
- } else {
- t.Error("expected panic")
- }
- }()
-
- typ = reflect.TypeOf("string")
- mustBe(typ, reflect.Struct)
- t.Error("got here, didn't expect to")
-}
-
-type E1 struct {
- A int
-}
-type E2 struct {
- E1
- B int
-}
-type E3 struct {
- E2
- C int
-}
-type E4 struct {
- E3
- D int
-}
-
-func BenchmarkFieldNameL1(b *testing.B) {
- e4 := E4{D: 1}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.FieldByName("D")
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldNameL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.FieldByName("A")
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldPosL1(b *testing.B) {
- e4 := E4{D: 1}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.Field(1)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldPosL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := v.Field(0)
- f = f.Field(0)
- f = f.Field(0)
- f = f.Field(0)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
-
-func BenchmarkFieldByIndexL4(b *testing.B) {
- e4 := E4{}
- e4.A = 1
- idx := []int{0, 0, 0, 0}
- for i := 0; i < b.N; i++ {
- v := reflect.ValueOf(e4)
- f := FieldByIndexes(v, idx)
- if f.Interface().(int) != 1 {
- b.Fatal("Wrong value.")
- }
- }
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go
index 4859d5a..e95f23f 100644
--- a/vendor/github.com/jmoiron/sqlx/sqlx.go
+++ b/vendor/github.com/jmoiron/sqlx/sqlx.go
@@ -627,10 +627,14 @@ func (r *Rows) StructScan(dest interface{}) error {
func Connect(driverName, dataSourceName string) (*DB, error) {
db, err := Open(driverName, dataSourceName)
if err != nil {
- return db, err
+ return nil, err
}
err = db.Ping()
- return db, err
+ if err != nil {
+ db.Close()
+ return nil, err
+ }
+ return db, nil
}
// MustConnect connects to a database and panics on error.
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go
deleted file mode 100644
index 85e112b..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx_context_test.go
+++ /dev/null
@@ -1,1344 +0,0 @@
-// +build go1.8
-
-// The following environment variables, if set, will be used:
-//
-// * SQLX_SQLITE_DSN
-// * SQLX_POSTGRES_DSN
-// * SQLX_MYSQL_DSN
-//
-// Set any of these variables to 'skip' to skip them. Note that for MySQL,
-// the string '?parseTime=True' will be appended to the DSN if it's not there
-// already.
-//
-package sqlx
-
-import (
- "context"
- "database/sql"
- "encoding/json"
- "fmt"
- "log"
- "strings"
- "testing"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/jmoiron/sqlx/reflectx"
- _ "github.com/lib/pq"
- _ "github.com/mattn/go-sqlite3"
-)
-
-func MultiExecContext(ctx context.Context, e ExecerContext, query string) {
- stmts := strings.Split(query, ";\n")
- if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 {
- stmts = stmts[:len(stmts)-1]
- }
- for _, s := range stmts {
- _, err := e.ExecContext(ctx, s)
- if err != nil {
- fmt.Println(err, s)
- }
- }
-}
-
-func RunWithSchemaContext(ctx context.Context, schema Schema, t *testing.T, test func(ctx context.Context, db *DB, t *testing.T)) {
- runner := func(ctx context.Context, db *DB, t *testing.T, create, drop string) {
- defer func() {
- MultiExecContext(ctx, db, drop)
- }()
-
- MultiExecContext(ctx, db, create)
- test(ctx, db, t)
- }
-
- if TestPostgres {
- create, drop := schema.Postgres()
- runner(ctx, pgdb, t, create, drop)
- }
- if TestSqlite {
- create, drop := schema.Sqlite3()
- runner(ctx, sldb, t, create, drop)
- }
- if TestMysql {
- create, drop := schema.MySQL()
- runner(ctx, mysqldb, t, create, drop)
- }
-}
-
-func loadDefaultFixtureContext(ctx context.Context, db *DB, t *testing.T) {
- tx := db.MustBeginTx(ctx, nil)
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
- if db.DriverName() == "mysql" {
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27")
- } else {
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27")
- }
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444")
- tx.MustExecContext(ctx, tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444")
- tx.Commit()
-}
-
-// Test a new backwards compatible feature, that missing scan destinations
-// will silently scan into sql.RawText rather than failing/panicing
-func TestMissingNamesContextContext(t *testing.T) {
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- type PersonPlus struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
- //AddedAt time.Time `db:"added_at"`
- }
-
- // test Select first
- pps := []PersonPlus{}
- // pps lacks added_at destination
- err := db.SelectContext(ctx, &pps, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected missing name from Select to fail, but it did not.")
- }
-
- // test Get
- pp := PersonPlus{}
- err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected missing name Get to fail, but it did not.")
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rows, err := db.QueryContext(ctx, "SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rows.Next()
- err = StructScan(rows, &pps)
- if err == nil {
- t.Error("Expected missing name in StructScan to fail, but it did not.")
- }
- rows.Close()
-
- // now try various things with unsafe set.
- db = db.Unsafe()
- pps = []PersonPlus{}
- err = db.SelectContext(ctx, &pps, "SELECT * FROM person")
- if err != nil {
- t.Error(err)
- }
-
- // test Get
- pp = PersonPlus{}
- err = db.GetContext(ctx, &pp, "SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Error(err)
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rowsx, err := db.QueryxContext(ctx, "SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rowsx.Next()
- err = StructScan(rowsx, &pps)
- if err != nil {
- t.Error(err)
- }
- rowsx.Close()
-
- // test Named stmt
- if !isUnsafe(db) {
- t.Error("Expected db to be unsafe, but it isn't")
- }
- nstmt, err := db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`)
- if err != nil {
- t.Fatal(err)
- }
- // its internal stmt should be marked unsafe
- if !nstmt.Stmt.unsafe {
- t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety")
- }
- pps = []PersonPlus{}
- err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"})
- if err != nil {
- t.Fatal(err)
- }
- if len(pps) != 1 {
- t.Errorf("Expected 1 person back, got %d", len(pps))
- }
-
- // test it with a safe db
- db.unsafe = false
- if isUnsafe(db) {
- t.Error("expected db to be safe but it isn't")
- }
- nstmt, err = db.PrepareNamedContext(ctx, `SELECT * FROM person WHERE first_name != :name`)
- if err != nil {
- t.Fatal(err)
- }
- // it should be safe
- if isUnsafe(nstmt) {
- t.Error("NamedStmt did not inherit safety")
- }
- nstmt.Unsafe()
- if !isUnsafe(nstmt) {
- t.Error("expected newly unsafed NamedStmt to be unsafe")
- }
- pps = []PersonPlus{}
- err = nstmt.SelectContext(ctx, &pps, map[string]interface{}{"name": "Jason"})
- if err != nil {
- t.Fatal(err)
- }
- if len(pps) != 1 {
- t.Errorf("Expected 1 person back, got %d", len(pps))
- }
-
- })
-}
-
-func TestEmbeddedStructsContextContext(t *testing.T) {
- type Loop1 struct{ Person }
- type Loop2 struct{ Loop1 }
- type Loop3 struct{ Loop2 }
-
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- peopleAndPlaces := []PersonPlace{}
- err := db.SelectContext(
- ctx,
- &peopleAndPlaces,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlaces {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test embedded structs with StructScan
- rows, err := db.QueryxContext(
- ctx,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Error(err)
- }
-
- perp := PersonPlace{}
- rows.Next()
- err = rows.StructScan(&perp)
- if err != nil {
- t.Error(err)
- }
-
- if len(perp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(perp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
-
- rows.Close()
-
- // test the same for embedded pointer structs
- peopleAndPlacesPtrs := []PersonPlacePtr{}
- err = db.SelectContext(
- ctx,
- &peopleAndPlacesPtrs,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlacesPtrs {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test "deep nesting"
- l3s := []Loop3{}
- err = db.SelectContext(ctx, &l3s, `select * from person`)
- if err != nil {
- t.Fatal(err)
- }
- for _, l3 := range l3s {
- if len(l3.Loop2.Loop1.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- }
-
- // test "embed conflicts"
- ec := []EmbedConflict{}
- err = db.SelectContext(ctx, &ec, `select * from person`)
- // I'm torn between erroring here or having some kind of working behavior
- // in order to allow for more flexibility in destination structs
- if err != nil {
- t.Errorf("Was not expecting an error on embed conflicts.")
- }
- })
-}
-
-func TestJoinQueryContext(t *testing.T) {
- type Employee struct {
- Name string
- ID int64
- // BossID is an id into the employee table
- BossID sql.NullInt64 `db:"boss_id"`
- }
- type Boss Employee
-
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
-
- var employees []struct {
- Employee
- Boss `db:"boss"`
- }
-
- err := db.SelectContext(ctx,
- &employees,
- `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees
- JOIN employees AS boss ON employees.boss_id = boss.id`)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, em := range employees {
- if len(em.Employee.Name) == 0 {
- t.Errorf("Expected non zero lengthed name.")
- }
- if em.Employee.BossID.Int64 != em.Boss.ID {
- t.Errorf("Expected boss ids to match")
- }
- }
- })
-}
-
-func TestJoinQueryNamedPointerStructsContext(t *testing.T) {
- type Employee struct {
- Name string
- ID int64
- // BossID is an id into the employee table
- BossID sql.NullInt64 `db:"boss_id"`
- }
- type Boss Employee
-
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
-
- var employees []struct {
- Emp1 *Employee `db:"emp1"`
- Emp2 *Employee `db:"emp2"`
- *Boss `db:"boss"`
- }
-
- err := db.SelectContext(ctx,
- &employees,
- `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id",
- emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id",
- boss.id "boss.id", boss.name "boss.name" FROM employees AS emp
- JOIN employees AS boss ON emp.boss_id = boss.id
- `)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, em := range employees {
- if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 {
- t.Errorf("Expected non zero lengthed name.")
- }
- if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID {
- t.Errorf("Expected boss ids to match")
- }
- }
- })
-}
-
-func TestSelectSliceMapTimeContext(t *testing.T) {
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- rows, err := db.QueryxContext(ctx, "SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- _, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- }
-
- rows, err = db.QueryxContext(ctx, "SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- m := map[string]interface{}{}
- err := rows.MapScan(m)
- if err != nil {
- t.Error(err)
- }
- }
-
- })
-}
-
-func TestNilReceiverContext(t *testing.T) {
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- var p *Person
- err := db.GetContext(ctx, p, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected error when getting into nil struct ptr.")
- }
- var pp *[]Person
- err = db.SelectContext(ctx, pp, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected an error when selecting into nil slice ptr.")
- }
- })
-}
-
-func TestNamedQueryContext(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE place (
- id integer PRIMARY KEY,
- name text NULL
- );
- CREATE TABLE person (
- first_name text NULL,
- last_name text NULL,
- email text NULL
- );
- CREATE TABLE placeperson (
- first_name text NULL,
- last_name text NULL,
- email text NULL,
- place_id integer NULL
- );
- CREATE TABLE jsperson (
- "FIRST" text NULL,
- last_name text NULL,
- "EMAIL" text NULL
- );`,
- drop: `
- drop table person;
- drop table jsperson;
- drop table place;
- drop table placeperson;
- `,
- }
-
- RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
- type Person struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
- }
-
- p := Person{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "doe", Valid: true},
- Email: sql.NullString{String: "ben@doe.com", Valid: true},
- }
-
- q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`
- _, err := db.NamedExecContext(ctx, q1, p)
- if err != nil {
- log.Fatal(err)
- }
-
- p2 := &Person{}
- rows, err := db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", p)
- if err != nil {
- log.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(p2)
- if err != nil {
- t.Error(err)
- }
- if p2.FirstName.String != "ben" {
- t.Error("Expected first name of `ben`, got " + p2.FirstName.String)
- }
- if p2.LastName.String != "doe" {
- t.Error("Expected first name of `doe`, got " + p2.LastName.String)
- }
- }
-
- // these are tests for #73; they verify that named queries work if you've
- // changed the db mapper. This code checks both NamedQuery "ad-hoc" style
- // queries and NamedStmt queries, which use different code paths internally.
- old := *db.Mapper
-
- type JSONPerson struct {
- FirstName sql.NullString `json:"FIRST"`
- LastName sql.NullString `json:"last_name"`
- Email sql.NullString
- }
-
- jp := JSONPerson{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "smith", Valid: true},
- Email: sql.NullString{String: "ben@smith.com", Valid: true},
- }
-
- db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper)
-
- // prepare queries for case sensitivity to test our ToUpper function.
- // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line
- // strings are `` we use "" by default and swap out for MySQL
- pdb := func(s string, db *DB) string {
- if db.DriverName() == "mysql" {
- return strings.Replace(s, `"`, "`", -1)
- }
- return s
- }
-
- q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)`
- _, err = db.NamedExecContext(ctx, pdb(q1, db), jp)
- if err != nil {
- t.Fatal(err, db.DriverName())
- }
-
- // Checks that a person pulled out of the db matches the one we put in
- check := func(t *testing.T, rows *Rows) {
- jp = JSONPerson{}
- for rows.Next() {
- err = rows.StructScan(&jp)
- if err != nil {
- t.Error(err)
- }
- if jp.FirstName.String != "ben" {
- t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName())
- }
- if jp.LastName.String != "smith" {
- t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName())
- }
- if jp.Email.String != "ben@smith.com" {
- t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName())
- }
- }
- }
-
- ns, err := db.PrepareNamed(pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db))
-
- if err != nil {
- t.Fatal(err)
- }
- rows, err = ns.QueryxContext(ctx, jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- // Check exactly the same thing, but with db.NamedQuery, which does not go
- // through the PrepareNamed/NamedStmt path.
- rows, err = db.NamedQueryContext(ctx, pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db), jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- db.Mapper = &old
-
- // Test nested structs
- type Place struct {
- ID int `db:"id"`
- Name sql.NullString `db:"name"`
- }
- type PlacePerson struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
- Place Place `db:"place"`
- }
-
- pl := Place{
- Name: sql.NullString{String: "myplace", Valid: true},
- }
-
- pp := PlacePerson{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "doe", Valid: true},
- Email: sql.NullString{String: "ben@doe.com", Valid: true},
- }
-
- q2 := `INSERT INTO place (id, name) VALUES (1, :name)`
- _, err = db.NamedExecContext(ctx, q2, pl)
- if err != nil {
- log.Fatal(err)
- }
-
- id := 1
- pp.Place.ID = id
-
- q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)`
- _, err = db.NamedExecContext(ctx, q3, pp)
- if err != nil {
- log.Fatal(err)
- }
-
- pp2 := &PlacePerson{}
- rows, err = db.NamedQueryContext(ctx, `
- SELECT
- first_name,
- last_name,
- email,
- place.id AS "place.id",
- place.name AS "place.name"
- FROM placeperson
- INNER JOIN place ON place.id = placeperson.place_id
- WHERE
- place.id=:place.id`, pp)
- if err != nil {
- log.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(pp2)
- if err != nil {
- t.Error(err)
- }
- if pp2.FirstName.String != "ben" {
- t.Error("Expected first name of `ben`, got " + pp2.FirstName.String)
- }
- if pp2.LastName.String != "doe" {
- t.Error("Expected first name of `doe`, got " + pp2.LastName.String)
- }
- if pp2.Place.Name.String != "myplace" {
- t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String)
- }
- if pp2.Place.ID != pp.Place.ID {
- t.Errorf("Expected place name of %v, got %v", pp.Place.ID, pp2.Place.ID)
- }
- }
- })
-}
-
-func TestNilInsertsContext(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE tt (
- id integer,
- value text NULL DEFAULT NULL
- );`,
- drop: "drop table tt;",
- }
-
- RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
- type TT struct {
- ID int
- Value *string
- }
- var v, v2 TT
- r := db.Rebind
-
- db.MustExecContext(ctx, r(`INSERT INTO tt (id) VALUES (1)`))
- db.GetContext(ctx, &v, r(`SELECT * FROM tt`))
- if v.ID != 1 {
- t.Errorf("Expecting id of 1, got %v", v.ID)
- }
- if v.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
- }
-
- v.ID = 2
- // NOTE: this incidentally uncovered a bug which was that named queries with
- // pointer destinations would not work if the passed value here was not addressable,
- // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for
- // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly
- // function. This next line is important as it provides the only coverage for this.
- db.NamedExecContext(ctx, `INSERT INTO tt (id, value) VALUES (:id, :value)`, v)
-
- db.GetContext(ctx, &v2, r(`SELECT * FROM tt WHERE id=2`))
- if v.ID != v2.ID {
- t.Errorf("%v != %v", v.ID, v2.ID)
- }
- if v2.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
- }
- })
-}
-
-func TestScanErrorContext(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE kv (
- k text,
- v integer
- );`,
- drop: `drop table kv;`,
- }
-
- RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
- type WrongTypes struct {
- K int
- V string
- }
- _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1)
- if err != nil {
- t.Error(err)
- }
-
- rows, err := db.QueryxContext(ctx, "SELECT * FROM kv")
- if err != nil {
- t.Error(err)
- }
- for rows.Next() {
- var wt WrongTypes
- err := rows.StructScan(&wt)
- if err == nil {
- t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName())
- }
- }
- })
-}
-
-// FIXME: this function is kinda big but it slows things down to be constantly
-// loading and reloading the schema..
-
-func TestUsageContext(t *testing.T) {
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- slicemembers := []SliceMember{}
- err := db.SelectContext(ctx, &slicemembers, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- people := []Person{}
-
- err = db.SelectContext(ctx, &people, "SELECT * FROM person ORDER BY first_name ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- jason, john := people[0], people[1]
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName)
- }
- if jason.LastName != "Moiron" {
- t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName)
- }
- if jason.Email != "jmoiron@jmoiron.net" {
- t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email)
- }
- if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" {
- t.Errorf("John Doe's person record not what expected: Got %v\n", john)
- }
-
- jason = Person{}
- err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason")
-
- if err != nil {
- t.Fatal(err)
- }
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName)
- }
-
- err = db.GetContext(ctx, &jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar")
- if err == nil {
- t.Errorf("Expecting an error, got nil\n")
- }
- if err != sql.ErrNoRows {
- t.Errorf("Expected sql.ErrNoRows, got %v\n", err)
- }
-
- // The following tests check statement reuse, which was actually a problem
- // due to copying being done when creating Stmt's which was eventually removed
- stmt1, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
-
- row := stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
- row = stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
-
- err = stmt1.GetContext(ctx, &jason, "DoesNotExist User")
- if err == nil {
- t.Error("Expected an error")
- }
- err = stmt1.GetContext(ctx, &jason, "DoesNotExist User 2")
- if err == nil {
- t.Fatal(err)
- }
-
- stmt2, err := db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
- tx, err := db.Beginx()
- if err != nil {
- t.Fatal(err)
- }
- tstmt2 := tx.Stmtx(stmt2)
- row2 := tstmt2.QueryRowx("Jason")
- err = row2.StructScan(&jason)
- if err != nil {
- t.Error(err)
- }
- tx.Commit()
-
- places := []*Place{}
- err = db.SelectContext(ctx, &places, "SELECT telcode FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- usa, singsing, honkers := places[0], places[1], places[2]
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- placesptr := []PlacePtr{}
- err = db.SelectContext(ctx, &placesptr, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Error(err)
- }
- //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2])
-
- // if you have null fields and use SELECT *, you must use sql.Null* in your struct
- // this test also verifies that you can use either a []Struct{} or a []*Struct{}
- places2 := []Place{}
- err = db.SelectContext(ctx, &places2, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- usa, singsing, honkers = &places2[0], &places2[1], &places2[2]
-
- // this should return a type error that &p is not a pointer to a struct slice
- p := Place{}
- err = db.SelectContext(ctx, &p, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice")
- }
-
- // this should be an error
- pl := []Place{}
- err = db.SelectContext(ctx, pl, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.")
- }
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- stmt, err := db.PreparexContext(ctx, db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC"))
- if err != nil {
- t.Error(err)
- }
-
- places = []*Place{}
- err = stmt.SelectContext(ctx, &places, 10)
- if len(places) != 2 {
- t.Error("Expected 2 places, got 0.")
- }
- if err != nil {
- t.Fatal(err)
- }
- singsing, honkers = places[0], places[1]
- if singsing.TelCode != 65 || honkers.TelCode != 852 {
- t.Errorf("Expected the right telcodes, got %#v", places)
- }
-
- rows, err := db.QueryxContext(ctx, "SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- place := Place{}
- for rows.Next() {
- err = rows.StructScan(&place)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- rows, err = db.QueryxContext(ctx, "SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- m := map[string]interface{}{}
- for rows.Next() {
- err = rows.MapScan(m)
- if err != nil {
- t.Fatal(err)
- }
- _, ok := m["country"]
- if !ok {
- t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m)
- }
- }
-
- rows, err = db.QueryxContext(ctx, "SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- s, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- if len(s) != 3 {
- t.Errorf("Expected 3 columns in result, got %d\n", len(s))
- }
- }
-
- // test advanced querying
- // test that NamedExec works with a map as well as a struct
- _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{
- "first": "Bin",
- "last": "Smuth",
- "email": "bensmith@allblacks.nz",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // ensure that if the named param happens right at the end it still works
- // ensure that NamedQuery works with a map[string]interface{}
- rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"})
- if err != nil {
- t.Fatal(err)
- }
-
- ben := &Person{}
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Bin" {
- t.Fatal("Expected first name of `Bin`, got " + ben.FirstName)
- }
- if ben.LastName != "Smuth" {
- t.Fatal("Expected first name of `Smuth`, got " + ben.LastName)
- }
- }
-
- ben.FirstName = "Ben"
- ben.LastName = "Smith"
- ben.Email = "binsmuth@allblacks.nz"
-
- // Insert via a named query using the struct
- _, err = db.NamedExecContext(ctx, "INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben)
-
- if err != nil {
- t.Fatal(err)
- }
-
- rows, err = db.NamedQueryContext(ctx, "SELECT * FROM person WHERE first_name=:first_name", ben)
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
- // ensure that Get does not panic on emppty result set
- person := &Person{}
- err = db.GetContext(ctx, person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist")
- if err == nil {
- t.Fatal("Should have got an error for Get on non-existant row.")
- }
-
- // lets test prepared statements some more
-
- stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- rows, err = stmt.QueryxContext(ctx, "Ben")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
-
- john = Person{}
- stmt, err = db.PreparexContext(ctx, db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Error(err)
- }
- err = stmt.GetContext(ctx, &john, "John")
- if err != nil {
- t.Error(err)
- }
-
- // test name mapping
- // THIS USED TO WORK BUT WILL NO LONGER WORK.
- db.MapperFunc(strings.ToUpper)
- rsa := CPlace{}
- err = db.GetContext(ctx, &rsa, "SELECT * FROM capplace;")
- if err != nil {
- t.Error(err, "in db:", db.DriverName())
- }
- db.MapperFunc(strings.ToLower)
-
- // create a copy and change the mapper, then verify the copy behaves
- // differently from the original.
- dbCopy := NewDb(db.DB, db.DriverName())
- dbCopy.MapperFunc(strings.ToUpper)
- err = dbCopy.GetContext(ctx, &rsa, "SELECT * FROM capplace;")
- if err != nil {
- fmt.Println(db.DriverName())
- t.Error(err)
- }
-
- err = db.GetContext(ctx, &rsa, "SELECT * FROM cappplace;")
- if err == nil {
- t.Error("Expected no error, got ", err)
- }
-
- // test base type slices
- var sdest []string
- rows, err = db.QueryxContext(ctx, "SELECT email FROM person ORDER BY email ASC;")
- if err != nil {
- t.Error(err)
- }
- err = scanAll(rows, &sdest, false)
- if err != nil {
- t.Error(err)
- }
-
- // test Get with base types
- var count int
- err = db.GetContext(ctx, &count, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if count != len(sdest) {
- t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest))
- }
-
- // test Get and Select with time.Time, #84
- var addedAt time.Time
- err = db.GetContext(ctx, &addedAt, "SELECT added_at FROM person LIMIT 1;")
- if err != nil {
- t.Error(err)
- }
-
- var addedAts []time.Time
- err = db.SelectContext(ctx, &addedAts, "SELECT added_at FROM person;")
- if err != nil {
- t.Error(err)
- }
-
- // test it on a double pointer
- var pcount *int
- err = db.GetContext(ctx, &pcount, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if *pcount != count {
- t.Errorf("expected %d = %d", *pcount, count)
- }
-
- // test Select...
- sdest = []string{}
- err = db.SelectContext(ctx, &sdest, "SELECT first_name FROM person ORDER BY first_name ASC;")
- if err != nil {
- t.Error(err)
- }
- expected := []string{"Ben", "Bin", "Jason", "John"}
- for i, got := range sdest {
- if got != expected[i] {
- t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got)
- }
- }
-
- var nsdest []sql.NullString
- err = db.SelectContext(ctx, &nsdest, "SELECT city FROM place ORDER BY city ASC")
- if err != nil {
- t.Error(err)
- }
- for _, val := range nsdest {
- if val.Valid && val.String != "New York" {
- t.Errorf("expected single valid result to be `New York`, but got %s", val.String)
- }
- }
- })
-}
-
-// tests that sqlx will not panic when the wrong driver is passed because
-// of an automatic nil dereference in sqlx.Open(), which was fixed.
-func TestDoNotPanicOnConnectContext(t *testing.T) {
- _, err := ConnectContext(context.Background(), "bogus", "hehe")
- if err == nil {
- t.Errorf("Should return error when using bogus driverName")
- }
-}
-
-func TestEmbeddedMapsContext(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE message (
- string text,
- properties text
- );`,
- drop: `drop table message;`,
- }
-
- RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
- messages := []Message{
- {"Hello, World", PropertyMap{"one": "1", "two": "2"}},
- {"Thanks, Joy", PropertyMap{"pull": "request"}},
- }
- q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);`
- for _, m := range messages {
- _, err := db.NamedExecContext(ctx, q1, m)
- if err != nil {
- t.Fatal(err)
- }
- }
- var count int
- err := db.GetContext(ctx, &count, "SELECT count(*) FROM message")
- if err != nil {
- t.Fatal(err)
- }
- if count != len(messages) {
- t.Fatalf("Expected %d messages in DB, found %d", len(messages), count)
- }
-
- var m Message
- err = db.GetContext(ctx, &m, "SELECT * FROM message LIMIT 1;")
- if err != nil {
- t.Fatal(err)
- }
- if m.Properties == nil {
- t.Fatal("Expected m.Properties to not be nil, but it was.")
- }
- })
-}
-
-func TestIssue197Context(t *testing.T) {
- // this test actually tests for a bug in database/sql:
- // https://github.com/golang/go/issues/13905
- // this potentially makes _any_ named type that is an alias for []byte
- // unsafe to use in a lot of different ways (basically, unsafe to hold
- // onto after loading from the database).
- t.Skip()
-
- type mybyte []byte
- type Var struct{ Raw json.RawMessage }
- type Var2 struct{ Raw []byte }
- type Var3 struct{ Raw mybyte }
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- var err error
- var v, q Var
- if err = db.GetContext(ctx, &v, `SELECT '{"a": "b"}' AS raw`); err != nil {
- t.Fatal(err)
- }
- if err = db.GetContext(ctx, &q, `SELECT 'null' AS raw`); err != nil {
- t.Fatal(err)
- }
-
- var v2, q2 Var2
- if err = db.GetContext(ctx, &v2, `SELECT '{"a": "b"}' AS raw`); err != nil {
- t.Fatal(err)
- }
- if err = db.GetContext(ctx, &q2, `SELECT 'null' AS raw`); err != nil {
- t.Fatal(err)
- }
-
- var v3, q3 Var3
- if err = db.QueryRowContext(ctx, `SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil {
- t.Fatal(err)
- }
- if err = db.QueryRowContext(ctx, `SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil {
- t.Fatal(err)
- }
- t.Fail()
- })
-}
-
-func TestInContext(t *testing.T) {
- // some quite normal situations
- type tr struct {
- q string
- args []interface{}
- c int
- }
- tests := []tr{
- {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?",
- []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"},
- 7},
- {"SELECT * FROM foo WHERE x in (?)",
- []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}},
- 8},
- }
- for _, test := range tests {
- q, a, err := In(test.q, test.args...)
- if err != nil {
- t.Error(err)
- }
- if len(a) != test.c {
- t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a)
- }
- if strings.Count(q, "?") != test.c {
- t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?"))
- }
- }
-
- // too many bindVars, but no slices, so short circuits parsing
- // i'm not sure if this is the right behavior; this query/arg combo
- // might not work, but we shouldn't parse if we don't need to
- {
- orig := "SELECT * FROM foo WHERE x = ? AND y = ?"
- q, a, err := In(orig, "foo", "bar", "baz")
- if err != nil {
- t.Error(err)
- }
- if len(a) != 3 {
- t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a)
- }
- if q != orig {
- t.Error("Expected unchanged query.")
- }
- }
-
- tests = []tr{
- // too many bindvars; slice present so should return error during parse
- {"SELECT * FROM foo WHERE x = ? and y = ?",
- []interface{}{"foo", []int{1, 2, 3}, "bar"},
- 0},
- // empty slice, should return error before parse
- {"SELECT * FROM foo WHERE x = ?",
- []interface{}{[]int{}},
- 0},
- // too *few* bindvars, should return an error
- {"SELECT * FROM foo WHERE x = ? AND y in (?)",
- []interface{}{[]int{1, 2, 3}},
- 0},
- }
- for _, test := range tests {
- _, _, err := In(test.q, test.args...)
- if err == nil {
- t.Error("Expected an error, but got nil.")
- }
- }
- RunWithSchemaContext(context.Background(), defaultSchema, t, func(ctx context.Context, db *DB, t *testing.T) {
- loadDefaultFixtureContext(ctx, db, t)
- //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
- //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
- //tx.MustExecContext(ctx, tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
- telcodes := []int{852, 65}
- q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode"
- query, args, err := In(q, telcodes)
- if err != nil {
- t.Error(err)
- }
- query = db.Rebind(query)
- places := []Place{}
- err = db.SelectContext(ctx, &places, query, args...)
- if err != nil {
- t.Error(err)
- }
- if len(places) != 2 {
- t.Fatalf("Expecting 2 results, got %d", len(places))
- }
- if places[0].TelCode != 65 {
- t.Errorf("Expecting singapore first, but got %#v", places[0])
- }
- if places[1].TelCode != 852 {
- t.Errorf("Expecting hong kong second, but got %#v", places[1])
- }
- })
-}
-
-func TestEmbeddedLiteralsContext(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE x (
- k text
- );`,
- drop: `drop table x;`,
- }
-
- RunWithSchemaContext(context.Background(), schema, t, func(ctx context.Context, db *DB, t *testing.T) {
- type t1 struct {
- K *string
- }
- type t2 struct {
- Inline struct {
- F string
- }
- K *string
- }
-
- db.MustExecContext(ctx, db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three")
-
- target := t1{}
- err := db.GetContext(ctx, &target, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target.K != "one" {
- t.Error("Expected target.K to be `one`, got ", target.K)
- }
-
- target2 := t2{}
- err = db.GetContext(ctx, &target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target2.K != "one" {
- t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K)
- }
- })
-}
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_test.go b/vendor/github.com/jmoiron/sqlx/sqlx_test.go
deleted file mode 100644
index 5752773..0000000
--- a/vendor/github.com/jmoiron/sqlx/sqlx_test.go
+++ /dev/null
@@ -1,1792 +0,0 @@
-// The following environment variables, if set, will be used:
-//
-// * SQLX_SQLITE_DSN
-// * SQLX_POSTGRES_DSN
-// * SQLX_MYSQL_DSN
-//
-// Set any of these variables to 'skip' to skip them. Note that for MySQL,
-// the string '?parseTime=True' will be appended to the DSN if it's not there
-// already.
-//
-package sqlx
-
-import (
- "database/sql"
- "database/sql/driver"
- "encoding/json"
- "fmt"
- "log"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
- "github.com/jmoiron/sqlx/reflectx"
- _ "github.com/lib/pq"
- _ "github.com/mattn/go-sqlite3"
-)
-
-/* compile time checks that Db, Tx, Stmt (qStmt) implement expected interfaces */
-var _, _ Ext = &DB{}, &Tx{}
-var _, _ ColScanner = &Row{}, &Rows{}
-var _ Queryer = &qStmt{}
-var _ Execer = &qStmt{}
-
-var TestPostgres = true
-var TestSqlite = true
-var TestMysql = true
-
-var sldb *DB
-var pgdb *DB
-var mysqldb *DB
-var active = []*DB{}
-
-func init() {
- ConnectAll()
-}
-
-func ConnectAll() {
- var err error
-
- pgdsn := os.Getenv("SQLX_POSTGRES_DSN")
- mydsn := os.Getenv("SQLX_MYSQL_DSN")
- sqdsn := os.Getenv("SQLX_SQLITE_DSN")
-
- TestPostgres = pgdsn != "skip"
- TestMysql = mydsn != "skip"
- TestSqlite = sqdsn != "skip"
-
- if !strings.Contains(mydsn, "parseTime=true") {
- mydsn += "?parseTime=true"
- }
-
- if TestPostgres {
- pgdb, err = Connect("postgres", pgdsn)
- if err != nil {
- fmt.Printf("Disabling PG tests:\n %v\n", err)
- TestPostgres = false
- }
- } else {
- fmt.Println("Disabling Postgres tests.")
- }
-
- if TestMysql {
- mysqldb, err = Connect("mysql", mydsn)
- if err != nil {
- fmt.Printf("Disabling MySQL tests:\n %v", err)
- TestMysql = false
- }
- } else {
- fmt.Println("Disabling MySQL tests.")
- }
-
- if TestSqlite {
- sldb, err = Connect("sqlite3", sqdsn)
- if err != nil {
- fmt.Printf("Disabling SQLite:\n %v", err)
- TestSqlite = false
- }
- } else {
- fmt.Println("Disabling SQLite tests.")
- }
-}
-
-type Schema struct {
- create string
- drop string
-}
-
-func (s Schema) Postgres() (string, string) {
- return s.create, s.drop
-}
-
-func (s Schema) MySQL() (string, string) {
- return strings.Replace(s.create, `"`, "`", -1), s.drop
-}
-
-func (s Schema) Sqlite3() (string, string) {
- return strings.Replace(s.create, `now()`, `CURRENT_TIMESTAMP`, -1), s.drop
-}
-
-var defaultSchema = Schema{
- create: `
-CREATE TABLE person (
- first_name text,
- last_name text,
- email text,
- added_at timestamp default now()
-);
-
-CREATE TABLE place (
- country text,
- city text NULL,
- telcode integer
-);
-
-CREATE TABLE capplace (
- "COUNTRY" text,
- "CITY" text NULL,
- "TELCODE" integer
-);
-
-CREATE TABLE nullperson (
- first_name text NULL,
- last_name text NULL,
- email text NULL
-);
-
-CREATE TABLE employees (
- name text,
- id integer,
- boss_id integer
-);
-
-`,
- drop: `
-drop table person;
-drop table place;
-drop table capplace;
-drop table nullperson;
-drop table employees;
-`,
-}
-
-type Person struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
- AddedAt time.Time `db:"added_at"`
-}
-
-type Person2 struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
-}
-
-type Place struct {
- Country string
- City sql.NullString
- TelCode int
-}
-
-type PlacePtr struct {
- Country string
- City *string
- TelCode int
-}
-
-type PersonPlace struct {
- Person
- Place
-}
-
-type PersonPlacePtr struct {
- *Person
- *Place
-}
-
-type EmbedConflict struct {
- FirstName string `db:"first_name"`
- Person
-}
-
-type SliceMember struct {
- Country string
- City sql.NullString
- TelCode int
- People []Person `db:"-"`
- Addresses []Place `db:"-"`
-}
-
-// Note that because of field map caching, we need a new type here
-// if we've used Place already somewhere in sqlx
-type CPlace Place
-
-func MultiExec(e Execer, query string) {
- stmts := strings.Split(query, ";\n")
- if len(strings.Trim(stmts[len(stmts)-1], " \n\t\r")) == 0 {
- stmts = stmts[:len(stmts)-1]
- }
- for _, s := range stmts {
- _, err := e.Exec(s)
- if err != nil {
- fmt.Println(err, s)
- }
- }
-}
-
-func RunWithSchema(schema Schema, t *testing.T, test func(db *DB, t *testing.T)) {
- runner := func(db *DB, t *testing.T, create, drop string) {
- defer func() {
- MultiExec(db, drop)
- }()
-
- MultiExec(db, create)
- test(db, t)
- }
-
- if TestPostgres {
- create, drop := schema.Postgres()
- runner(pgdb, t, create, drop)
- }
- if TestSqlite {
- create, drop := schema.Sqlite3()
- runner(sldb, t, create, drop)
- }
- if TestMysql {
- create, drop := schema.MySQL()
- runner(mysqldb, t, create, drop)
- }
-}
-
-func loadDefaultFixture(db *DB, t *testing.T) {
- tx := db.MustBegin()
- tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "Jason", "Moiron", "jmoiron@jmoiron.net")
- tx.MustExec(tx.Rebind("INSERT INTO person (first_name, last_name, email) VALUES (?, ?, ?)"), "John", "Doe", "johndoeDNE@gmail.net")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
- tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
- if db.DriverName() == "mysql" {
- tx.MustExec(tx.Rebind("INSERT INTO capplace (`COUNTRY`, `TELCODE`) VALUES (?, ?)"), "Sarf Efrica", "27")
- } else {
- tx.MustExec(tx.Rebind("INSERT INTO capplace (\"COUNTRY\", \"TELCODE\") VALUES (?, ?)"), "Sarf Efrica", "27")
- }
- tx.MustExec(tx.Rebind("INSERT INTO employees (name, id) VALUES (?, ?)"), "Peter", "4444")
- tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Joe", "1", "4444")
- tx.MustExec(tx.Rebind("INSERT INTO employees (name, id, boss_id) VALUES (?, ?, ?)"), "Martin", "2", "4444")
- tx.Commit()
-}
-
-// Test a new backwards compatible feature, that missing scan destinations
-// will silently scan into sql.RawText rather than failing/panicing
-func TestMissingNames(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- type PersonPlus struct {
- FirstName string `db:"first_name"`
- LastName string `db:"last_name"`
- Email string
- //AddedAt time.Time `db:"added_at"`
- }
-
- // test Select first
- pps := []PersonPlus{}
- // pps lacks added_at destination
- err := db.Select(&pps, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected missing name from Select to fail, but it did not.")
- }
-
- // test Get
- pp := PersonPlus{}
- err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected missing name Get to fail, but it did not.")
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rows, err := db.Query("SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rows.Next()
- err = StructScan(rows, &pps)
- if err == nil {
- t.Error("Expected missing name in StructScan to fail, but it did not.")
- }
- rows.Close()
-
- // now try various things with unsafe set.
- db = db.Unsafe()
- pps = []PersonPlus{}
- err = db.Select(&pps, "SELECT * FROM person")
- if err != nil {
- t.Error(err)
- }
-
- // test Get
- pp = PersonPlus{}
- err = db.Get(&pp, "SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Error(err)
- }
-
- // test naked StructScan
- pps = []PersonPlus{}
- rowsx, err := db.Queryx("SELECT * FROM person LIMIT 1")
- if err != nil {
- t.Fatal(err)
- }
- rowsx.Next()
- err = StructScan(rowsx, &pps)
- if err != nil {
- t.Error(err)
- }
- rowsx.Close()
-
- // test Named stmt
- if !isUnsafe(db) {
- t.Error("Expected db to be unsafe, but it isn't")
- }
- nstmt, err := db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`)
- if err != nil {
- t.Fatal(err)
- }
- // its internal stmt should be marked unsafe
- if !nstmt.Stmt.unsafe {
- t.Error("expected NamedStmt to be unsafe but its underlying stmt did not inherit safety")
- }
- pps = []PersonPlus{}
- err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"})
- if err != nil {
- t.Fatal(err)
- }
- if len(pps) != 1 {
- t.Errorf("Expected 1 person back, got %d", len(pps))
- }
-
- // test it with a safe db
- db.unsafe = false
- if isUnsafe(db) {
- t.Error("expected db to be safe but it isn't")
- }
- nstmt, err = db.PrepareNamed(`SELECT * FROM person WHERE first_name != :name`)
- if err != nil {
- t.Fatal(err)
- }
- // it should be safe
- if isUnsafe(nstmt) {
- t.Error("NamedStmt did not inherit safety")
- }
- nstmt.Unsafe()
- if !isUnsafe(nstmt) {
- t.Error("expected newly unsafed NamedStmt to be unsafe")
- }
- pps = []PersonPlus{}
- err = nstmt.Select(&pps, map[string]interface{}{"name": "Jason"})
- if err != nil {
- t.Fatal(err)
- }
- if len(pps) != 1 {
- t.Errorf("Expected 1 person back, got %d", len(pps))
- }
-
- })
-}
-
-func TestEmbeddedStructs(t *testing.T) {
- type Loop1 struct{ Person }
- type Loop2 struct{ Loop1 }
- type Loop3 struct{ Loop2 }
-
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- peopleAndPlaces := []PersonPlace{}
- err := db.Select(
- &peopleAndPlaces,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlaces {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test embedded structs with StructScan
- rows, err := db.Queryx(
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Error(err)
- }
-
- perp := PersonPlace{}
- rows.Next()
- err = rows.StructScan(&perp)
- if err != nil {
- t.Error(err)
- }
-
- if len(perp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(perp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
-
- rows.Close()
-
- // test the same for embedded pointer structs
- peopleAndPlacesPtrs := []PersonPlacePtr{}
- err = db.Select(
- &peopleAndPlacesPtrs,
- `SELECT person.*, place.* FROM
- person natural join place`)
- if err != nil {
- t.Fatal(err)
- }
- for _, pp := range peopleAndPlacesPtrs {
- if len(pp.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- if len(pp.Place.Country) == 0 {
- t.Errorf("Expected non zero lengthed country.")
- }
- }
-
- // test "deep nesting"
- l3s := []Loop3{}
- err = db.Select(&l3s, `select * from person`)
- if err != nil {
- t.Fatal(err)
- }
- for _, l3 := range l3s {
- if len(l3.Loop2.Loop1.Person.FirstName) == 0 {
- t.Errorf("Expected non zero lengthed first name.")
- }
- }
-
- // test "embed conflicts"
- ec := []EmbedConflict{}
- err = db.Select(&ec, `select * from person`)
- // I'm torn between erroring here or having some kind of working behavior
- // in order to allow for more flexibility in destination structs
- if err != nil {
- t.Errorf("Was not expecting an error on embed conflicts.")
- }
- })
-}
-
-func TestJoinQuery(t *testing.T) {
- type Employee struct {
- Name string
- ID int64
- // BossID is an id into the employee table
- BossID sql.NullInt64 `db:"boss_id"`
- }
- type Boss Employee
-
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
-
- var employees []struct {
- Employee
- Boss `db:"boss"`
- }
-
- err := db.Select(
- &employees,
- `SELECT employees.*, boss.id "boss.id", boss.name "boss.name" FROM employees
- JOIN employees AS boss ON employees.boss_id = boss.id`)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, em := range employees {
- if len(em.Employee.Name) == 0 {
- t.Errorf("Expected non zero lengthed name.")
- }
- if em.Employee.BossID.Int64 != em.Boss.ID {
- t.Errorf("Expected boss ids to match")
- }
- }
- })
-}
-
-func TestJoinQueryNamedPointerStructs(t *testing.T) {
- type Employee struct {
- Name string
- ID int64
- // BossID is an id into the employee table
- BossID sql.NullInt64 `db:"boss_id"`
- }
- type Boss Employee
-
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
-
- var employees []struct {
- Emp1 *Employee `db:"emp1"`
- Emp2 *Employee `db:"emp2"`
- *Boss `db:"boss"`
- }
-
- err := db.Select(
- &employees,
- `SELECT emp.name "emp1.name", emp.id "emp1.id", emp.boss_id "emp1.boss_id",
- emp.name "emp2.name", emp.id "emp2.id", emp.boss_id "emp2.boss_id",
- boss.id "boss.id", boss.name "boss.name" FROM employees AS emp
- JOIN employees AS boss ON emp.boss_id = boss.id
- `)
- if err != nil {
- t.Fatal(err)
- }
-
- for _, em := range employees {
- if len(em.Emp1.Name) == 0 || len(em.Emp2.Name) == 0 {
- t.Errorf("Expected non zero lengthed name.")
- }
- if em.Emp1.BossID.Int64 != em.Boss.ID || em.Emp2.BossID.Int64 != em.Boss.ID {
- t.Errorf("Expected boss ids to match")
- }
- }
- })
-}
-
-func TestSelectSliceMapTime(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- rows, err := db.Queryx("SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- _, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM person")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- m := map[string]interface{}{}
- err := rows.MapScan(m)
- if err != nil {
- t.Error(err)
- }
- }
-
- })
-}
-
-func TestNilReceiver(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- var p *Person
- err := db.Get(p, "SELECT * FROM person LIMIT 1")
- if err == nil {
- t.Error("Expected error when getting into nil struct ptr.")
- }
- var pp *[]Person
- err = db.Select(pp, "SELECT * FROM person")
- if err == nil {
- t.Error("Expected an error when selecting into nil slice ptr.")
- }
- })
-}
-
-func TestNamedQuery(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE place (
- id integer PRIMARY KEY,
- name text NULL
- );
- CREATE TABLE person (
- first_name text NULL,
- last_name text NULL,
- email text NULL
- );
- CREATE TABLE placeperson (
- first_name text NULL,
- last_name text NULL,
- email text NULL,
- place_id integer NULL
- );
- CREATE TABLE jsperson (
- "FIRST" text NULL,
- last_name text NULL,
- "EMAIL" text NULL
- );`,
- drop: `
- drop table person;
- drop table jsperson;
- drop table place;
- drop table placeperson;
- `,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type Person struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
- }
-
- p := Person{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "doe", Valid: true},
- Email: sql.NullString{String: "ben@doe.com", Valid: true},
- }
-
- q1 := `INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)`
- _, err := db.NamedExec(q1, p)
- if err != nil {
- log.Fatal(err)
- }
-
- p2 := &Person{}
- rows, err := db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", p)
- if err != nil {
- log.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(p2)
- if err != nil {
- t.Error(err)
- }
- if p2.FirstName.String != "ben" {
- t.Error("Expected first name of `ben`, got " + p2.FirstName.String)
- }
- if p2.LastName.String != "doe" {
- t.Error("Expected first name of `doe`, got " + p2.LastName.String)
- }
- }
-
- // these are tests for #73; they verify that named queries work if you've
- // changed the db mapper. This code checks both NamedQuery "ad-hoc" style
- // queries and NamedStmt queries, which use different code paths internally.
- old := *db.Mapper
-
- type JSONPerson struct {
- FirstName sql.NullString `json:"FIRST"`
- LastName sql.NullString `json:"last_name"`
- Email sql.NullString
- }
-
- jp := JSONPerson{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "smith", Valid: true},
- Email: sql.NullString{String: "ben@smith.com", Valid: true},
- }
-
- db.Mapper = reflectx.NewMapperFunc("json", strings.ToUpper)
-
- // prepare queries for case sensitivity to test our ToUpper function.
- // postgres and sqlite accept "", but mysql uses ``; since Go's multi-line
- // strings are `` we use "" by default and swap out for MySQL
- pdb := func(s string, db *DB) string {
- if db.DriverName() == "mysql" {
- return strings.Replace(s, `"`, "`", -1)
- }
- return s
- }
-
- q1 = `INSERT INTO jsperson ("FIRST", last_name, "EMAIL") VALUES (:FIRST, :last_name, :EMAIL)`
- _, err = db.NamedExec(pdb(q1, db), jp)
- if err != nil {
- t.Fatal(err, db.DriverName())
- }
-
- // Checks that a person pulled out of the db matches the one we put in
- check := func(t *testing.T, rows *Rows) {
- jp = JSONPerson{}
- for rows.Next() {
- err = rows.StructScan(&jp)
- if err != nil {
- t.Error(err)
- }
- if jp.FirstName.String != "ben" {
- t.Errorf("Expected first name of `ben`, got `%s` (%s) ", jp.FirstName.String, db.DriverName())
- }
- if jp.LastName.String != "smith" {
- t.Errorf("Expected LastName of `smith`, got `%s` (%s)", jp.LastName.String, db.DriverName())
- }
- if jp.Email.String != "ben@smith.com" {
- t.Errorf("Expected first name of `doe`, got `%s` (%s)", jp.Email.String, db.DriverName())
- }
- }
- }
-
- ns, err := db.PrepareNamed(pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db))
-
- if err != nil {
- t.Fatal(err)
- }
- rows, err = ns.Queryx(jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- // Check exactly the same thing, but with db.NamedQuery, which does not go
- // through the PrepareNamed/NamedStmt path.
- rows, err = db.NamedQuery(pdb(`
- SELECT * FROM jsperson
- WHERE
- "FIRST"=:FIRST AND
- last_name=:last_name AND
- "EMAIL"=:EMAIL
- `, db), jp)
- if err != nil {
- t.Fatal(err)
- }
-
- check(t, rows)
-
- db.Mapper = &old
-
- // Test nested structs
- type Place struct {
- ID int `db:"id"`
- Name sql.NullString `db:"name"`
- }
- type PlacePerson struct {
- FirstName sql.NullString `db:"first_name"`
- LastName sql.NullString `db:"last_name"`
- Email sql.NullString
- Place Place `db:"place"`
- }
-
- pl := Place{
- Name: sql.NullString{String: "myplace", Valid: true},
- }
-
- pp := PlacePerson{
- FirstName: sql.NullString{String: "ben", Valid: true},
- LastName: sql.NullString{String: "doe", Valid: true},
- Email: sql.NullString{String: "ben@doe.com", Valid: true},
- }
-
- q2 := `INSERT INTO place (id, name) VALUES (1, :name)`
- _, err = db.NamedExec(q2, pl)
- if err != nil {
- log.Fatal(err)
- }
-
- id := 1
- pp.Place.ID = id
-
- q3 := `INSERT INTO placeperson (first_name, last_name, email, place_id) VALUES (:first_name, :last_name, :email, :place.id)`
- _, err = db.NamedExec(q3, pp)
- if err != nil {
- log.Fatal(err)
- }
-
- pp2 := &PlacePerson{}
- rows, err = db.NamedQuery(`
- SELECT
- first_name,
- last_name,
- email,
- place.id AS "place.id",
- place.name AS "place.name"
- FROM placeperson
- INNER JOIN place ON place.id = placeperson.place_id
- WHERE
- place.id=:place.id`, pp)
- if err != nil {
- log.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(pp2)
- if err != nil {
- t.Error(err)
- }
- if pp2.FirstName.String != "ben" {
- t.Error("Expected first name of `ben`, got " + pp2.FirstName.String)
- }
- if pp2.LastName.String != "doe" {
- t.Error("Expected first name of `doe`, got " + pp2.LastName.String)
- }
- if pp2.Place.Name.String != "myplace" {
- t.Error("Expected place name of `myplace`, got " + pp2.Place.Name.String)
- }
- if pp2.Place.ID != pp.Place.ID {
- t.Errorf("Expected place name of %v, got %v", pp.Place.ID, pp2.Place.ID)
- }
- }
- })
-}
-
-func TestNilInserts(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE tt (
- id integer,
- value text NULL DEFAULT NULL
- );`,
- drop: "drop table tt;",
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type TT struct {
- ID int
- Value *string
- }
- var v, v2 TT
- r := db.Rebind
-
- db.MustExec(r(`INSERT INTO tt (id) VALUES (1)`))
- db.Get(&v, r(`SELECT * FROM tt`))
- if v.ID != 1 {
- t.Errorf("Expecting id of 1, got %v", v.ID)
- }
- if v.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
- }
-
- v.ID = 2
- // NOTE: this incidentally uncovered a bug which was that named queries with
- // pointer destinations would not work if the passed value here was not addressable,
- // as reflectx.FieldByIndexes attempts to allocate nil pointer receivers for
- // writing. This was fixed by creating & using the reflectx.FieldByIndexesReadOnly
- // function. This next line is important as it provides the only coverage for this.
- db.NamedExec(`INSERT INTO tt (id, value) VALUES (:id, :value)`, v)
-
- db.Get(&v2, r(`SELECT * FROM tt WHERE id=2`))
- if v.ID != v2.ID {
- t.Errorf("%v != %v", v.ID, v2.ID)
- }
- if v2.Value != nil {
- t.Errorf("Expecting NULL to map to nil, got %s", *v.Value)
- }
- })
-}
-
-func TestScanError(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE kv (
- k text,
- v integer
- );`,
- drop: `drop table kv;`,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type WrongTypes struct {
- K int
- V string
- }
- _, err := db.Exec(db.Rebind("INSERT INTO kv (k, v) VALUES (?, ?)"), "hi", 1)
- if err != nil {
- t.Error(err)
- }
-
- rows, err := db.Queryx("SELECT * FROM kv")
- if err != nil {
- t.Error(err)
- }
- for rows.Next() {
- var wt WrongTypes
- err := rows.StructScan(&wt)
- if err == nil {
- t.Errorf("%s: Scanning wrong types into keys should have errored.", db.DriverName())
- }
- }
- })
-}
-
-// FIXME: this function is kinda big but it slows things down to be constantly
-// loading and reloading the schema..
-
-func TestUsage(t *testing.T) {
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- slicemembers := []SliceMember{}
- err := db.Select(&slicemembers, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- people := []Person{}
-
- err = db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- jason, john := people[0], people[1]
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting FirstName of Jason, got %s", jason.FirstName)
- }
- if jason.LastName != "Moiron" {
- t.Errorf("Expecting LastName of Moiron, got %s", jason.LastName)
- }
- if jason.Email != "jmoiron@jmoiron.net" {
- t.Errorf("Expecting Email of jmoiron@jmoiron.net, got %s", jason.Email)
- }
- if john.FirstName != "John" || john.LastName != "Doe" || john.Email != "johndoeDNE@gmail.net" {
- t.Errorf("John Doe's person record not what expected: Got %v\n", john)
- }
-
- jason = Person{}
- err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Jason")
-
- if err != nil {
- t.Fatal(err)
- }
- if jason.FirstName != "Jason" {
- t.Errorf("Expecting to get back Jason, but got %v\n", jason.FirstName)
- }
-
- err = db.Get(&jason, db.Rebind("SELECT * FROM person WHERE first_name=?"), "Foobar")
- if err == nil {
- t.Errorf("Expecting an error, got nil\n")
- }
- if err != sql.ErrNoRows {
- t.Errorf("Expected sql.ErrNoRows, got %v\n", err)
- }
-
- // The following tests check statement reuse, which was actually a problem
- // due to copying being done when creating Stmt's which was eventually removed
- stmt1, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
-
- row := stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
- row = stmt1.QueryRowx("DoesNotExist")
- row.Scan(&jason)
-
- err = stmt1.Get(&jason, "DoesNotExist User")
- if err == nil {
- t.Error("Expected an error")
- }
- err = stmt1.Get(&jason, "DoesNotExist User 2")
- if err == nil {
- t.Fatal(err)
- }
-
- stmt2, err := db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- jason = Person{}
- tx, err := db.Beginx()
- if err != nil {
- t.Fatal(err)
- }
- tstmt2 := tx.Stmtx(stmt2)
- row2 := tstmt2.QueryRowx("Jason")
- err = row2.StructScan(&jason)
- if err != nil {
- t.Error(err)
- }
- tx.Commit()
-
- places := []*Place{}
- err = db.Select(&places, "SELECT telcode FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- usa, singsing, honkers := places[0], places[1], places[2]
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- placesptr := []PlacePtr{}
- err = db.Select(&placesptr, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Error(err)
- }
- //fmt.Printf("%#v\n%#v\n%#v\n", placesptr[0], placesptr[1], placesptr[2])
-
- // if you have null fields and use SELECT *, you must use sql.Null* in your struct
- // this test also verifies that you can use either a []Struct{} or a []*Struct{}
- places2 := []Place{}
- err = db.Select(&places2, "SELECT * FROM place ORDER BY telcode ASC")
- if err != nil {
- t.Fatal(err)
- }
-
- usa, singsing, honkers = &places2[0], &places2[1], &places2[2]
-
- // this should return a type error that &p is not a pointer to a struct slice
- p := Place{}
- err = db.Select(&p, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice")
- }
-
- // this should be an error
- pl := []Place{}
- err = db.Select(pl, "SELECT * FROM place ORDER BY telcode ASC")
- if err == nil {
- t.Errorf("Expected an error, argument to select should be a pointer to a struct slice, not a slice.")
- }
-
- if usa.TelCode != 1 || honkers.TelCode != 852 || singsing.TelCode != 65 {
- t.Errorf("Expected integer telcodes to work, got %#v", places)
- }
-
- stmt, err := db.Preparex(db.Rebind("SELECT country, telcode FROM place WHERE telcode > ? ORDER BY telcode ASC"))
- if err != nil {
- t.Error(err)
- }
-
- places = []*Place{}
- err = stmt.Select(&places, 10)
- if len(places) != 2 {
- t.Error("Expected 2 places, got 0.")
- }
- if err != nil {
- t.Fatal(err)
- }
- singsing, honkers = places[0], places[1]
- if singsing.TelCode != 65 || honkers.TelCode != 852 {
- t.Errorf("Expected the right telcodes, got %#v", places)
- }
-
- rows, err := db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- place := Place{}
- for rows.Next() {
- err = rows.StructScan(&place)
- if err != nil {
- t.Fatal(err)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- m := map[string]interface{}{}
- for rows.Next() {
- err = rows.MapScan(m)
- if err != nil {
- t.Fatal(err)
- }
- _, ok := m["country"]
- if !ok {
- t.Errorf("Expected key `country` in map but could not find it (%#v)\n", m)
- }
- }
-
- rows, err = db.Queryx("SELECT * FROM place")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- s, err := rows.SliceScan()
- if err != nil {
- t.Error(err)
- }
- if len(s) != 3 {
- t.Errorf("Expected 3 columns in result, got %d\n", len(s))
- }
- }
-
- // test advanced querying
- // test that NamedExec works with a map as well as a struct
- _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first, :last, :email)", map[string]interface{}{
- "first": "Bin",
- "last": "Smuth",
- "email": "bensmith@allblacks.nz",
- })
- if err != nil {
- t.Fatal(err)
- }
-
- // ensure that if the named param happens right at the end it still works
- // ensure that NamedQuery works with a map[string]interface{}
- rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first", map[string]interface{}{"first": "Bin"})
- if err != nil {
- t.Fatal(err)
- }
-
- ben := &Person{}
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Bin" {
- t.Fatal("Expected first name of `Bin`, got " + ben.FirstName)
- }
- if ben.LastName != "Smuth" {
- t.Fatal("Expected first name of `Smuth`, got " + ben.LastName)
- }
- }
-
- ben.FirstName = "Ben"
- ben.LastName = "Smith"
- ben.Email = "binsmuth@allblacks.nz"
-
- // Insert via a named query using the struct
- _, err = db.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", ben)
-
- if err != nil {
- t.Fatal(err)
- }
-
- rows, err = db.NamedQuery("SELECT * FROM person WHERE first_name=:first_name", ben)
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
- // ensure that Get does not panic on emppty result set
- person := &Person{}
- err = db.Get(person, "SELECT * FROM person WHERE first_name=$1", "does-not-exist")
- if err == nil {
- t.Fatal("Should have got an error for Get on non-existant row.")
- }
-
- // lets test prepared statements some more
-
- stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Fatal(err)
- }
- rows, err = stmt.Queryx("Ben")
- if err != nil {
- t.Fatal(err)
- }
- for rows.Next() {
- err = rows.StructScan(ben)
- if err != nil {
- t.Fatal(err)
- }
- if ben.FirstName != "Ben" {
- t.Fatal("Expected first name of `Ben`, got " + ben.FirstName)
- }
- if ben.LastName != "Smith" {
- t.Fatal("Expected first name of `Smith`, got " + ben.LastName)
- }
- }
-
- john = Person{}
- stmt, err = db.Preparex(db.Rebind("SELECT * FROM person WHERE first_name=?"))
- if err != nil {
- t.Error(err)
- }
- err = stmt.Get(&john, "John")
- if err != nil {
- t.Error(err)
- }
-
- // test name mapping
- // THIS USED TO WORK BUT WILL NO LONGER WORK.
- db.MapperFunc(strings.ToUpper)
- rsa := CPlace{}
- err = db.Get(&rsa, "SELECT * FROM capplace;")
- if err != nil {
- t.Error(err, "in db:", db.DriverName())
- }
- db.MapperFunc(strings.ToLower)
-
- // create a copy and change the mapper, then verify the copy behaves
- // differently from the original.
- dbCopy := NewDb(db.DB, db.DriverName())
- dbCopy.MapperFunc(strings.ToUpper)
- err = dbCopy.Get(&rsa, "SELECT * FROM capplace;")
- if err != nil {
- fmt.Println(db.DriverName())
- t.Error(err)
- }
-
- err = db.Get(&rsa, "SELECT * FROM cappplace;")
- if err == nil {
- t.Error("Expected no error, got ", err)
- }
-
- // test base type slices
- var sdest []string
- rows, err = db.Queryx("SELECT email FROM person ORDER BY email ASC;")
- if err != nil {
- t.Error(err)
- }
- err = scanAll(rows, &sdest, false)
- if err != nil {
- t.Error(err)
- }
-
- // test Get with base types
- var count int
- err = db.Get(&count, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if count != len(sdest) {
- t.Errorf("Expected %d == %d (count(*) vs len(SELECT ..)", count, len(sdest))
- }
-
- // test Get and Select with time.Time, #84
- var addedAt time.Time
- err = db.Get(&addedAt, "SELECT added_at FROM person LIMIT 1;")
- if err != nil {
- t.Error(err)
- }
-
- var addedAts []time.Time
- err = db.Select(&addedAts, "SELECT added_at FROM person;")
- if err != nil {
- t.Error(err)
- }
-
- // test it on a double pointer
- var pcount *int
- err = db.Get(&pcount, "SELECT count(*) FROM person;")
- if err != nil {
- t.Error(err)
- }
- if *pcount != count {
- t.Errorf("expected %d = %d", *pcount, count)
- }
-
- // test Select...
- sdest = []string{}
- err = db.Select(&sdest, "SELECT first_name FROM person ORDER BY first_name ASC;")
- if err != nil {
- t.Error(err)
- }
- expected := []string{"Ben", "Bin", "Jason", "John"}
- for i, got := range sdest {
- if got != expected[i] {
- t.Errorf("Expected %d result to be %s, but got %s", i, expected[i], got)
- }
- }
-
- var nsdest []sql.NullString
- err = db.Select(&nsdest, "SELECT city FROM place ORDER BY city ASC")
- if err != nil {
- t.Error(err)
- }
- for _, val := range nsdest {
- if val.Valid && val.String != "New York" {
- t.Errorf("expected single valid result to be `New York`, but got %s", val.String)
- }
- }
- })
-}
-
-type Product struct {
- ProductID int
-}
-
-// tests that sqlx will not panic when the wrong driver is passed because
-// of an automatic nil dereference in sqlx.Open(), which was fixed.
-func TestDoNotPanicOnConnect(t *testing.T) {
- _, err := Connect("bogus", "hehe")
- if err == nil {
- t.Errorf("Should return error when using bogus driverName")
- }
-}
-
-func TestRebind(t *testing.T) {
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
-
- s1 := Rebind(DOLLAR, q1)
- s2 := Rebind(DOLLAR, q2)
-
- if s1 != `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)` {
- t.Errorf("q1 failed")
- }
-
- if s2 != `INSERT INTO foo (a, b, c) VALUES ($1, $2, "foo"), ("Hi", $3, $4)` {
- t.Errorf("q2 failed")
- }
-
- s1 = Rebind(NAMED, q1)
- s2 = Rebind(NAMED, q2)
-
- ex1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES ` +
- `(:arg1, :arg2, :arg3, :arg4, :arg5, :arg6, :arg7, :arg8, :arg9, :arg10)`
- if s1 != ex1 {
- t.Error("q1 failed on Named params")
- }
-
- ex2 := `INSERT INTO foo (a, b, c) VALUES (:arg1, :arg2, "foo"), ("Hi", :arg3, :arg4)`
- if s2 != ex2 {
- t.Error("q2 failed on Named params")
- }
-}
-
-func TestBindMap(t *testing.T) {
- // Test that it works..
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- am := map[string]interface{}{
- "name": "Jason Moiron",
- "age": 30,
- "first": "Jason",
- "last": "Moiron",
- }
-
- bq, args, _ := bindMap(QUESTION, q1, am)
- expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Jason Moiron" {
- t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
- }
-
- if args[1].(int) != 30 {
- t.Errorf("Expected 30, got %v\n", args[1])
- }
-
- if args[2].(string) != "Jason" {
- t.Errorf("Expected Jason, got %v\n", args[2])
- }
-
- if args[3].(string) != "Moiron" {
- t.Errorf("Expected Moiron, got %v\n", args[3])
- }
-}
-
-// Test for #117, embedded nil maps
-
-type Message struct {
- Text string `db:"string"`
- Properties PropertyMap `db:"properties"` // Stored as JSON in the database
-}
-
-type PropertyMap map[string]string
-
-// Implement driver.Valuer and sql.Scanner interfaces on PropertyMap
-func (p PropertyMap) Value() (driver.Value, error) {
- if len(p) == 0 {
- return nil, nil
- }
- return json.Marshal(p)
-}
-
-func (p PropertyMap) Scan(src interface{}) error {
- v := reflect.ValueOf(src)
- if !v.IsValid() || v.IsNil() {
- return nil
- }
- if data, ok := src.([]byte); ok {
- return json.Unmarshal(data, &p)
- }
- return fmt.Errorf("Could not not decode type %T -> %T", src, p)
-}
-
-func TestEmbeddedMaps(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE message (
- string text,
- properties text
- );`,
- drop: `drop table message;`,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- messages := []Message{
- {"Hello, World", PropertyMap{"one": "1", "two": "2"}},
- {"Thanks, Joy", PropertyMap{"pull": "request"}},
- }
- q1 := `INSERT INTO message (string, properties) VALUES (:string, :properties);`
- for _, m := range messages {
- _, err := db.NamedExec(q1, m)
- if err != nil {
- t.Fatal(err)
- }
- }
- var count int
- err := db.Get(&count, "SELECT count(*) FROM message")
- if err != nil {
- t.Fatal(err)
- }
- if count != len(messages) {
- t.Fatalf("Expected %d messages in DB, found %d", len(messages), count)
- }
-
- var m Message
- err = db.Get(&m, "SELECT * FROM message LIMIT 1;")
- if err != nil {
- t.Fatal(err)
- }
- if m.Properties == nil {
- t.Fatal("Expected m.Properties to not be nil, but it was.")
- }
- })
-}
-
-func TestIssue197(t *testing.T) {
- // this test actually tests for a bug in database/sql:
- // https://github.com/golang/go/issues/13905
- // this potentially makes _any_ named type that is an alias for []byte
- // unsafe to use in a lot of different ways (basically, unsafe to hold
- // onto after loading from the database).
- t.Skip()
-
- type mybyte []byte
- type Var struct{ Raw json.RawMessage }
- type Var2 struct{ Raw []byte }
- type Var3 struct{ Raw mybyte }
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- var err error
- var v, q Var
- if err = db.Get(&v, `SELECT '{"a": "b"}' AS raw`); err != nil {
- t.Fatal(err)
- }
- if err = db.Get(&q, `SELECT 'null' AS raw`); err != nil {
- t.Fatal(err)
- }
-
- var v2, q2 Var2
- if err = db.Get(&v2, `SELECT '{"a": "b"}' AS raw`); err != nil {
- t.Fatal(err)
- }
- if err = db.Get(&q2, `SELECT 'null' AS raw`); err != nil {
- t.Fatal(err)
- }
-
- var v3, q3 Var3
- if err = db.QueryRow(`SELECT '{"a": "b"}' AS raw`).Scan(&v3.Raw); err != nil {
- t.Fatal(err)
- }
- if err = db.QueryRow(`SELECT '{"c": "d"}' AS raw`).Scan(&q3.Raw); err != nil {
- t.Fatal(err)
- }
- t.Fail()
- })
-}
-
-func TestIn(t *testing.T) {
- // some quite normal situations
- type tr struct {
- q string
- args []interface{}
- c int
- }
- tests := []tr{
- {"SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?",
- []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"},
- 7},
- {"SELECT * FROM foo WHERE x in (?)",
- []interface{}{[]int{1, 2, 3, 4, 5, 6, 7, 8}},
- 8},
- }
- for _, test := range tests {
- q, a, err := In(test.q, test.args...)
- if err != nil {
- t.Error(err)
- }
- if len(a) != test.c {
- t.Errorf("Expected %d args, but got %d (%+v)", test.c, len(a), a)
- }
- if strings.Count(q, "?") != test.c {
- t.Errorf("Expected %d bindVars, got %d", test.c, strings.Count(q, "?"))
- }
- }
-
- // too many bindVars, but no slices, so short circuits parsing
- // i'm not sure if this is the right behavior; this query/arg combo
- // might not work, but we shouldn't parse if we don't need to
- {
- orig := "SELECT * FROM foo WHERE x = ? AND y = ?"
- q, a, err := In(orig, "foo", "bar", "baz")
- if err != nil {
- t.Error(err)
- }
- if len(a) != 3 {
- t.Errorf("Expected 3 args, but got %d (%+v)", len(a), a)
- }
- if q != orig {
- t.Error("Expected unchanged query.")
- }
- }
-
- tests = []tr{
- // too many bindvars; slice present so should return error during parse
- {"SELECT * FROM foo WHERE x = ? and y = ?",
- []interface{}{"foo", []int{1, 2, 3}, "bar"},
- 0},
- // empty slice, should return error before parse
- {"SELECT * FROM foo WHERE x = ?",
- []interface{}{[]int{}},
- 0},
- // too *few* bindvars, should return an error
- {"SELECT * FROM foo WHERE x = ? AND y in (?)",
- []interface{}{[]int{1, 2, 3}},
- 0},
- }
- for _, test := range tests {
- _, _, err := In(test.q, test.args...)
- if err == nil {
- t.Error("Expected an error, but got nil.")
- }
- }
- RunWithSchema(defaultSchema, t, func(db *DB, t *testing.T) {
- loadDefaultFixture(db, t)
- //tx.MustExec(tx.Rebind("INSERT INTO place (country, city, telcode) VALUES (?, ?, ?)"), "United States", "New York", "1")
- //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Hong Kong", "852")
- //tx.MustExec(tx.Rebind("INSERT INTO place (country, telcode) VALUES (?, ?)"), "Singapore", "65")
- telcodes := []int{852, 65}
- q := "SELECT * FROM place WHERE telcode IN(?) ORDER BY telcode"
- query, args, err := In(q, telcodes)
- if err != nil {
- t.Error(err)
- }
- query = db.Rebind(query)
- places := []Place{}
- err = db.Select(&places, query, args...)
- if err != nil {
- t.Error(err)
- }
- if len(places) != 2 {
- t.Fatalf("Expecting 2 results, got %d", len(places))
- }
- if places[0].TelCode != 65 {
- t.Errorf("Expecting singapore first, but got %#v", places[0])
- }
- if places[1].TelCode != 852 {
- t.Errorf("Expecting hong kong second, but got %#v", places[1])
- }
- })
-}
-
-func TestBindStruct(t *testing.T) {
- var err error
-
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
-
- type tt struct {
- Name string
- Age int
- First string
- Last string
- }
-
- type tt2 struct {
- Field1 string `db:"field_1"`
- Field2 string `db:"field_2"`
- }
-
- type tt3 struct {
- tt2
- Name string
- }
-
- am := tt{"Jason Moiron", 30, "Jason", "Moiron"}
-
- bq, args, _ := bindStruct(QUESTION, q1, am, mapper())
- expect := `INSERT INTO foo (a, b, c, d) VALUES (?, ?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Jason Moiron" {
- t.Errorf("Expected `Jason Moiron`, got %v\n", args[0])
- }
-
- if args[1].(int) != 30 {
- t.Errorf("Expected 30, got %v\n", args[1])
- }
-
- if args[2].(string) != "Jason" {
- t.Errorf("Expected Jason, got %v\n", args[2])
- }
-
- if args[3].(string) != "Moiron" {
- t.Errorf("Expected Moiron, got %v\n", args[3])
- }
-
- am2 := tt2{"Hello", "World"}
- bq, args, _ = bindStruct(QUESTION, "INSERT INTO foo (a, b) VALUES (:field_2, :field_1)", am2, mapper())
- expect = `INSERT INTO foo (a, b) VALUES (?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "World" {
- t.Errorf("Expected 'World', got %s\n", args[0].(string))
- }
- if args[1].(string) != "Hello" {
- t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
- }
-
- am3 := tt3{Name: "Hello!"}
- am3.Field1 = "Hello"
- am3.Field2 = "World"
-
- bq, args, err = bindStruct(QUESTION, "INSERT INTO foo (a, b, c) VALUES (:name, :field_1, :field_2)", am3, mapper())
-
- if err != nil {
- t.Fatal(err)
- }
-
- expect = `INSERT INTO foo (a, b, c) VALUES (?, ?, ?)`
- if bq != expect {
- t.Errorf("Interpolation of query failed: got `%v`, expected `%v`\n", bq, expect)
- }
-
- if args[0].(string) != "Hello!" {
- t.Errorf("Expected 'Hello!', got %s\n", args[0].(string))
- }
- if args[1].(string) != "Hello" {
- t.Errorf("Expected 'Hello', got %s\n", args[1].(string))
- }
- if args[2].(string) != "World" {
- t.Errorf("Expected 'World', got %s\n", args[0].(string))
- }
-}
-
-func TestEmbeddedLiterals(t *testing.T) {
- var schema = Schema{
- create: `
- CREATE TABLE x (
- k text
- );`,
- drop: `drop table x;`,
- }
-
- RunWithSchema(schema, t, func(db *DB, t *testing.T) {
- type t1 struct {
- K *string
- }
- type t2 struct {
- Inline struct {
- F string
- }
- K *string
- }
-
- db.MustExec(db.Rebind("INSERT INTO x (k) VALUES (?), (?), (?);"), "one", "two", "three")
-
- target := t1{}
- err := db.Get(&target, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target.K != "one" {
- t.Error("Expected target.K to be `one`, got ", target.K)
- }
-
- target2 := t2{}
- err = db.Get(&target2, db.Rebind("SELECT * FROM x WHERE k=?"), "one")
- if err != nil {
- t.Error(err)
- }
- if *target2.K != "one" {
- t.Errorf("Expected target2.K to be `one`, got `%v`", target2.K)
- }
- })
-}
-
-func BenchmarkBindStruct(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- type t struct {
- Name string
- Age int
- First string
- Last string
- }
- am := t{"Jason Moiron", 30, "Jason", "Moiron"}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- bindStruct(DOLLAR, q1, am, mapper())
- }
-}
-
-func BenchmarkBindMap(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d) VALUES (:name, :age, :first, :last)`
- am := map[string]interface{}{
- "name": "Jason Moiron",
- "age": 30,
- "first": "Jason",
- "last": "Moiron",
- }
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- bindMap(DOLLAR, q1, am)
- }
-}
-
-func BenchmarkIn(b *testing.B) {
- q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
-
- for i := 0; i < b.N; i++ {
- _, _, _ = In(q, []interface{}{"foo", []int{0, 5, 7, 2, 9}, "bar"}...)
- }
-}
-
-func BenchmarkIn1k(b *testing.B) {
- q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
-
- var vals [1000]interface{}
-
- for i := 0; i < b.N; i++ {
- _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
- }
-}
-
-func BenchmarkIn1kInt(b *testing.B) {
- q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
-
- var vals [1000]int
-
- for i := 0; i < b.N; i++ {
- _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
- }
-}
-
-func BenchmarkIn1kString(b *testing.B) {
- q := `SELECT * FROM foo WHERE x = ? AND v in (?) AND y = ?`
-
- var vals [1000]string
-
- for i := 0; i < b.N; i++ {
- _, _, _ = In(q, []interface{}{"foo", vals[:], "bar"}...)
- }
-}
-
-func BenchmarkRebind(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- Rebind(DOLLAR, q1)
- Rebind(DOLLAR, q2)
- }
-}
-
-func BenchmarkRebindBuffer(b *testing.B) {
- b.StopTimer()
- q1 := `INSERT INTO foo (a, b, c, d, e, f, g, h, i) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`
- q2 := `INSERT INTO foo (a, b, c) VALUES (?, ?, "foo"), ("Hi", ?, ?)`
- b.StartTimer()
-
- for i := 0; i < b.N; i++ {
- rebindBuff(DOLLAR, q1)
- rebindBuff(DOLLAR, q2)
- }
-}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..588ceca
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.4.3
+ - 1.5.4
+ - 1.6.2
+ - 1.7.1
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..273db3c
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,52 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Contributing
+
+We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.
+
+Before proposing a change, please discuss your change by raising an issue.
+
+## Licence
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..842ee80
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,269 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// and the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and an a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// causer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// Where errors.StackTrace is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d", f)
+// }
+// }
+//
+// stackTracer interface is not exported by this package, but is considered a part
+// of stable public API.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is call, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..6b1f289
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,178 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s path of source file relative to the compile time GOPATH
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ pc := f.pc()
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ io.WriteString(s, "unknown")
+ } else {
+ file, _ := fn.FileLine(pc)
+ fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+ }
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ fmt.Fprintf(s, "%d", f.line())
+ case 'n':
+ name := runtime.FuncForPC(f.pc()).Name()
+ io.WriteString(s, funcname(name))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ fmt.Fprintf(s, "\n%+v", f)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ fmt.Fprintf(s, "%v", []Frame(st))
+ }
+ case 's':
+ fmt.Fprintf(s, "%s", []Frame(st))
+ }
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+ // Here we want to get the source file path relative to the compile time
+ // GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+ // GOPATH at runtime, but we can infer the number of path segments in the
+ // GOPATH. We note that fn.Name() returns the function name qualified by
+ // the import path, which does not include the GOPATH. Thus we can trim
+ // segments from the beginning of the file path until the number of path
+ // separators remaining is one more than the number of path separators in
+ // the function name. For example, given:
+ //
+ // GOPATH /home/user
+ // file /home/user/src/pkg/sub/file.go
+ // fn.Name() pkg/sub.Type.Method
+ //
+ // We want to produce:
+ //
+ // pkg/sub/file.go
+ //
+ // From this we can easily see that fn.Name() has one less path separator
+ // than our desired output. We count separators from the end of the file
+ // path until it finds two more than in the function name and then move
+ // one character forward to preserve the initial path segment without a
+ // leading separator.
+ const sep = "/"
+ goal := strings.Count(name, sep) + 2
+ i := len(file)
+ for n := 0; n < goal; n++ {
+ i = strings.LastIndex(file[:i], sep)
+ if i == -1 {
+ // not enough separators found, set i so that the slice expression
+ // below leaves file unmodified
+ i = -len(sep)
+ break
+ }
+ }
+ // get back to 0 or trim the leading separator
+ file = file[i+len(sep):]
+ return file
+}
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 0000000..a3c021d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// func DoSomething(ctx context.Context, arg Arg) error {
+// // ... use ctx ...
+// }
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+ return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+ return todo
+}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 0000000..d20f52b
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+ "context" // standard library's context, as of Go 1.7
+ "time"
+)
+
+var (
+ todo = context.TODO()
+ background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ ctx, f := context.WithCancel(parent)
+ return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ ctx, f := context.WithDeadline(parent, deadline)
+ return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return context.WithValue(parent, key, val)
+}
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 0000000..d88bd1d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 0000000..0f35592
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case background:
+ return "context.Background"
+ case todo:
+ return "context.TODO"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ background = new(emptyCtx)
+ todo = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+ c := newCancelCtx(parent)
+ propagateCancel(parent, c)
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+ return &cancelCtx{
+ Context: parent,
+ done: make(chan struct{}),
+ }
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+ if parent.Done() == nil {
+ return // parent is never canceled
+ }
+ if p, ok := parentCancelCtx(parent); ok {
+ p.mu.Lock()
+ if p.err != nil {
+ // parent has already been canceled
+ child.cancel(false, p.err)
+ } else {
+ if p.children == nil {
+ p.children = make(map[canceler]bool)
+ }
+ p.children[child] = true
+ }
+ p.mu.Unlock()
+ } else {
+ go func() {
+ select {
+ case <-parent.Done():
+ child.cancel(false, parent.Err())
+ case <-child.Done():
+ }
+ }()
+ }
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+ for {
+ switch c := parent.(type) {
+ case *cancelCtx:
+ return c, true
+ case *timerCtx:
+ return c.cancelCtx, true
+ case *valueCtx:
+ parent = c.Context
+ default:
+ return nil, false
+ }
+ }
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+ p, ok := parentCancelCtx(parent)
+ if !ok {
+ return
+ }
+ p.mu.Lock()
+ if p.children != nil {
+ delete(p.children, child)
+ }
+ p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+ cancel(removeFromParent bool, err error)
+ Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+ Context
+
+ done chan struct{} // closed by the first cancel call.
+
+ mu sync.Mutex
+ children map[canceler]bool // set to nil by the first cancel call
+ err error // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+ return c.done
+}
+
+func (c *cancelCtx) Err() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.err
+}
+
+func (c *cancelCtx) String() string {
+ return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+ if err == nil {
+ panic("context: internal error: missing cancel error")
+ }
+ c.mu.Lock()
+ if c.err != nil {
+ c.mu.Unlock()
+ return // already canceled
+ }
+ c.err = err
+ close(c.done)
+ for child := range c.children {
+ // NOTE: acquiring the child's lock while holding parent's lock.
+ child.cancel(false, err)
+ }
+ c.children = nil
+ c.mu.Unlock()
+
+ if removeFromParent {
+ removeChild(c.Context, c)
+ }
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+ if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+ // The current deadline is already sooner than the new one.
+ return WithCancel(parent)
+ }
+ c := &timerCtx{
+ cancelCtx: newCancelCtx(parent),
+ deadline: deadline,
+ }
+ propagateCancel(parent, c)
+ d := deadline.Sub(time.Now())
+ if d <= 0 {
+ c.cancel(true, DeadlineExceeded) // deadline has already passed
+ return c, func() { c.cancel(true, Canceled) }
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.err == nil {
+ c.timer = time.AfterFunc(d, func() {
+ c.cancel(true, DeadlineExceeded)
+ })
+ }
+ return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+ *cancelCtx
+ timer *time.Timer // Under cancelCtx.mu.
+
+ deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+ return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+ return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+ c.cancelCtx.cancel(false, err)
+ if removeFromParent {
+ // Remove this timerCtx from its parent cancelCtx's children.
+ removeChild(c.cancelCtx.Context, c)
+ }
+ c.mu.Lock()
+ if c.timer != nil {
+ c.timer.Stop()
+ c.timer = nil
+ }
+ c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// defer cancel() // releases resources if slowOperation completes before timeout elapses
+// return slowOperation(ctx)
+// }
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+ return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+ return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+ Context
+ key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+ return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+ if c.key == key {
+ return c.val
+ }
+ return c.Context.Value(key)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 0000000..b105f80
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stores using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/golang.org/x/time/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/golang.org/x/time/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/time/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/time/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
new file mode 100644
index 0000000..eabcd11
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -0,0 +1,380 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rate provides a rate limiter.
+package rate
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+)
+
+// Limit defines the maximum frequency of some events.
+// Limit is represented as number of events per second.
+// A zero Limit allows no events.
+type Limit float64
+
+// Inf is the infinite rate limit; it allows all events (even if burst is zero).
+const Inf = Limit(math.MaxFloat64)
+
+// Every converts a minimum time interval between events to a Limit.
+func Every(interval time.Duration) Limit {
+ if interval <= 0 {
+ return Inf
+ }
+ return 1 / Limit(interval.Seconds())
+}
+
+// A Limiter controls how frequently events are allowed to happen.
+// It implements a "token bucket" of size b, initially full and refilled
+// at rate r tokens per second.
+// Informally, in any large enough time interval, the Limiter limits the
+// rate to r tokens per second, with a maximum burst size of b events.
+// As a special case, if r == Inf (the infinite rate), b is ignored.
+// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets.
+//
+// The zero value is a valid Limiter, but it will reject all events.
+// Use NewLimiter to create non-zero Limiters.
+//
+// Limiter has three main methods, Allow, Reserve, and Wait.
+// Most callers should use Wait.
+//
+// Each of the three methods consumes a single token.
+// They differ in their behavior when no token is available.
+// If no token is available, Allow returns false.
+// If no token is available, Reserve returns a reservation for a future token
+// and the amount of time the caller must wait before using it.
+// If no token is available, Wait blocks until one can be obtained
+// or its associated context.Context is canceled.
+//
+// The methods AllowN, ReserveN, and WaitN consume n tokens.
+type Limiter struct {
+ limit Limit
+ burst int
+
+ mu sync.Mutex
+ tokens float64
+ // last is the last time the limiter's tokens field was updated
+ last time.Time
+ // lastEvent is the latest time of a rate-limited event (past or future)
+ lastEvent time.Time
+}
+
+// Limit returns the maximum overall event rate.
+func (lim *Limiter) Limit() Limit {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+ return lim.limit
+}
+
+// Burst returns the maximum burst size. Burst is the maximum number of tokens
+// that can be consumed in a single call to Allow, Reserve, or Wait, so higher
+// Burst values allow more events to happen at once.
+// A zero Burst allows no events, unless limit == Inf.
+func (lim *Limiter) Burst() int {
+ return lim.burst
+}
+
+// NewLimiter returns a new Limiter that allows events up to rate r and permits
+// bursts of at most b tokens.
+func NewLimiter(r Limit, b int) *Limiter {
+ return &Limiter{
+ limit: r,
+ burst: b,
+ }
+}
+
+// Allow is shorthand for AllowN(time.Now(), 1).
+func (lim *Limiter) Allow() bool {
+ return lim.AllowN(time.Now(), 1)
+}
+
+// AllowN reports whether n events may happen at time now.
+// Use this method if you intend to drop / skip events that exceed the rate limit.
+// Otherwise use Reserve or Wait.
+func (lim *Limiter) AllowN(now time.Time, n int) bool {
+ return lim.reserveN(now, n, 0).ok
+}
+
+// A Reservation holds information about events that are permitted by a Limiter to happen after a delay.
+// A Reservation may be canceled, which may enable the Limiter to permit additional events.
+type Reservation struct {
+ ok bool
+ lim *Limiter
+ tokens int
+ timeToAct time.Time
+ // This is the Limit at reservation time, it can change later.
+ limit Limit
+}
+
+// OK returns whether the limiter can provide the requested number of tokens
+// within the maximum wait time. If OK is false, Delay returns InfDuration, and
+// Cancel does nothing.
+func (r *Reservation) OK() bool {
+ return r.ok
+}
+
+// Delay is shorthand for DelayFrom(time.Now()).
+func (r *Reservation) Delay() time.Duration {
+ return r.DelayFrom(time.Now())
+}
+
+// InfDuration is the duration returned by Delay when a Reservation is not OK.
+const InfDuration = time.Duration(1<<63 - 1)
+
+// DelayFrom returns the duration for which the reservation holder must wait
+// before taking the reserved action. Zero duration means act immediately.
+// InfDuration means the limiter cannot grant the tokens requested in this
+// Reservation within the maximum wait time.
+func (r *Reservation) DelayFrom(now time.Time) time.Duration {
+ if !r.ok {
+ return InfDuration
+ }
+ delay := r.timeToAct.Sub(now)
+ if delay < 0 {
+ return 0
+ }
+ return delay
+}
+
+// Cancel is shorthand for CancelAt(time.Now()).
+func (r *Reservation) Cancel() {
+ r.CancelAt(time.Now())
+ return
+}
+
+// CancelAt indicates that the reservation holder will not perform the reserved action
+// and reverses the effects of this Reservation on the rate limit as much as possible,
+// considering that other reservations may have already been made.
+func (r *Reservation) CancelAt(now time.Time) {
+ if !r.ok {
+ return
+ }
+
+ r.lim.mu.Lock()
+ defer r.lim.mu.Unlock()
+
+ if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) {
+ return
+ }
+
+ // calculate tokens to restore
+ // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved
+ // after r was obtained. These tokens should not be restored.
+ restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct))
+ if restoreTokens <= 0 {
+ return
+ }
+ // advance time to now
+ now, _, tokens := r.lim.advance(now)
+ // calculate new number of tokens
+ tokens += restoreTokens
+ if burst := float64(r.lim.burst); tokens > burst {
+ tokens = burst
+ }
+ // update state
+ r.lim.last = now
+ r.lim.tokens = tokens
+ if r.timeToAct == r.lim.lastEvent {
+ prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
+ if !prevEvent.Before(now) {
+ r.lim.lastEvent = prevEvent
+ }
+ }
+
+ return
+}
+
+// Reserve is shorthand for ReserveN(time.Now(), 1).
+func (lim *Limiter) Reserve() *Reservation {
+ return lim.ReserveN(time.Now(), 1)
+}
+
+// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen.
+// The Limiter takes this Reservation into account when allowing future events.
+// ReserveN returns a Reservation that is not OK if n exceeds the Limiter's burst size.
+// Usage example:
+// r := lim.ReserveN(time.Now(), 1)
+// if !r.OK() {
+// // Not allowed to act! Did you remember to set lim.burst to be > 0 ?
+// return
+// }
+// time.Sleep(r.Delay())
+// Act()
+// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events.
+// If you need to respect a deadline or cancel the delay, use Wait instead.
+// To drop or skip events exceeding rate limit, use Allow instead.
+func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation {
+ r := lim.reserveN(now, n, InfDuration)
+ return &r
+}
+
+// contextContext is a temporary(?) copy of the context.Context type
+// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+
+// with the built-in context package. If people ever stop using Go 1.6
+// we can remove this.
+type contextContext interface {
+ Deadline() (deadline time.Time, ok bool)
+ Done() <-chan struct{}
+ Err() error
+ Value(key interface{}) interface{}
+}
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) wait(ctx contextContext) (err error) {
+ return lim.WaitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+// The burst limit is ignored if the rate limit is Inf.
+func (lim *Limiter) waitN(ctx contextContext, n int) (err error) {
+ if n > lim.burst && lim.limit != Inf {
+ return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst)
+ }
+	// Check if ctx is already canceled
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ // Determine wait limit
+ now := time.Now()
+ waitLimit := InfDuration
+ if deadline, ok := ctx.Deadline(); ok {
+ waitLimit = deadline.Sub(now)
+ }
+ // Reserve
+ r := lim.reserveN(now, n, waitLimit)
+ if !r.ok {
+ return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n)
+ }
+ // Wait
+ t := time.NewTimer(r.DelayFrom(now))
+ defer t.Stop()
+ select {
+ case <-t.C:
+ // We can proceed.
+ return nil
+ case <-ctx.Done():
+ // Context was canceled before we could proceed. Cancel the
+ // reservation, which may permit other events to proceed sooner.
+ r.Cancel()
+ return ctx.Err()
+ }
+}
+
+// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit).
+func (lim *Limiter) SetLimit(newLimit Limit) {
+ lim.SetLimitAt(time.Now(), newLimit)
+}
+
+// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated
+// or underutilized by those which reserved (using Reserve or Wait) but did not yet act
+// before SetLimitAt was called.
+func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) {
+ lim.mu.Lock()
+ defer lim.mu.Unlock()
+
+ now, _, tokens := lim.advance(now)
+
+ lim.last = now
+ lim.tokens = tokens
+ lim.limit = newLimit
+}
+
+// reserveN is a helper method for AllowN, ReserveN, and WaitN.
+// maxFutureReserve specifies the maximum reservation wait duration allowed.
+// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN.
+func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation {
+ lim.mu.Lock()
+
+ if lim.limit == Inf {
+ lim.mu.Unlock()
+ return Reservation{
+ ok: true,
+ lim: lim,
+ tokens: n,
+ timeToAct: now,
+ }
+ }
+
+ now, last, tokens := lim.advance(now)
+
+ // Calculate the remaining number of tokens resulting from the request.
+ tokens -= float64(n)
+
+ // Calculate the wait duration
+ var waitDuration time.Duration
+ if tokens < 0 {
+ waitDuration = lim.limit.durationFromTokens(-tokens)
+ }
+
+ // Decide result
+ ok := n <= lim.burst && waitDuration <= maxFutureReserve
+
+ // Prepare reservation
+ r := Reservation{
+ ok: ok,
+ lim: lim,
+ limit: lim.limit,
+ }
+ if ok {
+ r.tokens = n
+ r.timeToAct = now.Add(waitDuration)
+ }
+
+ // Update state
+ if ok {
+ lim.last = now
+ lim.tokens = tokens
+ lim.lastEvent = r.timeToAct
+ } else {
+ lim.last = last
+ }
+
+ lim.mu.Unlock()
+ return r
+}
+
+// advance calculates and returns an updated state for lim resulting from the passage of time.
+// lim is not changed.
+func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) {
+ last := lim.last
+ if now.Before(last) {
+ last = now
+ }
+
+ // Avoid making delta overflow below when last is very old.
+ maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens)
+ elapsed := now.Sub(last)
+ if elapsed > maxElapsed {
+ elapsed = maxElapsed
+ }
+
+ // Calculate the new number of tokens, due to time that passed.
+ delta := lim.limit.tokensFromDuration(elapsed)
+ tokens := lim.tokens + delta
+ if burst := float64(lim.burst); tokens > burst {
+ tokens = burst
+ }
+
+ return now, last, tokens
+}
+
+// durationFromTokens is a unit conversion function from the number of tokens to the duration
+// of time it takes to accumulate them at a rate of limit tokens per second.
+func (limit Limit) durationFromTokens(tokens float64) time.Duration {
+ seconds := tokens / float64(limit)
+ return time.Nanosecond * time.Duration(1e9*seconds)
+}
+
+// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
+// which could be accumulated during that duration at a rate of limit tokens per second.
+func (limit Limit) tokensFromDuration(d time.Duration) float64 {
+ return d.Seconds() * float64(limit)
+}
diff --git a/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/golang.org/x/time/rate/rate_go16.go
new file mode 100644
index 0000000..6bab185
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate_go16.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package rate
+
+import "golang.org/x/net/context"
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) (err error) {
+ return lim.waitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
+ return lim.waitN(ctx, n)
+}
diff --git a/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/golang.org/x/time/rate/rate_go17.go
new file mode 100644
index 0000000..f90d85f
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate_go17.go
@@ -0,0 +1,21 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package rate
+
+import "context"
+
+// Wait is shorthand for WaitN(ctx, 1).
+func (lim *Limiter) Wait(ctx context.Context) (err error) {
+ return lim.waitN(ctx, 1)
+}
+
+// WaitN blocks until lim permits n events to happen.
+// It returns an error if n exceeds the Limiter's burst size, the Context is
+// canceled, or the expected wait time exceeds the Context's Deadline.
+func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) {
+ return lim.waitN(ctx, n)
+}