aboutsummaryrefslogtreecommitdiff
path: root/vendor/github.com/jackc
diff options
context:
space:
mode:
authorFelix Hanley <felix@userspace.com.au>2018-02-21 04:21:58 +0000
committerFelix Hanley <felix@userspace.com.au>2018-02-21 04:22:32 +0000
commit734db776ce17a234825e83180a547cd3ad28f5e2 (patch)
treed5c289056fb01dfc1f3f438dd4eec21a31a23166 /vendor/github.com/jackc
parente9adf3a2bf8b81615275a6705b7957e43753f0ec (diff)
downloaddhtsearch-734db776ce17a234825e83180a547cd3ad28f5e2.tar.gz
dhtsearch-734db776ce17a234825e83180a547cd3ad28f5e2.tar.bz2
Update vendor deps
Diffstat (limited to 'vendor/github.com/jackc')
-rw-r--r--vendor/github.com/jackc/pgx/.gitignore1
-rw-r--r--vendor/github.com/jackc/pgx/.travis.yml45
-rw-r--r--vendor/github.com/jackc/pgx/CHANGELOG.md111
-rw-r--r--vendor/github.com/jackc/pgx/README.md88
-rw-r--r--vendor/github.com/jackc/pgx/aclitem_parse_test.go126
-rw-r--r--vendor/github.com/jackc/pgx/batch.go313
-rw-r--r--vendor/github.com/jackc/pgx/bench_test.go765
-rw-r--r--vendor/github.com/jackc/pgx/chunkreader/chunkreader.go89
-rw-r--r--vendor/github.com/jackc/pgx/conn.go1562
-rw-r--r--vendor/github.com/jackc/pgx/conn_config_test.go.example3
-rw-r--r--vendor/github.com/jackc/pgx/conn_config_test.go.travis12
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool.go138
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool_private_test.go44
-rw-r--r--vendor/github.com/jackc/pgx/conn_pool_test.go982
-rw-r--r--vendor/github.com/jackc/pgx/conn_test.go1744
-rw-r--r--vendor/github.com/jackc/pgx/copy_from.go90
-rw-r--r--vendor/github.com/jackc/pgx/copy_from_test.go428
-rw-r--r--vendor/github.com/jackc/pgx/copy_to.go222
-rw-r--r--vendor/github.com/jackc/pgx/copy_to_test.go367
-rw-r--r--vendor/github.com/jackc/pgx/doc.go82
-rw-r--r--vendor/github.com/jackc/pgx/example_custom_type_test.go104
-rw-r--r--vendor/github.com/jackc/pgx/example_json_test.go43
-rw-r--r--vendor/github.com/jackc/pgx/fastpath.go65
-rw-r--r--vendor/github.com/jackc/pgx/go_stdlib.go61
-rw-r--r--vendor/github.com/jackc/pgx/helper_test.go74
-rw-r--r--vendor/github.com/jackc/pgx/hstore.go222
-rw-r--r--vendor/github.com/jackc/pgx/hstore_test.go181
-rw-r--r--vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go237
-rw-r--r--vendor/github.com/jackc/pgx/large_objects.go38
-rw-r--r--vendor/github.com/jackc/pgx/large_objects_test.go121
-rw-r--r--vendor/github.com/jackc/pgx/logger.go41
-rw-r--r--vendor/github.com/jackc/pgx/messages.go248
-rw-r--r--vendor/github.com/jackc/pgx/msg_reader.go316
-rw-r--r--vendor/github.com/jackc/pgx/pgio/doc.go6
-rw-r--r--vendor/github.com/jackc/pgx/pgio/write.go40
-rw-r--r--vendor/github.com/jackc/pgx/pgpass_test.go57
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/authentication.go54
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/backend.go110
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go46
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/big_endian.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/bind.go171
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/bind_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/close.go59
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/close_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/command_complete.go48
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_data.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go65
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/data_row.go112
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/describe.go59
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/error_response.go197
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/execute.go60
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/flush.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/frontend.go122
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/function_call_response.go78
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/no_data.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/notice_response.go13
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/notification_response.go67
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parameter_description.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parameter_status.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parse.go83
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/parse_complete.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/password_message.go46
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/pgproto3.go42
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/query.go45
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go35
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/row_description.go100
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/startup_message.go97
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/sync.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgproto3/terminate.go29
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/aclitem.go126
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/aclitem_array.go212
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/array.go352
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bit.go37
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bool.go159
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bool_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/box.go162
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bpchar.go68
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bpchar_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bytea.go156
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/bytea_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cid.go61
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cidr.go31
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/cidr_array.go329
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/circle.go146
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/convert.go424
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/database_sql.go42
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/date.go209
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/date_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/daterange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/decimal.go31
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/enum_array.go212
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float4.go197
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float4_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float8.go187
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/float8_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/generic_binary.go39
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/generic_text.go39
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/hstore.go434
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/hstore_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/inet.go215
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/inet_array.go329
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int2.go209
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int2_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4.go213
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int4range.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8.go199
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/int8range.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/interval.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/json.go161
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/jsonb.go70
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/line.go143
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/lseg.go161
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/macaddr.go154
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/name.go58
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numeric.go600
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numeric_array.go328
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/numrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/oid.go81
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/oid_value.go55
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/path.go193
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/pgtype.go280
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/pguint32.go162
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/point.go139
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/polygon.go174
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/qchar.go146
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/range.go278
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/record.go129
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/text.go163
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/text_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tid.go144
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamp.go225
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamp_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamptz.go221
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go301
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tsrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/tstzrange.go250
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb304
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh24
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb252
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh7
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/unknown.go44
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/uuid.go183
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/uuid_array.go356
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varbit.go133
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varchar.go58
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/varchar_array.go300
-rw-r--r--vendor/github.com/jackc/pgx/pgtype/xid.go64
-rw-r--r--vendor/github.com/jackc/pgx/query.go585
-rw-r--r--vendor/github.com/jackc/pgx/query_test.go1414
-rw-r--r--vendor/github.com/jackc/pgx/replication.go198
-rw-r--r--vendor/github.com/jackc/pgx/replication_test.go329
-rw-r--r--vendor/github.com/jackc/pgx/sql_test.go36
-rw-r--r--vendor/github.com/jackc/pgx/stdlib/sql.go513
-rw-r--r--vendor/github.com/jackc/pgx/stdlib/sql_test.go691
-rw-r--r--vendor/github.com/jackc/pgx/stress_test.go346
-rw-r--r--vendor/github.com/jackc/pgx/tx.go180
-rw-r--r--vendor/github.com/jackc/pgx/tx_test.go297
-rw-r--r--vendor/github.com/jackc/pgx/value_reader.go156
-rw-r--r--vendor/github.com/jackc/pgx/values.go3512
-rw-r--r--vendor/github.com/jackc/pgx/values_test.go1183
165 files changed, 22068 insertions, 15050 deletions
diff --git a/vendor/github.com/jackc/pgx/.gitignore b/vendor/github.com/jackc/pgx/.gitignore
index cb0cd90..0ff0080 100644
--- a/vendor/github.com/jackc/pgx/.gitignore
+++ b/vendor/github.com/jackc/pgx/.gitignore
@@ -22,3 +22,4 @@ _testmain.go
*.exe
conn_config_test.go
+.envrc
diff --git a/vendor/github.com/jackc/pgx/.travis.yml b/vendor/github.com/jackc/pgx/.travis.yml
index d9ea43b..6d4b3cd 100644
--- a/vendor/github.com/jackc/pgx/.travis.yml
+++ b/vendor/github.com/jackc/pgx/.travis.yml
@@ -1,59 +1,32 @@
language: go
go:
- - 1.7.4
- - 1.6.4
+ - 1.x
- tip
# Derived from https://github.com/lib/pq/blob/master/.travis.yml
before_install:
- - sudo apt-get remove -y --purge postgresql libpq-dev libpq5 postgresql-client-common postgresql-common
- - sudo rm -rf /var/lib/postgresql
- - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
- - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list"
- - sudo apt-get update -qq
- - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION
- - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host all pgx_md5 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host all pgx_pw 127.0.0.1/32 password" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "hostssl all pgx_ssl 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host replication pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - echo "host pgx_test pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf
- - sudo chmod 777 /etc/postgresql/$PGVERSION/main/postgresql.conf
- - "[[ $PGVERSION < 9.6 ]] || echo \"wal_level='logical'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - "[[ $PGVERSION < 9.6 ]] || echo \"max_wal_senders=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - "[[ $PGVERSION < 9.6 ]] || echo \"max_replication_slots=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf"
- - sudo /etc/init.d/postgresql restart
+ - ./travis/before_install.bash
env:
+ global:
+ - PGX_TEST_DATABASE=postgres://pgx_md5:secret@127.0.0.1/pgx_test
matrix:
+ - CRATEVERSION=2.1
+ - PGVERSION=10
- PGVERSION=9.6
- PGVERSION=9.5
- PGVERSION=9.4
- PGVERSION=9.3
- - PGVERSION=9.2
-# The tricky test user, below, has to actually exist so that it can be used in a test
-# of aclitem formatting. It turns out aclitems cannot contain non-existing users/roles.
before_script:
- - mv conn_config_test.go.travis conn_config_test.go
- - psql -U postgres -c 'create database pgx_test'
- - "[[ \"${PGVERSION}\" = '9.0' ]] && psql -U postgres -f /usr/share/postgresql/9.0/contrib/hstore.sql pgx_test || psql -U postgres pgx_test -c 'create extension hstore'"
- - psql -U postgres -c "create user pgx_ssl SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_md5 SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_pw SUPERUSER PASSWORD 'secret'"
- - psql -U postgres -c "create user pgx_replication with replication password 'secret'"
- - psql -U postgres -c "create user \" tricky, ' } \"\" \\ test user \" superuser password 'secret'"
+ - ./travis/before_script.bash
install:
- - go get -u github.com/shopspring/decimal
- - go get -u gopkg.in/inconshreveable/log15.v2
- - go get -u github.com/jackc/fake
+ - ./travis/install.bash
script:
- - go test -v -race -short ./...
+ - ./travis/script.bash
matrix:
allow_failures:
diff --git a/vendor/github.com/jackc/pgx/CHANGELOG.md b/vendor/github.com/jackc/pgx/CHANGELOG.md
index 88c663b..0bc4162 100644
--- a/vendor/github.com/jackc/pgx/CHANGELOG.md
+++ b/vendor/github.com/jackc/pgx/CHANGELOG.md
@@ -1,4 +1,113 @@
-# Unreleased
+# 3.1.0 (January 15, 2018)
+
+## Features
+
+* Add QueryEx, QueryRowEx, ExecEx, and RollbackEx to Tx
+* Add more ColumnType support (Timothée Peignier)
+* Add UUIDArray type (Kelsey Francis)
+* Add zap log adapter (Kelsey Francis)
+* Add CreateReplicationSlotEx that consistent_point and snapshot_name (Mark Fletcher)
+* Add BeginBatch to Tx (Gaspard Douady)
+* Support CrateDB (Felix Geisendörfer)
+* Allow use of logrus logger with fields configured (André Bierlein)
+* Add array of enum support
+* Add support for bit type
+* Handle timeout parameters (Timothée Peignier)
+* Allow overriding connection info (James Lawrence)
+* Add support for bpchar type (Iurii Krasnoshchok)
+* Add ConnConfig.PreferSimpleProtocol
+
+## Fixes
+
+* Fix numeric EncodeBinary bug (Wei Congrui)
+* Fix logrus updated package name (Damir Vandic)
+* Fix some invalid one round trip execs failing to return non-nil error. (Kelsey Francis)
+* Return ErrClosedPool when Acquire() with closed pool (Mike Graf)
+* Fix decoding row with same type values
+* Always return non-nil \*Rows from Query to fix QueryRow (Kelsey Francis)
+* Fix pgtype types that can Set database/sql/driver.driver.Valuer
+* Prefix types in namespaces other than pg_catalog or public (Kelsey Francis)
+* Fix incomplete selects during batch (Gaspard Douady and Jack Christensen)
+* Support nil pointers to value implementing driver.Valuer
+* Fix time logging for QueryEx
+* Fix ranges with text format where end is unbounded
+* Detect erroneous JSON(B) encoding
+* Fix missing interval mapping
+* ConnPool begin should not retry if ctx is done (Gaspard Douady)
+* Fix reading interrupted messages could break connection
+* Return error on unknown oid while decoding record instead of panic (Iurii Krasnoshchok)
+
+## Changes
+
+* Align sslmode "require" more closely to libpq (Johan Brandhorst)
+
+# 3.0.1 (August 12, 2017)
+
+## Fixes
+
+* Fix compilation on 32-bit platform
+* Fix invalid MarshalJSON of types with status Undefined
+* Fix pid logging
+
+# 3.0.0 (July 24, 2017)
+
+## Changes
+
+* Pid to PID in accordance with Go naming conventions.
+* Conn.Pid changed to accessor method Conn.PID()
+* Conn.SecretKey removed
+* Remove Conn.TxStatus
+* Logger interface reduced to single Log method.
+* Replace BeginIso with BeginEx. BeginEx adds support for read/write mode and deferrable mode.
+* Transaction isolation level constants are now typed strings instead of bare strings.
+* Conn.WaitForNotification now takes context.Context instead of time.Duration for cancellation support.
+* Conn.WaitForNotification no longer automatically pings internally every 15 seconds.
+* ReplicationConn.WaitForReplicationMessage now takes context.Context instead of time.Duration for cancellation support.
+* Reject scanning binary format values into a string (e.g. binary encoded timestamptz to string). See https://github.com/jackc/pgx/issues/219 and https://github.com/jackc/pgx/issues/228
+* No longer can read raw bytes of any value into a []byte. Use pgtype.GenericBinary if this functionality is needed.
+* Remove CopyTo (functionality is now in CopyFrom)
+* OID constants moved from pgx to pgtype package
+* Replaced Scanner, Encoder, and PgxScanner interfaces with pgtype system
+* Removed ValueReader
+* ConnPool.Close no longer waits for all acquired connections to be released. Instead, it immediately closes all available connections, and closes acquired connections when they are released in the same manner as ConnPool.Reset.
+* Removed Rows.Fatal(error)
+* Removed Rows.AfterClose()
+* Removed Rows.Conn()
+* Removed Tx.AfterClose()
+* Removed Tx.Conn()
+* Use Go casing convention for OID, UUID, JSON(B), ACLItem, CID, TID, XID, and CIDR
+* Replaced stdlib.OpenFromConnPool with DriverConfig system
+
+## Features
+
+* Entirely revamped pluggable type system that supports approximately 60 PostgreSQL types.
+* Types support database/sql interfaces and therefore can be used with other drivers
+* Added context methods supporting cancellation where appropriate
+* Added simple query protocol support
+* Added single round-trip query mode
+* Added batch query operations
+* Added OnNotice
+* github.com/pkg/errors used where possible for errors
+* Added stdlib.DriverConfig which directly allows full configuration of underlying pgx connections without needing to use a pgx.ConnPool
+* Added AcquireConn and ReleaseConn to stdlib to allow acquiring a connection from a database/sql connection.
+
+# 2.11.0 (June 5, 2017)
+
+## Fixes
+
+* Fix race with concurrent execution of stdlib.OpenFromConnPool (Terin Stock)
+
+## Features
+
+* .pgpass support (j7b)
+* Add missing CopyFrom delegators to Tx and ConnPool (Jack Christensen)
+* Add ParseConnectionString (James Lawrence)
+
+## Performance
+
+* Optimize HStore encoding (René Kroon)
+
+# 2.10.0 (March 17, 2017)
## Fixes
diff --git a/vendor/github.com/jackc/pgx/README.md b/vendor/github.com/jackc/pgx/README.md
index 5550f6b..1acaabf 100644
--- a/vendor/github.com/jackc/pgx/README.md
+++ b/vendor/github.com/jackc/pgx/README.md
@@ -1,63 +1,68 @@
[![](https://godoc.org/github.com/jackc/pgx?status.svg)](https://godoc.org/github.com/jackc/pgx)
+[![Build Status](https://travis-ci.org/jackc/pgx.svg)](https://travis-ci.org/jackc/pgx)
-# Pgx
+# pgx - PostgreSQL Driver and Toolkit
-## Master Branch
+pgx is a pure Go driver and toolkit for PostgreSQL. pgx is different from other drivers such as [pq](http://godoc.org/github.com/lib/pq) because, while it can operate as a database/sql compatible driver, pgx is also usable directly. It offers a native interface similar to database/sql that offers better performance and more features.
-This is the `master` branch which tracks the stable release of the current
-version. At the moment this is `v2`. The `v3` branch which is currently in beta.
-General release is planned for July. `v3` is considered to be stable in the
-sense of lack of known bugs, but the API is not considered stable until general
-release. No further changes are planned, but the beta process may surface
-desirable changes. If possible API changes are acceptable, then `v3` is the
-recommented branch for new development. Regardless, please lock to the `v2` or
-`v3` branch as when `v3` is released breaking changes will be applied to the
-master branch.
-Pgx is a pure Go database connection library designed specifically for
-PostgreSQL. Pgx is different from other drivers such as
-[pq](http://godoc.org/github.com/lib/pq) because, while it can operate as a
-database/sql compatible driver, pgx is primarily intended to be used directly.
-It offers a native interface similar to database/sql that offers better
-performance and more features.
+```go
+var name string
+var weight int64
+err := conn.QueryRow("select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+if err != nil {
+ return err
+}
+```
## Features
-Pgx supports many additional features beyond what is available through database/sql.
+pgx supports many additional features beyond what is available through database/sql.
-* Listen / notify
-* Transaction isolation level control
+* Support for approximately 60 different PostgreSQL types
+* Batch queries
+* Single-round trip query mode
* Full TLS connection control
* Binary format support for custom types (can be much faster)
-* Copy from protocol support for faster bulk data loads
-* Logging support
-* Configurable connection pool with after connect hooks to do arbitrary connection setup
+* Copy protocol support for faster bulk data loads
+* Extendable logging support including built-in support for log15 and logrus
+* Connection pool with after connect hook to do arbitrary connection setup
+* Listen / notify
* PostgreSQL array to Go slice mapping for integers, floats, and strings
* Hstore support
* JSON and JSONB support
* Maps inet and cidr PostgreSQL types to net.IPNet and net.IP
* Large object support
-* Null mapping to Null* struct or pointer to pointer.
+* NULL mapping to Null* struct or pointer to pointer.
* Supports database/sql.Scanner and database/sql/driver.Valuer interfaces for custom types
* Logical replication connections, including receiving WAL and sending standby status updates
+* Notice response handling (this is different than listen / notify)
## Performance
-Pgx performs roughly equivalent to [pq](http://godoc.org/github.com/lib/pq) and
-[go-pg](https://github.com/go-pg/pg) for selecting a single column from a single
-row, but it is substantially faster when selecting multiple entire rows (6893
-queries/sec for pgx vs. 3968 queries/sec for pq -- 73% faster).
+pgx performs roughly equivalent to [go-pg](https://github.com/go-pg/pg) and is almost always faster than [pq](http://godoc.org/github.com/lib/pq). When parsing large result sets the percentage difference can be significant (16483 queries/sec for pgx vs. 10106 queries/sec for pq -- 63% faster).
+
+In many use cases a significant cause of latency is network round trips between the application and the server. pgx supports query batching to bundle multiple queries into a single round trip. Even in the case of a connection with the lowest possible latency, a local Unix domain socket, batching as few as three queries together can yield an improvement of 57%. With a typical network connection the results can be even more substantial.
+
+See this [gist](https://gist.github.com/jackc/4996e8648a0c59839bff644f49d6e434) for the underlying benchmark results or checkout [go_db_bench](https://github.com/jackc/go_db_bench) to run tests for yourself.
+
+In addition to the native driver, pgx also includes a number of packages that provide additional functionality.
+
+## github.com/jackc/pgx/stdlib
-See this [gist](https://gist.github.com/jackc/d282f39e088b495fba3e) for the
-underlying benchmark results or checkout
-[go_db_bench](https://github.com/jackc/go_db_bench) to run tests for yourself.
+database/sql compatibility layer for pgx. pgx can be used as a normal database/sql driver, but at any time the native interface may be acquired for more performance or PostgreSQL specific functionality.
-## database/sql
+## github.com/jackc/pgx/pgtype
-Import the ```github.com/jackc/pgx/stdlib``` package to use pgx as a driver for
-database/sql. It is possible to retrieve a pgx connection from database/sql on
-demand. This allows using the database/sql interface in most places, but using
-pgx directly when more performance or PostgreSQL specific features are needed.
+Approximately 60 PostgreSQL types are supported including uuid, hstore, json, bytea, numeric, interval, inet, and arrays. These types support database/sql interfaces and are usable even outside of pgx. They are fully tested in pgx and pq. They also support a higher performance interface when used with the pgx driver.
+
+## github.com/jackc/pgx/pgproto3
+
+pgproto3 provides standalone encoding and decoding of the PostgreSQL v3 wire protocol. This is useful for implementing very low level PostgreSQL tooling.
+
+## github.com/jackc/pgx/pgmock
+
+pgmock offers the ability to create a server that mocks the PostgreSQL wire protocol. This is used internally to test pgx by purposely inducing unusual errors. pgproto3 and pgmock together provide most of the foundational tooling required to implement a PostgreSQL proxy or MitM (such as for a custom connection pooler).
## Documentation
@@ -74,8 +79,15 @@ skip tests for connection types that are not configured.
To setup the normal test environment, first install these dependencies:
+ go get github.com/cockroachdb/apd
+ go get github.com/hashicorp/go-version
go get github.com/jackc/fake
+ go get github.com/lib/pq
+ go get github.com/pkg/errors
+ go get github.com/satori/go.uuid
go get github.com/shopspring/decimal
+ go get github.com/sirupsen/logrus
+ go get go.uber.org/zap
go get gopkg.in/inconshreveable/log15.v2
Then run the following SQL:
@@ -132,6 +144,8 @@ Change the following settings in your postgresql.conf:
max_wal_senders=5
max_replication_slots=5
+Set `replicationConnConfig` appropriately in `conn_config_test.go`.
+
## Version Policy
-pgx follows semantic versioning for the documented public API on stable releases. Branch `v2` is the latest stable release. `master` can contain new features or behavior that will change or be removed before being merged to the stable `v2` branch (in practice, this occurs very rarely).
+pgx follows semantic versioning for the documented public API on stable releases. Branch `v3` is the latest stable release. `master` can contain new features or behavior that will change or be removed before being merged to the stable `v3` branch (in practice, this occurs very rarely). `v2` is the previous stable release.
diff --git a/vendor/github.com/jackc/pgx/aclitem_parse_test.go b/vendor/github.com/jackc/pgx/aclitem_parse_test.go
deleted file mode 100644
index 5c7c748..0000000
--- a/vendor/github.com/jackc/pgx/aclitem_parse_test.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package pgx
-
-import (
- "reflect"
- "testing"
-)
-
-func TestEscapeAclItem(t *testing.T) {
- tests := []struct {
- input string
- expected string
- }{
- {
- "foo",
- "foo",
- },
- {
- `foo, "\}`,
- `foo\, \"\\\}`,
- },
- }
-
- for i, tt := range tests {
- actual, err := escapeAclItem(tt.input)
-
- if err != nil {
- t.Errorf("%d. Unexpected error %v", i, err)
- }
-
- if actual != tt.expected {
- t.Errorf("%d.\nexpected: %s,\nactual: %s", i, tt.expected, actual)
- }
- }
-}
-
-func TestParseAclItemArray(t *testing.T) {
- tests := []struct {
- input string
- expected []AclItem
- errMsg string
- }{
- {
- "",
- []AclItem{},
- "",
- },
- {
- "one",
- []AclItem{"one"},
- "",
- },
- {
- `"one"`,
- []AclItem{"one"},
- "",
- },
- {
- "one,two,three",
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","two","three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one",two,"three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `one,two,"three"`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","two",three`,
- []AclItem{"one", "two", "three"},
- "",
- },
- {
- `"one","t w o",three`,
- []AclItem{"one", "t w o", "three"},
- "",
- },
- {
- `"one","t, w o\"\}\\",three`,
- []AclItem{"one", `t, w o"}\`, "three"},
- "",
- },
- {
- `"one","two",three"`,
- []AclItem{"one", "two", `three"`},
- "",
- },
- {
- `"one","two,"three"`,
- nil,
- "unexpected rune after quoted value",
- },
- {
- `"one","two","three`,
- nil,
- "unexpected end of quoted value",
- },
- }
-
- for i, tt := range tests {
- actual, err := parseAclItemArray(tt.input)
-
- if err != nil {
- if tt.errMsg == "" {
- t.Errorf("%d. Unexpected error %v", i, err)
- } else if err.Error() != tt.errMsg {
- t.Errorf("%d. Expected error %v did not match actual error %v", i, tt.errMsg, err.Error())
- }
- } else if tt.errMsg != "" {
- t.Errorf("%d. Expected error not returned: \"%v\"", i, tt.errMsg)
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v did not match actual %v", i, tt.expected, actual)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/batch.go b/vendor/github.com/jackc/pgx/batch.go
new file mode 100644
index 0000000..0d7f14c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/batch.go
@@ -0,0 +1,313 @@
+package pgx
+
+import (
+ "context"
+
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
+)
+
+type batchItem struct {
+ query string
+ arguments []interface{}
+ parameterOIDs []pgtype.OID
+ resultFormatCodes []int16
+}
+
+// Batch queries are a way of bundling multiple queries together to avoid
+// unnecessary network round trips.
+type Batch struct {
+ conn *Conn
+ connPool *ConnPool
+ items []*batchItem
+ resultsRead int
+ pendingCommandComplete bool
+ ctx context.Context
+ err error
+ inTx bool
+}
+
+// BeginBatch returns a *Batch query for c.
+func (c *Conn) BeginBatch() *Batch {
+ return &Batch{conn: c}
+}
+
+// BeginBatch returns a *Batch query for tx. Since this *Batch is already part
+// of a transaction it will not automatically be wrapped in a transaction.
+func (tx *Tx) BeginBatch() *Batch {
+ return &Batch{conn: tx.conn, inTx: true}
+}
+
+// Conn returns the underlying connection that b will or was performed on.
+func (b *Batch) Conn() *Conn {
+ return b.conn
+}
+
+// Queue queues a query to batch b. parameterOIDs are required if there are
+// parameters and query is not the name of a prepared statement.
+// resultFormatCodes are required if there is a result.
+func (b *Batch) Queue(query string, arguments []interface{}, parameterOIDs []pgtype.OID, resultFormatCodes []int16) {
+ b.items = append(b.items, &batchItem{
+ query: query,
+ arguments: arguments,
+ parameterOIDs: parameterOIDs,
+ resultFormatCodes: resultFormatCodes,
+ })
+}
+
+// Send sends all queued queries to the server at once.
+// If the batch is created from a conn object then all queries are wrapped
+// in a transaction. The transaction can optionally be configured with
+// txOptions. The context is in effect until the Batch is closed.
+//
+// Warning: Send writes all queued queries before reading any results. This can
+// cause a deadlock if an excessive number of queries are queued. It is highly
+// advisable to use a timeout context to protect against this possibility.
+// Unfortunately, this excessive number can vary based on operating system,
+// connection type (TCP or Unix domain socket), and type of query. Unix domain
+// sockets seem to be much more susceptible to this issue than TCP connections.
+// However, it usually is at least several thousand.
+//
+// The deadlock occurs when the batched queries to be sent are so large that the
+// PostgreSQL server cannot receive it all at once. PostgreSQL received some of
+// the queued queries and starts executing them. As PostgreSQL executes the
+// queries it sends responses back. pgx will not read any of these responses
+// until it has finished sending. Therefore, if all network buffers are full pgx
+// will not be able to finish sending the queries and PostgreSQL will not be
+// able to finish sending the responses.
+//
+// See https://github.com/jackc/pgx/issues/374.
+func (b *Batch) Send(ctx context.Context, txOptions *TxOptions) error {
+ if b.err != nil {
+ return b.err
+ }
+
+ b.ctx = ctx
+
+ err := b.conn.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return err
+ }
+
+ if err := b.conn.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
+ buf := b.conn.wbuf
+ if !b.inTx {
+ buf = appendQuery(buf, txOptions.beginSQL())
+ }
+
+ err = b.conn.initContext(ctx)
+ if err != nil {
+ return err
+ }
+
+ for _, bi := range b.items {
+ var psName string
+ var psParameterOIDs []pgtype.OID
+
+ if ps, ok := b.conn.preparedStatements[bi.query]; ok {
+ psName = ps.Name
+ psParameterOIDs = ps.ParameterOIDs
+ } else {
+ psParameterOIDs = bi.parameterOIDs
+ buf = appendParse(buf, "", bi.query, psParameterOIDs)
+ }
+
+ var err error
+ buf, err = appendBind(buf, "", psName, b.conn.ConnInfo, psParameterOIDs, bi.arguments, bi.resultFormatCodes)
+ if err != nil {
+ return err
+ }
+
+ buf = appendDescribe(buf, 'P', "")
+ buf = appendExecute(buf, "", 0)
+ }
+
+ buf = appendSync(buf)
+ b.conn.pendingReadyForQueryCount++
+
+ if !b.inTx {
+ buf = appendQuery(buf, "commit")
+ b.conn.pendingReadyForQueryCount++
+ }
+
+ n, err := b.conn.conn.Write(buf)
+ if err != nil {
+ if fatalWriteErr(n, err) {
+ b.conn.die(err)
+ }
+ return err
+ }
+
+ for !b.inTx {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ return nil
+ default:
+ if err := b.conn.processContextFreeMsg(msg); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// ExecResults reads the results from the next query in the batch as if the
+// query has been sent with Exec.
+func (b *Batch) ExecResults() (CommandTag, error) {
+ if b.err != nil {
+ return "", b.err
+ }
+
+ select {
+ case <-b.ctx.Done():
+ b.die(b.ctx.Err())
+ return "", b.ctx.Err()
+ default:
+ }
+
+ if err := b.ensureCommandComplete(); err != nil {
+ b.die(err)
+ return "", err
+ }
+
+ b.resultsRead++
+
+ b.pendingCommandComplete = true
+
+ for {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return "", err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CommandComplete:
+ b.pendingCommandComplete = false
+ return CommandTag(msg.CommandTag), nil
+ default:
+ if err := b.conn.processContextFreeMsg(msg); err != nil {
+ return "", err
+ }
+ }
+ }
+}
+
+// QueryResults reads the results from the next query in the batch as if the
+// query has been sent with Query.
+func (b *Batch) QueryResults() (*Rows, error) {
+ rows := b.conn.getRows("batch query", nil)
+
+ if b.err != nil {
+ rows.fatal(b.err)
+ return rows, b.err
+ }
+
+ select {
+ case <-b.ctx.Done():
+ b.die(b.ctx.Err())
+ rows.fatal(b.err)
+ return rows, b.ctx.Err()
+ default:
+ }
+
+ if err := b.ensureCommandComplete(); err != nil {
+ b.die(err)
+ rows.fatal(err)
+ return rows, err
+ }
+
+ b.resultsRead++
+
+ b.pendingCommandComplete = true
+
+ fieldDescriptions, err := b.conn.readUntilRowDescription()
+ if err != nil {
+ b.die(err)
+ rows.fatal(b.err)
+ return rows, err
+ }
+
+ rows.batch = b
+ rows.fields = fieldDescriptions
+ return rows, nil
+}
+
+// QueryRowResults reads the results from the next query in the batch as if the
+// query has been sent with QueryRow.
+func (b *Batch) QueryRowResults() *Row {
+ rows, _ := b.QueryResults()
+ return (*Row)(rows)
+
+}
+
+// Close closes the batch operation. Any error that occurred during a batch
+// operation may have made it impossible to resynchronize the connection with the
+// server. In this case the underlying connection will have been closed.
+func (b *Batch) Close() (err error) {
+ if b.err != nil {
+ return b.err
+ }
+
+ defer func() {
+ err = b.conn.termContext(err)
+ if b.conn != nil && b.connPool != nil {
+ b.connPool.Release(b.conn)
+ }
+ }()
+
+ for i := b.resultsRead; i < len(b.items); i++ {
+ if _, err = b.ExecResults(); err != nil {
+ return err
+ }
+ }
+
+ if err = b.conn.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (b *Batch) die(err error) {
+ if b.err != nil {
+ return
+ }
+
+ b.err = err
+ b.conn.die(err)
+
+ if b.conn != nil && b.connPool != nil {
+ b.connPool.Release(b.conn)
+ }
+}
+
+func (b *Batch) ensureCommandComplete() error {
+ for b.pendingCommandComplete {
+ msg, err := b.conn.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.CommandComplete:
+ b.pendingCommandComplete = false
+ return nil
+ default:
+ err = b.conn.processContextFreeMsg(msg)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/bench_test.go b/vendor/github.com/jackc/pgx/bench_test.go
deleted file mode 100644
index 30e31e2..0000000
--- a/vendor/github.com/jackc/pgx/bench_test.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "fmt"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
- log "gopkg.in/inconshreveable/log15.v2"
-)
-
-func BenchmarkConnPool(b *testing.B) {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 5}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- b.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var conn *pgx.Conn
- if conn, err = pool.Acquire(); err != nil {
- b.Fatalf("Unable to acquire connection: %v", err)
- }
- pool.Release(conn)
- }
-}
-
-func BenchmarkConnPoolQueryRow(b *testing.B) {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 5}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- b.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- num := float64(-1)
- if err := pool.QueryRow("select random()").Scan(&num); err != nil {
- b.Fatal(err)
- }
-
- if num < 0 {
- b.Fatalf("expected `select random()` to return between 0 and 1 but it was: %v", num)
- }
- }
-}
-
-func BenchmarkNullXWithNullValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', null::text, null::text, null::text, null::date, null::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email pgx.NullString
- name pgx.NullString
- sex pgx.NullString
- birthDate pgx.NullTime
- lastLoginTime pgx.NullTime
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email.Valid {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name.Valid {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex.Valid {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate.Valid {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime.Valid {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkNullXWithPresentValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email pgx.NullString
- name pgx.NullString
- sex pgx.NullString
- birthDate pgx.NullTime
- lastLoginTime pgx.NullTime
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if !record.email.Valid || record.email.String != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if !record.name.Valid || record.name.String != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if !record.sex.Valid || record.sex.String != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if !record.birthDate.Valid || record.birthDate.Time != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if !record.lastLoginTime.Valid || record.lastLoginTime.Time != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkPointerPointerWithNullValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', null::text, null::text, null::text, null::date, null::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email *string
- name *string
- sex *string
- birthDate *time.Time
- lastLoginTime *time.Time
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email != nil {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name != nil {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex != nil {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate != nil {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime != nil {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkPointerPointerWithPresentValues(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- _, err := conn.Prepare("selectNulls", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email *string
- name *string
- sex *string
- birthDate *time.Time
- lastLoginTime *time.Time
- }
-
- err = conn.QueryRow("selectNulls").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email == nil || *record.email != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name == nil || *record.name != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex == nil || *record.sex != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate == nil || *record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime == nil || *record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkSelectWithoutLogging(b *testing.B) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingTraceWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("debug")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelTrace
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingDebugWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("debug")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelDebug
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingInfoWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("info")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelInfo
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func BenchmarkSelectWithLoggingErrorWithLog15(b *testing.B) {
- connConfig := *defaultConnConfig
-
- logger := log.New()
- lvl, err := log.LvlFromString("error")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
- connConfig.Logger = logger
- connConfig.LogLevel = pgx.LogLevelError
- conn := mustConnect(b, connConfig)
- defer closeConn(b, conn)
-
- benchmarkSelectWithLog(b, conn)
-}
-
-func benchmarkSelectWithLog(b *testing.B, conn *pgx.Conn) {
- _, err := conn.Prepare("test", "select 1::int4, 'johnsmith', 'johnsmith@example.com', 'John Smith', 'male', '1970-01-01'::date, '2015-01-01 00:00:00'::timestamptz")
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- var record struct {
- id int32
- userName string
- email string
- name string
- sex string
- birthDate time.Time
- lastLoginTime time.Time
- }
-
- err = conn.QueryRow("test").Scan(
- &record.id,
- &record.userName,
- &record.email,
- &record.name,
- &record.sex,
- &record.birthDate,
- &record.lastLoginTime,
- )
- if err != nil {
- b.Fatal(err)
- }
-
- // These checks both ensure that the correct data was returned
- // and provide a benchmark of accessing the returned values.
- if record.id != 1 {
- b.Fatalf("bad value for id: %v", record.id)
- }
- if record.userName != "johnsmith" {
- b.Fatalf("bad value for userName: %v", record.userName)
- }
- if record.email != "johnsmith@example.com" {
- b.Fatalf("bad value for email: %v", record.email)
- }
- if record.name != "John Smith" {
- b.Fatalf("bad value for name: %v", record.name)
- }
- if record.sex != "male" {
- b.Fatalf("bad value for sex: %v", record.sex)
- }
- if record.birthDate != time.Date(1970, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for birthDate: %v", record.birthDate)
- }
- if record.lastLoginTime != time.Date(2015, 1, 1, 0, 0, 0, 0, time.Local) {
- b.Fatalf("bad value for lastLoginTime: %v", record.lastLoginTime)
- }
- }
-}
-
-func BenchmarkLog15Discard(b *testing.B) {
- logger := log.New()
- lvl, err := log.LvlFromString("error")
- if err != nil {
- b.Fatal(err)
- }
- logger.SetHandler(log.LvlFilterHandler(lvl, log.DiscardHandler()))
-
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- logger.Debug("benchmark", "i", i, "b.N", b.N)
- }
-}
-
-const benchmarkWriteTableCreateSQL = `drop table if exists t;
-
-create table t(
- varchar_1 varchar not null,
- varchar_2 varchar not null,
- varchar_null_1 varchar,
- date_1 date not null,
- date_null_1 date,
- int4_1 int4 not null,
- int4_2 int4 not null,
- int4_null_1 int4,
- tstz_1 timestamptz not null,
- tstz_2 timestamptz,
- bool_1 bool not null,
- bool_2 bool not null,
- bool_3 bool not null
-);
-`
-
-const benchmarkWriteTableInsertSQL = `insert into t(
- varchar_1,
- varchar_2,
- varchar_null_1,
- date_1,
- date_null_1,
- int4_1,
- int4_2,
- int4_null_1,
- tstz_1,
- tstz_2,
- bool_1,
- bool_2,
- bool_3
-) values (
- $1::varchar,
- $2::varchar,
- $3::varchar,
- $4::date,
- $5::date,
- $6::int4,
- $7::int4,
- $8::int4,
- $9::timestamptz,
- $10::timestamptz,
- $11::bool,
- $12::bool,
- $13::bool
-)`
-
-type benchmarkWriteTableCopyFromSrc struct {
- count int
- idx int
- row []interface{}
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Next() bool {
- s.idx++
- return s.idx < s.count
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Values() ([]interface{}, error) {
- return s.row, nil
-}
-
-func (s *benchmarkWriteTableCopyFromSrc) Err() error {
- return nil
-}
-
-func newBenchmarkWriteTableCopyFromSrc(count int) pgx.CopyFromSource {
- return &benchmarkWriteTableCopyFromSrc{
- count: count,
- row: []interface{}{
- "varchar_1",
- "varchar_2",
- pgx.NullString{},
- time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
- pgx.NullTime{},
- 1,
- 2,
- pgx.NullInt32{},
- time.Date(2001, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2002, 1, 1, 0, 0, 0, 0, time.Local),
- true,
- false,
- true,
- },
- }
-}
-
-func benchmarkWriteNRowsViaInsert(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
- _, err := conn.Prepare("insert_t", benchmarkWriteTableInsertSQL)
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- tx, err := conn.Begin()
- if err != nil {
- b.Fatal(err)
- }
-
- for src.Next() {
- values, _ := src.Values()
- if _, err = tx.Exec("insert_t", values...); err != nil {
- b.Fatalf("Exec unexpectedly failed with: %v", err)
- }
- }
-
- err = tx.Commit()
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-// note this function is only used for benchmarks -- it doesn't escape tableName
-// or columnNames
-func multiInsert(conn *pgx.Conn, tableName string, columnNames []string, rowSrc pgx.CopyFromSource) (int, error) {
- maxRowsPerInsert := 65535 / len(columnNames)
- rowsThisInsert := 0
- rowCount := 0
-
- sqlBuf := &bytes.Buffer{}
- args := make(pgx.QueryArgs, 0)
-
- resetQuery := func() {
- sqlBuf.Reset()
- fmt.Fprintf(sqlBuf, "insert into %s(%s) values", tableName, strings.Join(columnNames, ", "))
-
- args = args[0:0]
-
- rowsThisInsert = 0
- }
- resetQuery()
-
- tx, err := conn.Begin()
- if err != nil {
- return 0, err
- }
- defer tx.Rollback()
-
- for rowSrc.Next() {
- if rowsThisInsert > 0 {
- sqlBuf.WriteByte(',')
- }
-
- sqlBuf.WriteByte('(')
-
- values, err := rowSrc.Values()
- if err != nil {
- return 0, err
- }
-
- for i, val := range values {
- if i > 0 {
- sqlBuf.WriteByte(',')
- }
- sqlBuf.WriteString(args.Append(val))
- }
-
- sqlBuf.WriteByte(')')
-
- rowsThisInsert++
-
- if rowsThisInsert == maxRowsPerInsert {
- _, err := tx.Exec(sqlBuf.String(), args...)
- if err != nil {
- return 0, err
- }
-
- rowCount += rowsThisInsert
- resetQuery()
- }
- }
-
- if rowsThisInsert > 0 {
- _, err := tx.Exec(sqlBuf.String(), args...)
- if err != nil {
- return 0, err
- }
-
- rowCount += rowsThisInsert
- }
-
- if err := tx.Commit(); err != nil {
- return 0, nil
- }
-
- return rowCount, nil
-
-}
-
-func benchmarkWriteNRowsViaMultiInsert(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
- _, err := conn.Prepare("insert_t", benchmarkWriteTableInsertSQL)
- if err != nil {
- b.Fatal(err)
- }
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- _, err := multiInsert(conn, "t",
- []string{"varchar_1",
- "varchar_2",
- "varchar_null_1",
- "date_1",
- "date_null_1",
- "int4_1",
- "int4_2",
- "int4_null_1",
- "tstz_1",
- "tstz_2",
- "bool_1",
- "bool_2",
- "bool_3"},
- src)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func benchmarkWriteNRowsViaCopy(b *testing.B, n int) {
- conn := mustConnect(b, *defaultConnConfig)
- defer closeConn(b, conn)
-
- mustExec(b, conn, benchmarkWriteTableCreateSQL)
-
- b.ResetTimer()
-
- for i := 0; i < b.N; i++ {
- src := newBenchmarkWriteTableCopyFromSrc(n)
-
- _, err := conn.CopyFrom(pgx.Identifier{"t"},
- []string{"varchar_1",
- "varchar_2",
- "varchar_null_1",
- "date_1",
- "date_null_1",
- "int4_1",
- "int4_2",
- "int4_null_1",
- "tstz_1",
- "tstz_2",
- "bool_1",
- "bool_2",
- "bool_3"},
- src)
- if err != nil {
- b.Fatal(err)
- }
- }
-}
-
-func BenchmarkWrite5RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 5)
-}
-
-func BenchmarkWrite5RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 5)
-}
-
-func BenchmarkWrite5RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 5)
-}
-
-func BenchmarkWrite10RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 10)
-}
-
-func BenchmarkWrite10RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 10)
-}
-
-func BenchmarkWrite10RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 10)
-}
-
-func BenchmarkWrite100RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 100)
-}
-
-func BenchmarkWrite100RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 100)
-}
-
-func BenchmarkWrite100RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 100)
-}
-
-func BenchmarkWrite1000RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 1000)
-}
-
-func BenchmarkWrite1000RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 1000)
-}
-
-func BenchmarkWrite1000RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 1000)
-}
-
-func BenchmarkWrite10000RowsViaInsert(b *testing.B) {
- benchmarkWriteNRowsViaInsert(b, 10000)
-}
-
-func BenchmarkWrite10000RowsViaMultiInsert(b *testing.B) {
- benchmarkWriteNRowsViaMultiInsert(b, 10000)
-}
-
-func BenchmarkWrite10000RowsViaCopy(b *testing.B) {
- benchmarkWriteNRowsViaCopy(b, 10000)
-}
diff --git a/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go b/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go
new file mode 100644
index 0000000..f8d437b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/chunkreader/chunkreader.go
@@ -0,0 +1,89 @@
+package chunkreader
+
+import (
+ "io"
+)
+
+type ChunkReader struct {
+ r io.Reader
+
+ buf []byte
+ rp, wp int // buf read position and write position
+
+ options Options
+}
+
+type Options struct {
+ MinBufLen int // Minimum buffer length
+}
+
+func NewChunkReader(r io.Reader) *ChunkReader {
+ cr, err := NewChunkReaderEx(r, Options{})
+ if err != nil {
+ panic("default options can't be bad")
+ }
+
+ return cr
+}
+
+func NewChunkReaderEx(r io.Reader, options Options) (*ChunkReader, error) {
+ if options.MinBufLen == 0 {
+ options.MinBufLen = 4096
+ }
+
+ return &ChunkReader{
+ r: r,
+ buf: make([]byte, options.MinBufLen),
+ options: options,
+ }, nil
+}
+
+// Next returns buf filled with the next n bytes. If an error occurs, buf will
+// be nil.
+func (r *ChunkReader) Next(n int) (buf []byte, err error) {
+ // n bytes already in buf
+ if (r.wp - r.rp) >= n {
+ buf = r.buf[r.rp : r.rp+n]
+ r.rp += n
+ return buf, err
+ }
+
+ // available space in buf is less than n
+ if len(r.buf) < n {
+ r.copyBufContents(r.newBuf(n))
+ }
+
+ // buf is large enough, but need to shift filled area to start to make enough contiguous space
+ minReadCount := n - (r.wp - r.rp)
+ if (len(r.buf) - r.wp) < minReadCount {
+ newBuf := r.newBuf(n)
+ r.copyBufContents(newBuf)
+ }
+
+ if err := r.appendAtLeast(minReadCount); err != nil {
+ return nil, err
+ }
+
+ buf = r.buf[r.rp : r.rp+n]
+ r.rp += n
+ return buf, nil
+}
+
+func (r *ChunkReader) appendAtLeast(fillLen int) error {
+ n, err := io.ReadAtLeast(r.r, r.buf[r.wp:], fillLen)
+ r.wp += n
+ return err
+}
+
+func (r *ChunkReader) newBuf(size int) []byte {
+ if size < r.options.MinBufLen {
+ size = r.options.MinBufLen
+ }
+ return make([]byte, size)
+}
+
+func (r *ChunkReader) copyBufContents(dest []byte) {
+ r.wp = copy(dest, r.buf[r.rp:r.wp])
+ r.rp = 0
+ r.buf = dest
+}
diff --git a/vendor/github.com/jackc/pgx/conn.go b/vendor/github.com/jackc/pgx/conn.go
index a2d60e7..125d903 100644
--- a/vendor/github.com/jackc/pgx/conn.go
+++ b/vendor/github.com/jackc/pgx/conn.go
@@ -1,12 +1,11 @@
package pgx
import (
- "bufio"
+ "context"
"crypto/md5"
"crypto/tls"
"encoding/binary"
"encoding/hex"
- "errors"
"fmt"
"io"
"net"
@@ -17,9 +16,46 @@ import (
"regexp"
"strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
+)
+
+const (
+ connStatusUninitialized = iota
+ connStatusClosed
+ connStatusIdle
+ connStatusBusy
)
+// minimalConnInfo has just enough static type information to establish the
+// connection and retrieve the type data.
+var minimalConnInfo *pgtype.ConnInfo
+
+func init() {
+ minimalConnInfo = pgtype.NewConnInfo()
+ minimalConnInfo.InitializeDataTypes(map[string]pgtype.OID{
+ "int4": pgtype.Int4OID,
+ "name": pgtype.NameOID,
+ "oid": pgtype.OIDOID,
+ "text": pgtype.TextOID,
+ "varchar": pgtype.VarcharOID,
+ })
+}
+
+// NoticeHandler is a function that can handle notices received from the
+// PostgreSQL server. Notices can be received at any time, usually during
+// handling of a query response. The *Conn is provided so the handler is aware
+// of the origin of the notice, but it must not invoke any query method. Be
+// aware that this is distinct from LISTEN/NOTIFY notification.
+type NoticeHandler func(*Conn, *Notice)
+
// DialFunc is a function that can be used to connect to a PostgreSQL server
type DialFunc func(network, addr string) (net.Conn, error)
@@ -36,38 +72,76 @@ type ConnConfig struct {
Logger Logger
LogLevel int
Dial DialFunc
- RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+ RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
+ OnNotice NoticeHandler // Callback function called when a notice response is received.
+ CustomConnInfo func(*Conn) (*pgtype.ConnInfo, error) // Callback function to implement connection strategies for different backends. crate, pgbouncer, pgpool, etc.
+
+ // PreferSimpleProtocol disables implicit prepared statement usage. By default
+ // pgx automatically uses the unnamed prepared statement for Query and
+ // QueryRow. It also uses a prepared statement when Exec has arguments. This
+ // can improve performance due to being able to use the binary format. It also
+ // does not rely on client side parameter sanitization. However, it does incur
+ // two round-trips per query and may be incompatible with proxies such as
+ // PGBouncer. Setting PreferSimpleProtocol causes the simple protocol to be
+ // used by default. The same functionality can be controlled on a per query
+ // basis by setting QueryExOptions.SimpleProtocol.
+ PreferSimpleProtocol bool
+}
+
+func (cc *ConnConfig) networkAddress() (network, address string) {
+ network = "tcp"
+ address = fmt.Sprintf("%s:%d", cc.Host, cc.Port)
+ // See if host is a valid path, if yes connect with a socket
+ if _, err := os.Stat(cc.Host); err == nil {
+ // For backward compatibility accept socket file paths -- but directories are now preferred
+ network = "unix"
+ address = cc.Host
+ if !strings.Contains(address, "/.s.PGSQL.") {
+ address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(cc.Port), 10)
+ }
+ }
+
+ return network, address
}
// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage.
// Use ConnPool to manage access to multiple database connections from multiple
// goroutines.
type Conn struct {
- conn net.Conn // the underlying TCP or unix domain socket connection
- lastActivityTime time.Time // the last time the connection was used
- reader *bufio.Reader // buffered reader to improve read performance
- wbuf [1024]byte
- writeBuf WriteBuf
- Pid int32 // backend pid
- SecretKey int32 // key to use to send a cancel query message to the server
+ conn net.Conn // the underlying TCP or unix domain socket connection
+ lastActivityTime time.Time // the last time the connection was used
+ wbuf []byte
+ pid uint32 // backend pid
+ secretKey uint32 // key to use to send a cancel query message to the server
RuntimeParams map[string]string // parameters that have been reported by the server
- PgTypes map[Oid]PgType // oids to PgTypes
config ConnConfig // config used when establishing this connection
- TxStatus byte
+ txStatus byte
preparedStatements map[string]*PreparedStatement
channels map[string]struct{}
notifications []*Notification
- alive bool
- causeOfDeath error
logger Logger
logLevel int
- mr msgReader
fp *fastpath
- pgsqlAfInet *byte
- pgsqlAfInet6 *byte
- busy bool
poolResetCount int
preallocatedRows []Rows
+ onNotice NoticeHandler
+
+ mux sync.Mutex
+ status byte // One of connStatus* constants
+ causeOfDeath error
+
+ pendingReadyForQueryCount int // number of ReadyForQuery messages expected
+ cancelQueryInProgress int32
+ cancelQueryCompleted chan struct{}
+
+ // context support
+ ctxInProgress bool
+ doneChan chan struct{}
+ closedChan chan error
+
+ ConnInfo *pgtype.ConnInfo
+
+ frontend *pgproto3.Frontend
}
// PreparedStatement is a description of a prepared statement
@@ -75,27 +149,21 @@ type PreparedStatement struct {
Name string
SQL string
FieldDescriptions []FieldDescription
- ParameterOids []Oid
+ ParameterOIDs []pgtype.OID
}
// PrepareExOptions is an option struct that can be passed to PrepareEx
type PrepareExOptions struct {
- ParameterOids []Oid
+ ParameterOIDs []pgtype.OID
}
// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system
type Notification struct {
- Pid int32 // backend pid that sent the notification
+ PID uint32 // backend pid that sent the notification
Channel string // channel from which notification was received
Payload string
}
-// PgType is information about PostgreSQL type and how to encode and decode it
-type PgType struct {
- Name string // name of type e.g. int4, text, date
- DefaultFormat int16 // default format (text or binary) this type will be requested in
-}
-
// CommandTag is the result of an Exec function
type CommandTag string
@@ -127,9 +195,6 @@ func (ident Identifier) Sanitize() string {
// ErrNoRows occurs when rows are expected but none are returned.
var ErrNoRows = errors.New("no rows in result set")
-// ErrNotificationTimeout occurs when WaitForNotification times out.
-var ErrNotificationTimeout = errors.New("notification timeout")
-
// ErrDeadConn occurs on an attempt to use a dead connection
var ErrDeadConn = errors.New("conn is dead")
@@ -138,7 +203,7 @@ var ErrDeadConn = errors.New("conn is dead")
var ErrTLSRefused = errors.New("server refused TLS connection")
// ErrConnBusy occurs when the connection is busy (for example, in the middle of
-// reading query results) and another action is attempts.
+// reading query results) and another action is attempted.
var ErrConnBusy = errors.New("conn is busy")
// ErrInvalidLogLevel occurs on attempt to set an invalid log level.
@@ -155,29 +220,18 @@ func (e ProtocolError) Error() string {
// config.Host must be specified. config.User will default to the OS user name.
// Other config fields are optional.
func Connect(config ConnConfig) (c *Conn, err error) {
- return connect(config, nil, nil, nil)
+ return connect(config, minimalConnInfo)
}
-func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsqlAfInet6 *byte) (c *Conn, err error) {
+func defaultDialer() *net.Dialer {
+ return &net.Dialer{KeepAlive: 5 * time.Minute}
+}
+
+func connect(config ConnConfig, connInfo *pgtype.ConnInfo) (c *Conn, err error) {
c = new(Conn)
c.config = config
-
- if pgTypes != nil {
- c.PgTypes = make(map[Oid]PgType, len(pgTypes))
- for k, v := range pgTypes {
- c.PgTypes[k] = v
- }
- }
-
- if pgsqlAfInet != nil {
- c.pgsqlAfInet = new(byte)
- *c.pgsqlAfInet = *pgsqlAfInet
- }
- if pgsqlAfInet6 != nil {
- c.pgsqlAfInet6 = new(byte)
- *c.pgsqlAfInet6 = *pgsqlAfInet6
- }
+ c.ConnInfo = connInfo
if c.config.LogLevel != 0 {
c.logLevel = c.config.LogLevel
@@ -186,8 +240,6 @@ func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsql
c.logLevel = LogLevelDebug
}
c.logger = c.config.Logger
- c.mr.log = c.log
- c.mr.shouldLog = c.shouldLog
if c.config.User == "" {
user, err := user.Current()
@@ -196,46 +248,39 @@ func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsql
}
c.config.User = user.Username
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Using default connection config", "User", c.config.User)
+ c.log(LogLevelDebug, "Using default connection config", map[string]interface{}{"User": c.config.User})
}
}
if c.config.Port == 0 {
c.config.Port = 5432
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Using default connection config", "Port", c.config.Port)
+ c.log(LogLevelDebug, "Using default connection config", map[string]interface{}{"Port": c.config.Port})
}
}
- network := "tcp"
- address := fmt.Sprintf("%s:%d", c.config.Host, c.config.Port)
- // See if host is a valid path, if yes connect with a socket
- if _, err := os.Stat(c.config.Host); err == nil {
- // For backward compatibility accept socket file paths -- but directories are now preferred
- network = "unix"
- address = c.config.Host
- if !strings.Contains(address, "/.s.PGSQL.") {
- address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(c.config.Port), 10)
- }
- }
+ c.onNotice = config.OnNotice
+
+ network, address := c.config.networkAddress()
if c.config.Dial == nil {
- c.config.Dial = (&net.Dialer{KeepAlive: 5 * time.Minute}).Dial
+ d := defaultDialer()
+ c.config.Dial = d.Dial
}
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, fmt.Sprintf("Dialing PostgreSQL server at %s address: %s", network, address))
+ c.log(LogLevelInfo, "Dialing PostgreSQL server", map[string]interface{}{"network": network, "address": address})
}
err = c.connect(config, network, address, config.TLSConfig)
if err != nil && config.UseFallbackTLS {
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, fmt.Sprintf("Connect with TLSConfig failed, trying FallbackTLSConfig: %v", err))
+ c.log(LogLevelInfo, "connect with TLSConfig failed, trying FallbackTLSConfig", map[string]interface{}{"err": err})
}
err = c.connect(config, network, address, config.FallbackTLSConfig)
}
if err != nil {
if c.shouldLog(LogLevelError) {
- c.log(LogLevelError, fmt.Sprintf("Connect failed: %v", err))
+ c.log(LogLevelError, "connect failed", map[string]interface{}{"err": err})
}
return nil, err
}
@@ -251,88 +296,95 @@ func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tl
defer func() {
if c != nil && err != nil {
c.conn.Close()
- c.alive = false
+ c.mux.Lock()
+ c.status = connStatusClosed
+ c.mux.Unlock()
}
}()
c.RuntimeParams = make(map[string]string)
c.preparedStatements = make(map[string]*PreparedStatement)
c.channels = make(map[string]struct{})
- c.alive = true
c.lastActivityTime = time.Now()
+ c.cancelQueryCompleted = make(chan struct{}, 1)
+ c.doneChan = make(chan struct{})
+ c.closedChan = make(chan error)
+ c.wbuf = make([]byte, 0, 1024)
+
+ c.mux.Lock()
+ c.status = connStatusIdle
+ c.mux.Unlock()
if tlsConfig != nil {
if c.shouldLog(LogLevelDebug) {
- c.log(LogLevelDebug, "Starting TLS handshake")
+ c.log(LogLevelDebug, "starting TLS handshake", nil)
}
if err := c.startTLS(tlsConfig); err != nil {
return err
}
}
- c.reader = bufio.NewReader(c.conn)
- c.mr.reader = c.reader
+ c.frontend, err = pgproto3.NewFrontend(c.conn, c.conn)
+ if err != nil {
+ return err
+ }
- msg := newStartupMessage()
+ startupMsg := pgproto3.StartupMessage{
+ ProtocolVersion: pgproto3.ProtocolVersionNumber,
+ Parameters: make(map[string]string),
+ }
// Default to disabling TLS renegotiation.
//
// Go does not support (https://github.com/golang/go/issues/5742)
// PostgreSQL recommends disabling (http://www.postgresql.org/docs/9.4/static/runtime-config-connection.html#GUC-SSL-RENEGOTIATION-LIMIT)
if tlsConfig != nil {
- msg.options["ssl_renegotiation_limit"] = "0"
+ startupMsg.Parameters["ssl_renegotiation_limit"] = "0"
}
// Copy default run-time params
for k, v := range config.RuntimeParams {
- msg.options[k] = v
+ startupMsg.Parameters[k] = v
}
- msg.options["user"] = c.config.User
+ startupMsg.Parameters["user"] = c.config.User
if c.config.Database != "" {
- msg.options["database"] = c.config.Database
+ startupMsg.Parameters["database"] = c.config.Database
}
- if err = c.txStartupMessage(msg); err != nil {
+ if _, err := c.conn.Write(startupMsg.Encode(nil)); err != nil {
return err
}
+ c.pendingReadyForQueryCount = 1
+
for {
- var t byte
- var r *msgReader
- t, r, err = c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case backendKeyData:
- c.rxBackendKeyData(r)
- case authenticationX:
- if err = c.rxAuthenticationX(r); err != nil {
+ switch msg := msg.(type) {
+ case *pgproto3.BackendKeyData:
+ c.rxBackendKeyData(msg)
+ case *pgproto3.Authentication:
+ if err = c.rxAuthenticationX(msg); err != nil {
return err
}
- case readyForQuery:
- c.rxReadyForQuery(r)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, "Connection established")
+ c.log(LogLevelInfo, "connection established", nil)
}
// Replication connections can't execute the queries to
// populate the c.PgTypes and c.pgsqlAfInet
- if _, ok := msg.options["replication"]; ok {
+ if _, ok := config.RuntimeParams["replication"]; ok {
return nil
}
- if c.PgTypes == nil {
- err = c.loadPgTypes()
- if err != nil {
- return err
- }
- }
-
- if c.pgsqlAfInet == nil || c.pgsqlAfInet6 == nil {
- err = c.loadInetConstants()
+ if c.ConnInfo == minimalConnInfo {
+ err = c.initConnInfo()
if err != nil {
return err
}
@@ -340,77 +392,276 @@ func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tl
return nil
default:
- if err = c.processContextFreeMsg(t, r); err != nil {
+ if err = c.processContextFreeMsg(msg); err != nil {
return err
}
}
}
}
-func (c *Conn) loadPgTypes() error {
- rows, err := c.Query(`select t.oid, t.typname
+func initPostgresql(c *Conn) (*pgtype.ConnInfo, error) {
+ const (
+ namedOIDQuery = `select t.oid,
+ case when nsp.nspname in ('pg_catalog', 'public') then t.typname
+ else nsp.nspname||'.'||t.typname
+ end
from pg_type t
left join pg_type base_type on t.typelem=base_type.oid
+left join pg_namespace nsp on t.typnamespace=nsp.oid
where (
- t.typtype='b'
- and (base_type.oid is null or base_type.typtype='b')
+ t.typtype in('b', 'p', 'r', 'e')
+ and (base_type.oid is null or base_type.typtype in('b', 'p', 'r'))
+ )`
)
- or t.typname in('record');`)
+
+ nameOIDs, err := connInfoFromRows(c.Query(namedOIDQuery))
if err != nil {
- return err
+ return nil, err
}
- c.PgTypes = make(map[Oid]PgType, 128)
+ cinfo := pgtype.NewConnInfo()
+ cinfo.InitializeDataTypes(nameOIDs)
- for rows.Next() {
- var oid Oid
- var t PgType
+ if err = c.initConnInfoEnumArray(cinfo); err != nil {
+ return nil, err
+ }
+
+ return cinfo, nil
+}
+
+func (c *Conn) initConnInfo() (err error) {
+ var (
+ connInfo *pgtype.ConnInfo
+ )
+
+ if c.config.CustomConnInfo != nil {
+ if c.ConnInfo, err = c.config.CustomConnInfo(c); err != nil {
+ return err
+ }
- rows.Scan(&oid, &t.Name)
+ return nil
+ }
- // The zero value is text format so we ignore any types without a default type format
- t.DefaultFormat, _ = DefaultTypeFormats[t.Name]
+ if connInfo, err = initPostgresql(c); err == nil {
+ c.ConnInfo = connInfo
+ return err
+ }
- c.PgTypes[oid] = t
+ // Check if CrateDB specific approach might still allow us to connect.
+ if connInfo, err = c.crateDBTypesQuery(err); err == nil {
+ c.ConnInfo = connInfo
}
- return rows.Err()
+ return err
}
-// Family is needed for binary encoding of inet/cidr. The constant is based on
-// the server's definition of AF_INET. In theory, this could differ between
-// platforms, so request an IPv4 and an IPv6 inet and get the family from that.
-func (c *Conn) loadInetConstants() error {
- var ipv4, ipv6 []byte
-
- err := c.QueryRow("select '127.0.0.1'::inet, '1::'::inet").Scan(&ipv4, &ipv6)
+// initConnInfoEnumArray introspects for arrays of enums and registers a data type for them.
+func (c *Conn) initConnInfoEnumArray(cinfo *pgtype.ConnInfo) error {
+ nameOIDs := make(map[string]pgtype.OID, 16)
+ rows, err := c.Query(`select t.oid, t.typname
+from pg_type t
+ join pg_type base_type on t.typelem=base_type.oid
+where t.typtype = 'b'
+ and base_type.typtype = 'e'`)
if err != nil {
return err
}
- c.pgsqlAfInet = &ipv4[0]
- c.pgsqlAfInet6 = &ipv6[0]
+ for rows.Next() {
+ var oid pgtype.OID
+ var name pgtype.Text
+ if err := rows.Scan(&oid, &name); err != nil {
+ return err
+ }
+
+ nameOIDs[name.String] = oid
+ }
+
+ if rows.Err() != nil {
+ return rows.Err()
+ }
+
+ for name, oid := range nameOIDs {
+ cinfo.RegisterDataType(pgtype.DataType{
+ Value: &pgtype.EnumArray{},
+ Name: name,
+ OID: oid,
+ })
+ }
return nil
}
+// crateDBTypesQuery checks if the given err is likely to be the result of
+// CrateDB not implementing the pg_types table correctly. If yes, a CrateDB
+// specific query against pg_types is executed and its results are returned. If
+// not, the original error is returned.
+func (c *Conn) crateDBTypesQuery(err error) (*pgtype.ConnInfo, error) {
+ // CrateDB 2.1.6 is a database that implements the PostgreSQL wire protocol,
+ // but not perfectly. In particular, the pg_catalog schema containing the
+ // pg_type table is not visible by default and the pg_type.typtype column is
+ // not implemented. Therefore the query above currently returns the following
+ // error:
+ //
+ // pgx.PgError{Severity:"ERROR", Code:"XX000",
+ // Message:"TableUnknownException: Table 'test.pg_type' unknown",
+ // Detail:"", Hint:"", Position:0, InternalPosition:0, InternalQuery:"",
+ // Where:"", SchemaName:"", TableName:"", ColumnName:"", DataTypeName:"",
+ // ConstraintName:"", File:"Schemas.java", Line:99, Routine:"getTableInfo"}
+ //
+ // If CrateDB were to fix the pg_type table visibility in the future, we'd
+ // still get this error until typtype column is implemented:
+ //
+ // pgx.PgError{Severity:"ERROR", Code:"XX000",
+ // Message:"ColumnUnknownException: Column typtype unknown", Detail:"",
+ // Hint:"", Position:0, InternalPosition:0, InternalQuery:"", Where:"",
+ // SchemaName:"", TableName:"", ColumnName:"", DataTypeName:"",
+ // ConstraintName:"", File:"FullQualifiedNameFieldProvider.java", Line:132,
+ //
+ // Additionally CrateDB doesn't implement Postgres error codes [2], and
+ // instead always returns "XX000" (internal_error). The code below uses all
+ // of this knowledge as a heuristic to detect CrateDB. If CrateDB is
+ // detected, a CrateDB specific pg_type query is executed instead.
+ //
+ // The heuristic is designed to still work even if CrateDB fixes [2] or
+ // renames its internal exception names. If both are changed but pg_types
+ // isn't fixed, this code will need to be changed.
+ //
+ // There is also a small chance the heuristic will yield a false positive for
+ // non-CrateDB databases (e.g. if a real Postgres instance returns an XX000
+ // error), but hopefully there will be no harm in attempting the alternative
+ // query in this case.
+ //
+ // CrateDB also uses the type varchar for the typname column which required
+ // adding varchar to the minimalConnInfo init code.
+ //
+ // Also see the discussion here [3].
+ //
+ // [1] https://crate.io/
+ // [2] https://github.com/crate/crate/issues/5027
+ // [3] https://github.com/jackc/pgx/issues/320
+
+ if pgErr, ok := err.(PgError); ok &&
+ (pgErr.Code == "XX000" ||
+ strings.Contains(pgErr.Message, "TableUnknownException") ||
+ strings.Contains(pgErr.Message, "ColumnUnknownException")) {
+ var (
+ nameOIDs map[string]pgtype.OID
+ )
+
+ if nameOIDs, err = connInfoFromRows(c.Query(`select oid, typname from pg_catalog.pg_type`)); err != nil {
+ return nil, err
+ }
+
+ cinfo := pgtype.NewConnInfo()
+ cinfo.InitializeDataTypes(nameOIDs)
+
+ return cinfo, err
+ }
+
+ return nil, err
+}
+
+// PID returns the backend PID for this connection.
+func (c *Conn) PID() uint32 {
+ return c.pid
+}
+
// Close closes a connection. It is safe to call Close on a already closed
// connection.
func (c *Conn) Close() (err error) {
- if !c.IsAlive() {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status < connStatusIdle {
return nil
}
+ c.status = connStatusClosed
- wbuf := newWriteBuf(c, 'X')
- wbuf.closeMsg()
+ defer func() {
+ c.conn.Close()
+ c.causeOfDeath = errors.New("Closed")
+ if c.shouldLog(LogLevelInfo) {
+ c.log(LogLevelInfo, "closed connection", nil)
+ }
+ }()
- _, err = c.conn.Write(wbuf.buf)
+ err = c.conn.SetDeadline(time.Time{})
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to clear deadlines to send close message", map[string]interface{}{"err": err})
+ return err
+ }
- c.die(errors.New("Closed"))
- if c.shouldLog(LogLevelInfo) {
- c.log(LogLevelInfo, "Closed connection")
+ _, err = c.conn.Write([]byte{'X', 0, 0, 0, 4})
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to send terminate message", map[string]interface{}{"err": err})
+ return err
}
- return err
+
+ err = c.conn.SetReadDeadline(time.Now().Add(5 * time.Second))
+ if err != nil && c.shouldLog(LogLevelWarn) {
+ c.log(LogLevelWarn, "failed to set read deadline to finish closing", map[string]interface{}{"err": err})
+ return err
+ }
+
+ _, err = c.conn.Read(make([]byte, 1))
+ if err != io.EOF {
+ return err
+ }
+
+ return nil
+}
+
+// Merge returns a new ConnConfig with the attributes of old and other
+// combined. When an attribute is set on both, other takes precedence.
+//
+// As a security precaution, if the other TLSConfig is nil, all old TLS
+// attributes will be preserved.
+func (old ConnConfig) Merge(other ConnConfig) ConnConfig {
+ cc := old
+
+ if other.Host != "" {
+ cc.Host = other.Host
+ }
+ if other.Port != 0 {
+ cc.Port = other.Port
+ }
+ if other.Database != "" {
+ cc.Database = other.Database
+ }
+ if other.User != "" {
+ cc.User = other.User
+ }
+ if other.Password != "" {
+ cc.Password = other.Password
+ }
+
+ if other.TLSConfig != nil {
+ cc.TLSConfig = other.TLSConfig
+ cc.UseFallbackTLS = other.UseFallbackTLS
+ cc.FallbackTLSConfig = other.FallbackTLSConfig
+ }
+
+ if other.Logger != nil {
+ cc.Logger = other.Logger
+ }
+ if other.LogLevel != 0 {
+ cc.LogLevel = other.LogLevel
+ }
+
+ if other.Dial != nil {
+ cc.Dial = other.Dial
+ }
+
+ cc.RuntimeParams = make(map[string]string)
+ for k, v := range old.RuntimeParams {
+ cc.RuntimeParams[k] = v
+ }
+ for k, v := range other.RuntimeParams {
+ cc.RuntimeParams[k] = v
+ }
+
+ return cc
}
// ParseURI parses a database URI into ConnConfig
@@ -440,13 +691,24 @@ func ParseURI(uri string) (ConnConfig, error) {
}
cp.Database = strings.TrimLeft(url.Path, "/")
+ if pgtimeout := url.Query().Get("connect_timeout"); pgtimeout != "" {
+ timeout, err := strconv.ParseInt(pgtimeout, 10, 64)
+ if err != nil {
+ return cp, err
+ }
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cp.Dial = d.Dial
+ }
+
err = configSSL(url.Query().Get("sslmode"), &cp)
if err != nil {
return cp, err
}
ignoreKeys := map[string]struct{}{
- "sslmode": {},
+ "sslmode": {},
+ "connect_timeout": {},
}
cp.RuntimeParams = make(map[string]string)
@@ -504,6 +766,14 @@ func ParseDSN(s string) (ConnConfig, error) {
cp.Database = b[2]
case "sslmode":
sslmode = b[2]
+ case "connect_timeout":
+ timeout, err := strconv.ParseInt(b[2], 10, 64)
+ if err != nil {
+ return cp, err
+ }
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cp.Dial = d.Dial
default:
cp.RuntimeParams[b[1]] = b[2]
}
@@ -541,6 +811,7 @@ func ParseConnectionString(s string) (ConnConfig, error) {
// PGPASSWORD
// PGSSLMODE
// PGAPPNAME
+// PGCONNECT_TIMEOUT
//
// Important TLS Security Notes:
// ParseEnvLibpq tries to match libpq behavior with regard to PGSSLMODE. This
@@ -549,10 +820,10 @@ func ParseConnectionString(s string) (ConnConfig, error) {
// See http://www.postgresql.org/docs/9.4/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION
// for details on what level of security each sslmode provides.
//
-// "require" and "verify-ca" modes currently are treated as "verify-full". e.g.
-// They have stronger security guarantees than they would with libpq. Do not
-// rely on this behavior as it may be possible to match libpq in the future. If
-// you need full security use "verify-full".
+// "verify-ca" mode currently is treated as "verify-full". e.g. It has stronger
+// security guarantees than it would with libpq. Do not rely on this behavior as it
+// may be possible to match libpq in the future. If you need full security use
+// "verify-full".
//
// Several of the PGSSLMODE options (including the default behavior of "prefer")
// will set UseFallbackTLS to true and FallbackTLSConfig to a disabled or
@@ -576,6 +847,16 @@ func ParseEnvLibpq() (ConnConfig, error) {
cc.User = os.Getenv("PGUSER")
cc.Password = os.Getenv("PGPASSWORD")
+ if pgtimeout := os.Getenv("PGCONNECT_TIMEOUT"); pgtimeout != "" {
+ if timeout, err := strconv.ParseInt(pgtimeout, 10, 64); err == nil {
+ d := defaultDialer()
+ d.Timeout = time.Duration(timeout) * time.Second
+ cc.Dial = d.Dial
+ } else {
+ return cc, err
+ }
+ }
+
sslmode := os.Getenv("PGSSLMODE")
err := configSSL(sslmode, &cc)
@@ -608,7 +889,9 @@ func configSSL(sslmode string, cc *ConnConfig) error {
cc.TLSConfig = &tls.Config{InsecureSkipVerify: true}
cc.UseFallbackTLS = true
cc.FallbackTLSConfig = nil
- case "require", "verify-ca", "verify-full":
+ case "require":
+ cc.TLSConfig = &tls.Config{InsecureSkipVerify: true}
+ case "verify-ca", "verify-full":
cc.TLSConfig = &tls.Config{
ServerName: cc.Host,
}
@@ -626,7 +909,7 @@ func configSSL(sslmode string, cc *ConnConfig) error {
// name and sql arguments. This allows a code path to Prepare and Query/Exec without
// concern for if the statement has already been prepared.
func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) {
- return c.PrepareEx(name, sql, nil)
+ return c.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx creates a prepared statement with name and sql. sql can contain placeholders
@@ -636,83 +919,95 @@ func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) {
// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same
// name and sql arguments. This allows a code path to PrepareEx and Query/Exec without
// concern for if the statement has already been prepared.
-func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
+func (c *Conn) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = c.initContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ ps, err = c.prepareEx(name, sql, opts)
+ err = c.termContext(err)
+ return ps, err
+}
+
+func (c *Conn) prepareEx(name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) {
if name != "" {
if ps, ok := c.preparedStatements[name]; ok && ps.SQL == sql {
return ps, nil
}
}
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
+ }
+
if c.shouldLog(LogLevelError) {
defer func() {
if err != nil {
- c.log(LogLevelError, fmt.Sprintf("Prepare `%s` as `%s` failed: %v", name, sql, err))
+ c.log(LogLevelError, "prepareEx failed", map[string]interface{}{"err": err, "name": name, "sql": sql})
}
}()
}
- // parse
- wbuf := newWriteBuf(c, 'P')
- wbuf.WriteCString(name)
- wbuf.WriteCString(sql)
-
- if opts != nil {
- if len(opts.ParameterOids) > 65535 {
- return nil, fmt.Errorf("Number of PrepareExOptions ParameterOids must be between 0 and 65535, received %d", len(opts.ParameterOids))
- }
- wbuf.WriteInt16(int16(len(opts.ParameterOids)))
- for _, oid := range opts.ParameterOids {
- wbuf.WriteInt32(int32(oid))
- }
- } else {
- wbuf.WriteInt16(0)
+ if opts == nil {
+ opts = &PrepareExOptions{}
}
- // describe
- wbuf.startMsg('D')
- wbuf.WriteByte('S')
- wbuf.WriteCString(name)
+ if len(opts.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of PrepareExOptions ParameterOIDs must be between 0 and 65535, received %d", len(opts.ParameterOIDs))
+ }
- // sync
- wbuf.startMsg('S')
- wbuf.closeMsg()
+ buf := appendParse(c.wbuf, name, sql, opts.ParameterOIDs)
+ buf = appendDescribe(buf, 'S', name)
+ buf = appendSync(buf)
- _, err = c.conn.Write(wbuf.buf)
+ n, err := c.conn.Write(buf)
if err != nil {
- c.die(err)
+ if fatalWriteErr(n, err) {
+ c.die(err)
+ }
return nil, err
}
+ c.pendingReadyForQueryCount++
ps = &PreparedStatement{Name: name, SQL: sql}
var softErr error
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case parseComplete:
- case parameterDescription:
- ps.ParameterOids = c.rxParameterDescription(r)
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ ps.ParameterOIDs = c.rxParameterDescription(msg)
- if len(ps.ParameterOids) > 65535 && softErr == nil {
- softErr = fmt.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOids))
+ if len(ps.ParameterOIDs) > 65535 && softErr == nil {
+ softErr = errors.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOIDs))
}
- case rowDescription:
- ps.FieldDescriptions = c.rxRowDescription(r)
+ case *pgproto3.RowDescription:
+ ps.FieldDescriptions = c.rxRowDescription(msg)
for i := range ps.FieldDescriptions {
- t, _ := c.PgTypes[ps.FieldDescriptions[i].DataType]
- ps.FieldDescriptions[i].DataTypeName = t.Name
- ps.FieldDescriptions[i].FormatCode = t.DefaultFormat
+ if dt, ok := c.ConnInfo.DataTypeForOID(ps.FieldDescriptions[i].DataType); ok {
+ ps.FieldDescriptions[i].DataTypeName = dt.Name
+ if _, ok := dt.Value.(pgtype.BinaryDecoder); ok {
+ ps.FieldDescriptions[i].FormatCode = BinaryFormatCode
+ } else {
+ ps.FieldDescriptions[i].FormatCode = TextFormatCode
+ }
+ } else {
+ return nil, errors.Errorf("unknown oid: %d", ps.FieldDescriptions[i].DataType)
+ }
}
- case noData:
- case readyForQuery:
- c.rxReadyForQuery(r)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
if softErr == nil {
c.preparedStatements[name] = ps
@@ -720,7 +1015,7 @@ func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *Prepared
return ps, softErr
default:
- if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
+ if e := c.processContextFreeMsg(msg); e != nil && softErr == nil {
softErr = e
}
}
@@ -728,37 +1023,62 @@ func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *Prepared
}
// Deallocate released a prepared statement
-func (c *Conn) Deallocate(name string) (err error) {
+func (c *Conn) Deallocate(name string) error {
+ return c.deallocateContext(context.Background(), name)
+}
+
+// TODO - consider making this public
+func (c *Conn) deallocateContext(ctx context.Context, name string) (err error) {
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return err
+ }
+
+ err = c.initContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
+
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
+
delete(c.preparedStatements, name)
// close
- wbuf := newWriteBuf(c, 'C')
- wbuf.WriteByte('S')
- wbuf.WriteCString(name)
+ buf := c.wbuf
+ buf = append(buf, 'C')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, 'S')
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
// flush
- wbuf.startMsg('H')
- wbuf.closeMsg()
+ buf = append(buf, 'H')
+ buf = pgio.AppendInt32(buf, 4)
- _, err = c.conn.Write(wbuf.buf)
+ _, err = c.conn.Write(buf)
if err != nil {
c.die(err)
return err
}
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case closeComplete:
+ switch msg.(type) {
+ case *pgproto3.CloseComplete:
return nil
default:
- err = c.processContextFreeMsg(t, r)
+ err = c.processContextFreeMsg(msg)
if err != nil {
return err
}
@@ -789,9 +1109,8 @@ func (c *Conn) Unlisten(channel string) error {
return nil
}
-// WaitForNotification waits for a PostgreSQL notification for up to timeout.
-// If the timeout occurs it returns pgx.ErrNotificationTimeout
-func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error) {
+// WaitForNotification waits for a PostgreSQL notification.
+func (c *Conn) WaitForNotification(ctx context.Context) (notification *Notification, err error) {
// Return already received notification immediately
if len(c.notifications) > 0 {
notification := c.notifications[0]
@@ -799,86 +1118,40 @@ func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error)
return notification, nil
}
- stopTime := time.Now().Add(timeout)
-
- for {
- now := time.Now()
-
- if now.After(stopTime) {
- return nil, ErrNotificationTimeout
- }
-
- // If there has been no activity on this connection for a while send a nop message just to ensure
- // the connection is alive
- nextEnsureAliveTime := c.lastActivityTime.Add(15 * time.Second)
- if nextEnsureAliveTime.Before(now) {
- // If the server can't respond to a nop in 15 seconds, assume it's dead
- err := c.conn.SetReadDeadline(now.Add(15 * time.Second))
- if err != nil {
- return nil, err
- }
-
- _, err = c.Exec("--;")
- if err != nil {
- return nil, err
- }
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return nil, err
+ }
- c.lastActivityTime = now
- }
+ err = c.initContext(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
- var deadline time.Time
- if stopTime.Before(nextEnsureAliveTime) {
- deadline = stopTime
- } else {
- deadline = nextEnsureAliveTime
+ if err = c.lock(); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if unlockErr := c.unlock(); unlockErr != nil && err == nil {
+ err = unlockErr
}
+ }()
- notification, err := c.waitForNotification(deadline)
- if err != ErrNotificationTimeout {
- return notification, err
- }
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
}
-}
-
-func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) {
- var zeroTime time.Time
for {
- // Use SetReadDeadline to implement the timeout. SetReadDeadline will
- // cause operations to fail with a *net.OpError that has a Timeout()
- // of true. Because the normal pgx rxMsg path considers any error to
- // have potentially corrupted the state of the connection, it dies
- // on any errors. So to avoid timeout errors in rxMsg we set the
- // deadline and peek into the reader. If a timeout error occurs there
- // we don't break the pgx connection. If the Peek returns that data
- // is available then we turn off the read deadline before the rxMsg.
- err := c.conn.SetReadDeadline(deadline)
+ msg, err := c.rxMsg()
if err != nil {
return nil, err
}
- // Wait until there is a byte available before continuing onto the normal msg reading path
- _, err = c.reader.Peek(1)
+ err = c.processContextFreeMsg(msg)
if err != nil {
- c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possiple error from SetReadDeadline
- if err, ok := err.(*net.OpError); ok && err.Timeout() {
- return nil, ErrNotificationTimeout
- }
- return nil, err
- }
-
- err = c.conn.SetReadDeadline(zeroTime)
- if err != nil {
- return nil, err
- }
-
- var t byte
- var r *msgReader
- if t, r, err = c.rxMsg(); err == nil {
- if err = c.processContextFreeMsg(t, r); err != nil {
- return nil, err
- }
- } else {
return nil, err
}
@@ -891,10 +1164,14 @@ func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) {
}
func (c *Conn) IsAlive() bool {
- return c.alive
+ c.mux.Lock()
+ defer c.mux.Unlock()
+ return c.status >= connStatusIdle
}
func (c *Conn) CauseOfDeath() error {
+ c.mux.Lock()
+ defer c.mux.Unlock()
return c.causeOfDeath
}
@@ -906,17 +1183,19 @@ func (c *Conn) sendQuery(sql string, arguments ...interface{}) (err error) {
}
func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error {
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
+ }
if len(args) == 0 {
- wbuf := newWriteBuf(c, 'Q')
- wbuf.WriteCString(sql)
- wbuf.closeMsg()
+ buf := appendQuery(c.wbuf, sql)
- _, err := c.conn.Write(wbuf.buf)
+ _, err := c.conn.Write(buf)
if err != nil {
c.die(err)
return err
}
+ c.pendingReadyForQueryCount++
return nil
}
@@ -930,168 +1209,105 @@ func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error {
}
func (c *Conn) sendPreparedQuery(ps *PreparedStatement, arguments ...interface{}) (err error) {
- if len(ps.ParameterOids) != len(arguments) {
- return fmt.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOids), len(arguments))
- }
-
- // bind
- wbuf := newWriteBuf(c, 'B')
- wbuf.WriteByte(0)
- wbuf.WriteCString(ps.Name)
-
- wbuf.WriteInt16(int16(len(ps.ParameterOids)))
- for i, oid := range ps.ParameterOids {
- switch arg := arguments[i].(type) {
- case Encoder:
- wbuf.WriteInt16(arg.FormatCode())
- case string, *string:
- wbuf.WriteInt16(TextFormatCode)
- default:
- switch oid {
- case BoolOid, ByteaOid, Int2Oid, Int4Oid, Int8Oid, Float4Oid, Float8Oid, TimestampTzOid, TimestampTzArrayOid, TimestampOid, TimestampArrayOid, DateOid, BoolArrayOid, ByteaArrayOid, Int2ArrayOid, Int4ArrayOid, Int8ArrayOid, Float4ArrayOid, Float8ArrayOid, TextArrayOid, VarcharArrayOid, OidOid, InetOid, CidrOid, InetArrayOid, CidrArrayOid, RecordOid, JsonOid, JsonbOid:
- wbuf.WriteInt16(BinaryFormatCode)
- default:
- wbuf.WriteInt16(TextFormatCode)
- }
- }
+ if len(ps.ParameterOIDs) != len(arguments) {
+ return errors.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOIDs), len(arguments))
}
- wbuf.WriteInt16(int16(len(arguments)))
- for i, oid := range ps.ParameterOids {
- if err := Encode(wbuf, oid, arguments[i]); err != nil {
- return err
- }
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return err
}
- wbuf.WriteInt16(int16(len(ps.FieldDescriptions)))
- for _, fd := range ps.FieldDescriptions {
- wbuf.WriteInt16(fd.FormatCode)
+ resultFormatCodes := make([]int16, len(ps.FieldDescriptions))
+ for i, fd := range ps.FieldDescriptions {
+ resultFormatCodes[i] = fd.FormatCode
}
-
- // execute
- wbuf.startMsg('E')
- wbuf.WriteByte(0)
- wbuf.WriteInt32(0)
-
- // sync
- wbuf.startMsg('S')
- wbuf.closeMsg()
-
- _, err = c.conn.Write(wbuf.buf)
+ buf, err := appendBind(c.wbuf, "", ps.Name, c.ConnInfo, ps.ParameterOIDs, arguments, resultFormatCodes)
if err != nil {
- c.die(err)
- }
-
- return err
-}
-
-// Exec executes sql. sql can be either a prepared statement name or an SQL string.
-// arguments should be referenced positionally from the sql string as $1, $2, etc.
-func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
- if err = c.lock(); err != nil {
- return commandTag, err
+ return err
}
- startTime := time.Now()
- c.lastActivityTime = startTime
+ buf = appendExecute(buf, "", 0)
+ buf = appendSync(buf)
- defer func() {
- if err == nil {
- if c.shouldLog(LogLevelInfo) {
- endTime := time.Now()
- c.log(LogLevelInfo, "Exec", "sql", sql, "args", logQueryArgs(arguments), "time", endTime.Sub(startTime), "commandTag", commandTag)
- }
- } else {
- if c.shouldLog(LogLevelError) {
- c.log(LogLevelError, "Exec", "sql", sql, "args", logQueryArgs(arguments), "error", err)
- }
+ n, err := c.conn.Write(buf)
+ if err != nil {
+ if fatalWriteErr(n, err) {
+ c.die(err)
}
+ return err
+ }
+ c.pendingReadyForQueryCount++
- if unlockErr := c.unlock(); unlockErr != nil && err == nil {
- err = unlockErr
- }
- }()
+ return nil
+}
- if err = c.sendQuery(sql, arguments...); err != nil {
- return
+// fatalWriteError takes the response of a net.Conn.Write and determines if it is fatal
+func fatalWriteErr(bytesWritten int, err error) bool {
+ // Partial writes break the connection
+ if bytesWritten > 0 {
+ return true
}
- var softErr error
-
- for {
- var t byte
- var r *msgReader
- t, r, err = c.rxMsg()
- if err != nil {
- return commandTag, err
- }
+ netErr, is := err.(net.Error)
+ return !(is && netErr.Timeout())
+}
- switch t {
- case readyForQuery:
- c.rxReadyForQuery(r)
- return commandTag, softErr
- case rowDescription:
- case dataRow:
- case bindComplete:
- case commandComplete:
- commandTag = CommandTag(r.readCString())
- default:
- if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil {
- softErr = e
- }
- }
- }
+// Exec executes sql. sql can be either a prepared statement name or an SQL string.
+// arguments should be referenced positionally from the sql string as $1, $2, etc.
+func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
+ return c.ExecEx(context.Background(), sql, nil, arguments...)
}
// Processes messages that are not exclusive to one context such as
-// authentication or query response. The response to these messages
-// is the same regardless of when they occur.
-func (c *Conn) processContextFreeMsg(t byte, r *msgReader) (err error) {
- switch t {
- case 'S':
- c.rxParameterStatus(r)
- return nil
- case errorResponse:
- return c.rxErrorResponse(r)
- case noticeResponse:
- return nil
- case emptyQueryResponse:
- return nil
- case notificationResponse:
- c.rxNotificationResponse(r)
- return nil
- default:
- return fmt.Errorf("Received unknown message type: %c", t)
+// authentication or query response. The response to these messages is the same
+// regardless of when they occur. It also ignores messages that are only
+// meaningful in a given context. These messages can occur due to a context
+// deadline interrupting message processing. For example, an interrupted query
+// may have left DataRow messages on the wire.
+func (c *Conn) processContextFreeMsg(msg pgproto3.BackendMessage) (err error) {
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ return c.rxErrorResponse(msg)
+ case *pgproto3.NoticeResponse:
+ c.rxNoticeResponse(msg)
+ case *pgproto3.NotificationResponse:
+ c.rxNotificationResponse(msg)
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
+ case *pgproto3.ParameterStatus:
+ c.rxParameterStatus(msg)
}
+
+ return nil
}
-func (c *Conn) rxMsg() (t byte, r *msgReader, err error) {
- if !c.alive {
- return 0, nil, ErrDeadConn
+func (c *Conn) rxMsg() (pgproto3.BackendMessage, error) {
+ if !c.IsAlive() {
+ return nil, ErrDeadConn
}
- t, err = c.mr.rxMsg()
+ msg, err := c.frontend.Receive()
if err != nil {
- c.die(err)
+ if netErr, ok := err.(net.Error); !(ok && netErr.Timeout()) {
+ c.die(err)
+ }
+ return nil, err
}
c.lastActivityTime = time.Now()
- if c.shouldLog(LogLevelTrace) {
- c.log(LogLevelTrace, "rxMsg", "type", string(t), "msgBytesRemaining", c.mr.msgBytesRemaining)
- }
+ // fmt.Printf("rxMsg: %#v\n", msg)
- return t, &c.mr, err
+ return msg, nil
}
-func (c *Conn) rxAuthenticationX(r *msgReader) (err error) {
- switch r.readInt32() {
- case 0: // AuthenticationOk
- case 3: // AuthenticationCleartextPassword
+func (c *Conn) rxAuthenticationX(msg *pgproto3.Authentication) (err error) {
+ switch msg.Type {
+ case pgproto3.AuthTypeOk:
+ case pgproto3.AuthTypeCleartextPassword:
err = c.txPasswordMessage(c.config.Password)
- case 5: // AuthenticationMD5Password
- salt := r.readString(4)
- digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+salt)
+ case pgproto3.AuthTypeMD5Password:
+ digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+string(msg.Salt[:]))
err = c.txPasswordMessage(digestedPassword)
default:
err = errors.New("Received unknown authentication message")
@@ -1106,114 +1322,103 @@ func hexMD5(s string) string {
return hex.EncodeToString(hash.Sum(nil))
}
-func (c *Conn) rxParameterStatus(r *msgReader) {
- key := r.readCString()
- value := r.readCString()
- c.RuntimeParams[key] = value
+func (c *Conn) rxParameterStatus(msg *pgproto3.ParameterStatus) {
+ c.RuntimeParams[msg.Name] = msg.Value
}
-func (c *Conn) rxErrorResponse(r *msgReader) (err PgError) {
- for {
- switch r.readByte() {
- case 'S':
- err.Severity = r.readCString()
- case 'C':
- err.Code = r.readCString()
- case 'M':
- err.Message = r.readCString()
- case 'D':
- err.Detail = r.readCString()
- case 'H':
- err.Hint = r.readCString()
- case 'P':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.Position = int32(n)
- case 'p':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.InternalPosition = int32(n)
- case 'q':
- err.InternalQuery = r.readCString()
- case 'W':
- err.Where = r.readCString()
- case 's':
- err.SchemaName = r.readCString()
- case 't':
- err.TableName = r.readCString()
- case 'c':
- err.ColumnName = r.readCString()
- case 'd':
- err.DataTypeName = r.readCString()
- case 'n':
- err.ConstraintName = r.readCString()
- case 'F':
- err.File = r.readCString()
- case 'L':
- s := r.readCString()
- n, _ := strconv.ParseInt(s, 10, 32)
- err.Line = int32(n)
- case 'R':
- err.Routine = r.readCString()
-
- case 0: // End of error message
- if err.Severity == "FATAL" {
- c.die(err)
- }
- return
- default: // Ignore other error fields
- r.readCString()
- }
+func (c *Conn) rxErrorResponse(msg *pgproto3.ErrorResponse) PgError {
+ err := PgError{
+ Severity: msg.Severity,
+ Code: msg.Code,
+ Message: msg.Message,
+ Detail: msg.Detail,
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: msg.InternalQuery,
+ Where: msg.Where,
+ SchemaName: msg.SchemaName,
+ TableName: msg.TableName,
+ ColumnName: msg.ColumnName,
+ DataTypeName: msg.DataTypeName,
+ ConstraintName: msg.ConstraintName,
+ File: msg.File,
+ Line: msg.Line,
+ Routine: msg.Routine,
+ }
+
+ if err.Severity == "FATAL" {
+ c.die(err)
}
-}
-func (c *Conn) rxBackendKeyData(r *msgReader) {
- c.Pid = r.readInt32()
- c.SecretKey = r.readInt32()
+ return err
}
-func (c *Conn) rxReadyForQuery(r *msgReader) {
- c.TxStatus = r.readByte()
+func (c *Conn) rxNoticeResponse(msg *pgproto3.NoticeResponse) {
+ if c.onNotice == nil {
+ return
+ }
+
+ notice := &Notice{
+ Severity: msg.Severity,
+ Code: msg.Code,
+ Message: msg.Message,
+ Detail: msg.Detail,
+ Hint: msg.Hint,
+ Position: msg.Position,
+ InternalPosition: msg.InternalPosition,
+ InternalQuery: msg.InternalQuery,
+ Where: msg.Where,
+ SchemaName: msg.SchemaName,
+ TableName: msg.TableName,
+ ColumnName: msg.ColumnName,
+ DataTypeName: msg.DataTypeName,
+ ConstraintName: msg.ConstraintName,
+ File: msg.File,
+ Line: msg.Line,
+ Routine: msg.Routine,
+ }
+
+ c.onNotice(c, notice)
}
-func (c *Conn) rxRowDescription(r *msgReader) (fields []FieldDescription) {
- fieldCount := r.readInt16()
- fields = make([]FieldDescription, fieldCount)
- for i := int16(0); i < fieldCount; i++ {
- f := &fields[i]
- f.Name = r.readCString()
- f.Table = r.readOid()
- f.AttributeNumber = r.readInt16()
- f.DataType = r.readOid()
- f.DataTypeSize = r.readInt16()
- f.Modifier = r.readInt32()
- f.FormatCode = r.readInt16()
- }
- return
+func (c *Conn) rxBackendKeyData(msg *pgproto3.BackendKeyData) {
+ c.pid = msg.ProcessID
+ c.secretKey = msg.SecretKey
}
-func (c *Conn) rxParameterDescription(r *msgReader) (parameters []Oid) {
- // Internally, PostgreSQL supports greater than 64k parameters to a prepared
- // statement. But the parameter description uses a 16-bit integer for the
- // count of parameters. If there are more than 64K parameters, this count is
- // wrong. So read the count, ignore it, and compute the proper value from
- // the size of the message.
- r.readInt16()
- parameterCount := r.msgBytesRemaining / 4
+func (c *Conn) rxReadyForQuery(msg *pgproto3.ReadyForQuery) {
+ c.pendingReadyForQueryCount--
+ c.txStatus = msg.TxStatus
+}
- parameters = make([]Oid, 0, parameterCount)
+func (c *Conn) rxRowDescription(msg *pgproto3.RowDescription) []FieldDescription {
+ fields := make([]FieldDescription, len(msg.Fields))
+ for i := 0; i < len(fields); i++ {
+ fields[i].Name = msg.Fields[i].Name
+ fields[i].Table = pgtype.OID(msg.Fields[i].TableOID)
+ fields[i].AttributeNumber = msg.Fields[i].TableAttributeNumber
+ fields[i].DataType = pgtype.OID(msg.Fields[i].DataTypeOID)
+ fields[i].DataTypeSize = msg.Fields[i].DataTypeSize
+ fields[i].Modifier = msg.Fields[i].TypeModifier
+ fields[i].FormatCode = msg.Fields[i].Format
+ }
+ return fields
+}
- for i := int32(0); i < parameterCount; i++ {
- parameters = append(parameters, r.readOid())
+func (c *Conn) rxParameterDescription(msg *pgproto3.ParameterDescription) []pgtype.OID {
+ parameters := make([]pgtype.OID, len(msg.ParameterOIDs))
+ for i := 0; i < len(parameters); i++ {
+ parameters[i] = pgtype.OID(msg.ParameterOIDs[i])
}
- return
+ return parameters
}
-func (c *Conn) rxNotificationResponse(r *msgReader) {
+func (c *Conn) rxNotificationResponse(msg *pgproto3.NotificationResponse) {
n := new(Notification)
- n.Pid = r.readInt32()
- n.Channel = r.readCString()
- n.Payload = r.readCString()
+ n.PID = msg.PID
+ n.Channel = msg.Channel
+ n.Payload = msg.Payload
c.notifications = append(c.notifications, n)
}
@@ -1237,40 +1442,54 @@ func (c *Conn) startTLS(tlsConfig *tls.Config) (err error) {
return nil
}
-func (c *Conn) txStartupMessage(msg *startupMessage) error {
- _, err := c.conn.Write(msg.Bytes())
- return err
-}
-
func (c *Conn) txPasswordMessage(password string) (err error) {
- wbuf := newWriteBuf(c, 'p')
- wbuf.WriteCString(password)
- wbuf.closeMsg()
+ buf := c.wbuf
+ buf = append(buf, 'p')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, password...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- _, err = c.conn.Write(wbuf.buf)
+ _, err = c.conn.Write(buf)
return err
}
func (c *Conn) die(err error) {
- c.alive = false
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status == connStatusClosed {
+ return
+ }
+
+ c.status = connStatusClosed
c.causeOfDeath = err
c.conn.Close()
}
func (c *Conn) lock() error {
- if c.busy {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status != connStatusIdle {
return ErrConnBusy
}
- c.busy = true
+
+ c.status = connStatusBusy
return nil
}
func (c *Conn) unlock() error {
- if !c.busy {
+ c.mux.Lock()
+ defer c.mux.Unlock()
+
+ if c.status != connStatusBusy {
return errors.New("unlock conn that is not busy")
}
- c.busy = false
+
+ c.status = connStatusIdle
return nil
}
@@ -1278,23 +1497,15 @@ func (c *Conn) shouldLog(lvl int) bool {
return c.logger != nil && c.logLevel >= lvl
}
-func (c *Conn) log(lvl int, msg string, ctx ...interface{}) {
- if c.Pid != 0 {
- ctx = append(ctx, "pid", c.Pid)
+func (c *Conn) log(lvl LogLevel, msg string, data map[string]interface{}) {
+ if data == nil {
+ data = map[string]interface{}{}
}
-
- switch lvl {
- case LogLevelTrace:
- c.logger.Debug(msg, ctx...)
- case LogLevelDebug:
- c.logger.Debug(msg, ctx...)
- case LogLevelInfo:
- c.logger.Info(msg, ctx...)
- case LogLevelWarn:
- c.logger.Warn(msg, ctx...)
- case LogLevelError:
- c.logger.Error(msg, ctx...)
+ if c.pid != 0 {
+ data["pid"] = c.pid
}
+
+ c.logger.Log(lvl, msg, data)
}
// SetLogger replaces the current logger and returns the previous logger.
@@ -1320,3 +1531,306 @@ func (c *Conn) SetLogLevel(lvl int) (int, error) {
func quoteIdentifier(s string) string {
return `"` + strings.Replace(s, `"`, `""`, -1) + `"`
}
+
+// cancelQuery sends a cancel request to the PostgreSQL server. It returns an
+// error if unable to deliver the cancel request, but lack of an error does not
+// ensure that the query was canceled. As specified in the documentation, there
+// is no way to be sure a query was canceled. See
+// https://www.postgresql.org/docs/current/static/protocol-flow.html#AEN112861
+func (c *Conn) cancelQuery() {
+ if !atomic.CompareAndSwapInt32(&c.cancelQueryInProgress, 0, 1) {
+ panic("cancelQuery when cancelQueryInProgress")
+ }
+
+ if err := c.conn.SetDeadline(time.Now()); err != nil {
+ c.Close() // Close connection if unable to set deadline
+ return
+ }
+
+ doCancel := func() error {
+ network, address := c.config.networkAddress()
+ cancelConn, err := c.config.Dial(network, address)
+ if err != nil {
+ return err
+ }
+ defer cancelConn.Close()
+
+ // If server doesn't process cancellation request in bounded time then abort.
+ err = cancelConn.SetDeadline(time.Now().Add(15 * time.Second))
+ if err != nil {
+ return err
+ }
+
+ buf := make([]byte, 16)
+ binary.BigEndian.PutUint32(buf[0:4], 16)
+ binary.BigEndian.PutUint32(buf[4:8], 80877102)
+ binary.BigEndian.PutUint32(buf[8:12], uint32(c.pid))
+ binary.BigEndian.PutUint32(buf[12:16], uint32(c.secretKey))
+ _, err = cancelConn.Write(buf)
+ if err != nil {
+ return err
+ }
+
+ _, err = cancelConn.Read(buf)
+ if err != io.EOF {
+ return errors.Errorf("Server failed to close connection after cancel query request: %v %v", err, buf)
+ }
+
+ return nil
+ }
+
+ go func() {
+ err := doCancel()
+ if err != nil {
+ c.Close() // Something is very wrong. Terminate the connection.
+ }
+ c.cancelQueryCompleted <- struct{}{}
+ }()
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ _, err := c.ExecEx(ctx, ";", nil)
+ return err
+}
+
+func (c *Conn) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (CommandTag, error) {
+ err := c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ if err := c.lock(); err != nil {
+ return "", err
+ }
+ defer c.unlock()
+
+ startTime := time.Now()
+ c.lastActivityTime = startTime
+
+ commandTag, err := c.execEx(ctx, sql, options, arguments...)
+ if err != nil {
+ if c.shouldLog(LogLevelError) {
+ c.log(LogLevelError, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "err": err})
+ }
+ return commandTag, err
+ }
+
+ if c.shouldLog(LogLevelInfo) {
+ endTime := time.Now()
+ c.log(LogLevelInfo, "Exec", map[string]interface{}{"sql": sql, "args": logQueryArgs(arguments), "time": endTime.Sub(startTime), "commandTag": commandTag})
+ }
+
+ return commandTag, err
+}
+
+func (c *Conn) execEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
+ err = c.initContext(ctx)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ err = c.termContext(err)
+ }()
+
+ if (options == nil && c.config.PreferSimpleProtocol) || (options != nil && options.SimpleProtocol) {
+ err = c.sanitizeAndSendSimpleQuery(sql, arguments...)
+ if err != nil {
+ return "", err
+ }
+ } else if options != nil && len(options.ParameterOIDs) > 0 {
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ return "", err
+ }
+
+ buf, err := c.buildOneRoundTripExec(c.wbuf, sql, options, arguments)
+ if err != nil {
+ return "", err
+ }
+
+ buf = appendSync(buf)
+
+ n, err := c.conn.Write(buf)
+ if err != nil && fatalWriteErr(n, err) {
+ c.die(err)
+ return "", err
+ }
+ c.pendingReadyForQueryCount++
+ } else {
+ if len(arguments) > 0 {
+ ps, ok := c.preparedStatements[sql]
+ if !ok {
+ var err error
+ ps, err = c.prepareEx("", sql, nil)
+ if err != nil {
+ return "", err
+ }
+ }
+
+ err = c.sendPreparedQuery(ps, arguments...)
+ if err != nil {
+ return "", err
+ }
+ } else {
+ if err = c.sendQuery(sql, arguments...); err != nil {
+ return
+ }
+ }
+ }
+
+ var softErr error
+
+ for {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return commandTag, err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ c.rxReadyForQuery(msg)
+ return commandTag, softErr
+ case *pgproto3.CommandComplete:
+ commandTag = CommandTag(msg.CommandTag)
+ default:
+ if e := c.processContextFreeMsg(msg); e != nil && softErr == nil {
+ softErr = e
+ }
+ }
+ }
+}
+
+func (c *Conn) buildOneRoundTripExec(buf []byte, sql string, options *QueryExOptions, arguments []interface{}) ([]byte, error) {
+ if len(arguments) != len(options.ParameterOIDs) {
+ return nil, errors.Errorf("mismatched number of arguments (%d) and options.ParameterOIDs (%d)", len(arguments), len(options.ParameterOIDs))
+ }
+
+ if len(options.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of QueryExOptions ParameterOIDs must be between 0 and 65535, received %d", len(options.ParameterOIDs))
+ }
+
+ buf = appendParse(buf, "", sql, options.ParameterOIDs)
+ buf, err := appendBind(buf, "", "", c.ConnInfo, options.ParameterOIDs, arguments, nil)
+ if err != nil {
+ return nil, err
+ }
+ buf = appendExecute(buf, "", 0)
+
+ return buf, nil
+}
+
+func (c *Conn) initContext(ctx context.Context) error {
+ if c.ctxInProgress {
+ return errors.New("ctx already in progress")
+ }
+
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c.ctxInProgress = true
+
+ go c.contextHandler(ctx)
+
+ return nil
+}
+
+func (c *Conn) termContext(opErr error) error {
+ if !c.ctxInProgress {
+ return opErr
+ }
+
+ var err error
+
+ select {
+ case err = <-c.closedChan:
+ if opErr == nil {
+ err = nil
+ }
+ case c.doneChan <- struct{}{}:
+ err = opErr
+ }
+
+ c.ctxInProgress = false
+ return err
+}
+
+func (c *Conn) contextHandler(ctx context.Context) {
+ select {
+ case <-ctx.Done():
+ c.cancelQuery()
+ c.closedChan <- ctx.Err()
+ case <-c.doneChan:
+ }
+}
+
+func (c *Conn) waitForPreviousCancelQuery(ctx context.Context) error {
+ if atomic.LoadInt32(&c.cancelQueryInProgress) == 0 {
+ return nil
+ }
+
+ select {
+ case <-c.cancelQueryCompleted:
+ atomic.StoreInt32(&c.cancelQueryInProgress, 0)
+ if err := c.conn.SetDeadline(time.Time{}); err != nil {
+ c.Close() // Close connection if unable to disable deadline
+ return err
+ }
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (c *Conn) ensureConnectionReadyForQuery() error {
+ for c.pendingReadyForQueryCount > 0 {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ErrorResponse:
+ pgErr := c.rxErrorResponse(msg)
+ if pgErr.Severity == "FATAL" {
+ return pgErr
+ }
+ default:
+ err = c.processContextFreeMsg(msg)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func connInfoFromRows(rows *Rows, err error) (map[string]pgtype.OID, error) {
+ if err != nil {
+ return nil, err
+ }
+ defer rows.Close()
+
+ nameOIDs := make(map[string]pgtype.OID, 256)
+ for rows.Next() {
+ var oid pgtype.OID
+ var name pgtype.Text
+ if err = rows.Scan(&oid, &name); err != nil {
+ return nil, err
+ }
+
+ nameOIDs[name.String] = oid
+ }
+
+ if err = rows.Err(); err != nil {
+ return nil, err
+ }
+
+ return nameOIDs, err
+}
diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.example b/vendor/github.com/jackc/pgx/conn_config_test.go.example
index cac798b..463c084 100644
--- a/vendor/github.com/jackc/pgx/conn_config_test.go.example
+++ b/vendor/github.com/jackc/pgx/conn_config_test.go.example
@@ -15,6 +15,7 @@ var invalidUserConnConfig *pgx.ConnConfig = nil
var tlsConnConfig *pgx.ConnConfig = nil
var customDialerConnConfig *pgx.ConnConfig = nil
var replicationConnConfig *pgx.ConnConfig = nil
+var cratedbConnConfig *pgx.ConnConfig = nil
// var tcpConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
// var unixSocketConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "/private/tmp", User: "pgx_none", Database: "pgx_test"}
@@ -23,3 +24,5 @@ var replicationConnConfig *pgx.ConnConfig = nil
// var invalidUserConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid", Database: "pgx_test"}
// var tlsConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}}
// var customDialerConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
+// var replicationConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_replication", Password: "secret", Database: "pgx_test"}
+
diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.travis b/vendor/github.com/jackc/pgx/conn_config_test.go.travis
index 75714bf..cf29a74 100644
--- a/vendor/github.com/jackc/pgx/conn_config_test.go.travis
+++ b/vendor/github.com/jackc/pgx/conn_config_test.go.travis
@@ -16,15 +16,21 @@ var invalidUserConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid",
var tlsConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_ssl", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}}
var customDialerConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"}
var replicationConnConfig *pgx.ConnConfig = nil
+var cratedbConnConfig *pgx.ConnConfig = nil
func init() {
- version := os.Getenv("PGVERSION")
+ pgVersion := os.Getenv("PGVERSION")
- if len(version) > 0 {
- v, err := strconv.ParseFloat(version,64)
+ if len(pgVersion) > 0 {
+ v, err := strconv.ParseFloat(pgVersion, 64)
if err == nil && v >= 9.6 {
replicationConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_replication", Password: "secret", Database: "pgx_test"}
}
}
+
+ crateVersion := os.Getenv("CRATEVERSION")
+ if crateVersion != "" {
+ cratedbConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", Port: 6543, User: "pgx", Password: "", Database: "pgx_test"}
+ }
}
diff --git a/vendor/github.com/jackc/pgx/conn_pool.go b/vendor/github.com/jackc/pgx/conn_pool.go
index 1913699..6ca0ee0 100644
--- a/vendor/github.com/jackc/pgx/conn_pool.go
+++ b/vendor/github.com/jackc/pgx/conn_pool.go
@@ -1,9 +1,13 @@
package pgx
import (
- "errors"
+ "context"
"sync"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgtype"
)
type ConnPoolConfig struct {
@@ -27,11 +31,7 @@ type ConnPool struct {
closed bool
preparedStatements map[string]*PreparedStatement
acquireTimeout time.Duration
- pgTypes map[Oid]PgType
- pgsqlAfInet *byte
- pgsqlAfInet6 *byte
- txAfterClose func(tx *Tx)
- rowsAfterClose func(rows *Rows)
+ connInfo *pgtype.ConnInfo
}
type ConnPoolStat struct {
@@ -43,11 +43,15 @@ type ConnPoolStat struct {
// ErrAcquireTimeout occurs when an attempt to acquire a connection times out.
var ErrAcquireTimeout = errors.New("timeout acquiring connection from pool")
+// ErrClosedPool occurs on an attempt to acquire a connection from a closed pool.
+var ErrClosedPool = errors.New("cannot acquire from closed pool")
+
// NewConnPool creates a new ConnPool. config.ConnConfig is passed through to
// Connect directly.
func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
p = new(ConnPool)
p.config = config.ConnConfig
+ p.connInfo = minimalConnInfo
p.maxConnections = config.MaxConnections
if p.maxConnections == 0 {
p.maxConnections = 5
@@ -73,14 +77,6 @@ func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
p.logLevel = LogLevelNone
}
- p.txAfterClose = func(tx *Tx) {
- p.Release(tx.Conn())
- }
-
- p.rowsAfterClose = func(rows *Rows) {
- p.Release(rows.Conn())
- }
-
p.allConnections = make([]*Conn, 0, p.maxConnections)
p.availableConnections = make([]*Conn, 0, p.maxConnections)
p.preparedStatements = make(map[string]*PreparedStatement)
@@ -94,6 +90,7 @@ func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) {
}
p.allConnections = append(p.allConnections, c)
p.availableConnections = append(p.availableConnections, c)
+ p.connInfo = c.ConnInfo.DeepCopy()
return
}
@@ -114,7 +111,7 @@ func (p *ConnPool) deadlinePassed(deadline *time.Time) bool {
// acquire performs acquision assuming pool is already locked
func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
if p.closed {
- return nil, errors.New("cannot acquire from closed pool")
+ return nil, ErrClosedPool
}
// A connection is available
@@ -161,7 +158,7 @@ func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
}
// All connections are in use and we cannot create more
if p.logLevel >= LogLevelWarn {
- p.logger.Warn("All connections in pool are busy - waiting...")
+ p.logger.Log(LogLevelWarn, "waiting for available connection", nil)
}
// Wait until there is an available connection OR room to create a new connection
@@ -181,7 +178,11 @@ func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) {
// Release gives up use of a connection.
func (p *ConnPool) Release(conn *Conn) {
- if conn.TxStatus != 'I' {
+ if conn.ctxInProgress {
+ panic("should never release when context is in progress")
+ }
+
+ if conn.txStatus != 'I' {
conn.Exec("rollback")
}
@@ -223,25 +224,21 @@ func (p *ConnPool) removeFromAllConnections(conn *Conn) bool {
return false
}
-// Close ends the use of a connection pool. It prevents any new connections
-// from being acquired, waits until all acquired connections are released,
-// then closes all underlying connections.
+// Close ends the use of a connection pool. It prevents any new connections from
+// being acquired and closes available underlying connections. Any acquired
+// connections will be closed when they are released.
func (p *ConnPool) Close() {
p.cond.L.Lock()
defer p.cond.L.Unlock()
p.closed = true
- // Wait until all connections are released
- if len(p.availableConnections) != len(p.allConnections) {
- for len(p.availableConnections) != len(p.allConnections) {
- p.cond.Wait()
- }
- }
-
- for _, c := range p.allConnections {
+ for _, c := range p.availableConnections {
_ = c.Close()
}
+
+ // This will cause any checked out connections to be closed on release
+ p.resetCount++
}
// Reset closes all open connections, but leaves the pool open. It is intended
@@ -289,7 +286,7 @@ func (p *ConnPool) Stat() (s ConnPoolStat) {
}
func (p *ConnPool) createConnection() (*Conn, error) {
- c, err := connect(p.config, p.pgTypes, p.pgsqlAfInet, p.pgsqlAfInet6)
+ c, err := connect(p.config, p.connInfo)
if err != nil {
return nil, err
}
@@ -324,10 +321,6 @@ func (p *ConnPool) createConnectionUnlocked() (*Conn, error) {
// afterConnectionCreated executes (if it is) afterConnect() callback and prepares
// all the known statements for the new connection.
func (p *ConnPool) afterConnectionCreated(c *Conn) (*Conn, error) {
- p.pgTypes = c.PgTypes
- p.pgsqlAfInet = c.pgsqlAfInet
- p.pgsqlAfInet6 = c.pgsqlAfInet6
-
if p.afterConnect != nil {
err := p.afterConnect(c)
if err != nil {
@@ -357,6 +350,16 @@ func (p *ConnPool) Exec(sql string, arguments ...interface{}) (commandTag Comman
return c.Exec(sql, arguments...)
}
+func (p *ConnPool) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
+ var c *Conn
+ if c, err = p.Acquire(); err != nil {
+ return
+ }
+ defer p.Release(c)
+
+ return c.ExecEx(ctx, sql, options, arguments...)
+}
+
// Query acquires a connection and delegates the call to that connection. When
// *Rows are closed, the connection is released automatically.
func (p *ConnPool) Query(sql string, args ...interface{}) (*Rows, error) {
@@ -372,7 +375,25 @@ func (p *ConnPool) Query(sql string, args ...interface{}) (*Rows, error) {
return rows, err
}
- rows.AfterClose(p.rowsAfterClose)
+ rows.connPool = p
+
+ return rows, nil
+}
+
+func (p *ConnPool) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (*Rows, error) {
+ c, err := p.Acquire()
+ if err != nil {
+ // Because checking for errors can be deferred to the *Rows, build one with the error
+ return &Rows{closed: true, err: err}, err
+ }
+
+ rows, err := c.QueryEx(ctx, sql, options, args...)
+ if err != nil {
+ p.Release(c)
+ return rows, err
+ }
+
+ rows.connPool = p
return rows, nil
}
@@ -385,10 +406,15 @@ func (p *ConnPool) QueryRow(sql string, args ...interface{}) *Row {
return (*Row)(rows)
}
+func (p *ConnPool) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := p.QueryEx(ctx, sql, options, args...)
+ return (*Row)(rows)
+}
+
// Begin acquires a connection and begins a transaction on it. When the
// transaction is closed the connection will be automatically released.
func (p *ConnPool) Begin() (*Tx, error) {
- return p.BeginIso("")
+ return p.BeginEx(context.Background(), nil)
}
// Prepare creates a prepared statement on a connection in the pool to test the
@@ -403,7 +429,7 @@ func (p *ConnPool) Begin() (*Tx, error) {
// the same name and sql arguments. This allows a code path to Prepare and
// Query/Exec/PrepareEx without concern for if the statement has already been prepared.
func (p *ConnPool) Prepare(name, sql string) (*PreparedStatement, error) {
- return p.PrepareEx(name, sql, nil)
+ return p.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx creates a prepared statement on a connection in the pool to test the
@@ -417,7 +443,7 @@ func (p *ConnPool) Prepare(name, sql string) (*PreparedStatement, error) {
// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same
// name and sql arguments. This allows a code path to PrepareEx and Query/Exec/Prepare without
// concern for if the statement has already been prepared.
-func (p *ConnPool) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
+func (p *ConnPool) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
p.cond.L.Lock()
defer p.cond.L.Unlock()
@@ -439,13 +465,13 @@ func (p *ConnPool) PrepareEx(name, sql string, opts *PrepareExOptions) (*Prepare
return ps, nil
}
- ps, err := c.PrepareEx(name, sql, opts)
+ ps, err := c.PrepareEx(ctx, name, sql, opts)
if err != nil {
return nil, err
}
for _, c := range p.availableConnections {
- _, err := c.PrepareEx(name, sql, opts)
+ _, err := c.PrepareEx(ctx, name, sql, opts)
if err != nil {
return nil, err
}
@@ -474,17 +500,17 @@ func (p *ConnPool) Deallocate(name string) (err error) {
return nil
}
-// BeginIso acquires a connection and begins a transaction in isolation mode iso
-// on it. When the transaction is closed the connection will be automatically
-// released.
-func (p *ConnPool) BeginIso(iso string) (*Tx, error) {
+// BeginEx acquires a connection and starts a transaction with txOptions
+// determining the transaction mode. When the transaction is closed the
+// connection will be automatically released.
+func (p *ConnPool) BeginEx(ctx context.Context, txOptions *TxOptions) (*Tx, error) {
for {
c, err := p.Acquire()
if err != nil {
return nil, err
}
- tx, err := c.BeginIso(iso)
+ tx, err := c.BeginEx(ctx, txOptions)
if err != nil {
alive := c.IsAlive()
p.Release(c)
@@ -493,37 +519,31 @@ func (p *ConnPool) BeginIso(iso string) (*Tx, error) {
// again on a new connection would fix, so just return the error. But
// if the connection is dead try to acquire a new connection and try
// again.
- if alive {
+ if alive || ctx.Err() != nil {
return nil, err
}
continue
}
- tx.AfterClose(p.txAfterClose)
+ tx.connPool = p
return tx, nil
}
}
-// Deprecated. Use CopyFrom instead. CopyTo acquires a connection, delegates the
-// call to that connection, and releases the connection.
-func (p *ConnPool) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
+// CopyFrom acquires a connection, delegates the call to that connection, and releases the connection
+func (p *ConnPool) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) {
c, err := p.Acquire()
if err != nil {
return 0, err
}
defer p.Release(c)
- return c.CopyTo(tableName, columnNames, rowSrc)
+ return c.CopyFrom(tableName, columnNames, rowSrc)
}
-// CopyFrom acquires a connection, delegates the call to that connection, and
-// releases the connection.
-func (p *ConnPool) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) {
+// BeginBatch acquires a connection and begins a batch on that connection. When
+// *Batch is finished, the connection is released automatically.
+func (p *ConnPool) BeginBatch() *Batch {
c, err := p.Acquire()
- if err != nil {
- return 0, err
- }
- defer p.Release(c)
-
- return c.CopyFrom(tableName, columnNames, rowSrc)
+ return &Batch{conn: c, connPool: p, err: err}
}
diff --git a/vendor/github.com/jackc/pgx/conn_pool_private_test.go b/vendor/github.com/jackc/pgx/conn_pool_private_test.go
deleted file mode 100644
index ef0ec1d..0000000
--- a/vendor/github.com/jackc/pgx/conn_pool_private_test.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package pgx
-
-import (
- "testing"
-)
-
-func compareConnSlices(slice1, slice2 []*Conn) bool {
- if len(slice1) != len(slice2) {
- return false
- }
- for i, c := range slice1 {
- if c != slice2[i] {
- return false
- }
- }
- return true
-}
-
-func TestConnPoolRemoveFromAllConnections(t *testing.T) {
- t.Parallel()
- pool := ConnPool{}
- conn1 := &Conn{}
- conn2 := &Conn{}
- conn3 := &Conn{}
-
- // First element
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn1)
- if !compareConnSlices(pool.allConnections, []*Conn{conn2, conn3}) {
- t.Fatal("First element test failed")
- }
- // Element somewhere in the middle
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn2)
- if !compareConnSlices(pool.allConnections, []*Conn{conn1, conn3}) {
- t.Fatal("Middle element test failed")
- }
- // Last element
- pool.allConnections = []*Conn{conn1, conn2, conn3}
- pool.removeFromAllConnections(conn3)
- if !compareConnSlices(pool.allConnections, []*Conn{conn1, conn2}) {
- t.Fatal("Last element test failed")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/conn_pool_test.go b/vendor/github.com/jackc/pgx/conn_pool_test.go
deleted file mode 100644
index ab76bfb..0000000
--- a/vendor/github.com/jackc/pgx/conn_pool_test.go
+++ /dev/null
@@ -1,982 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "net"
- "sync"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func createConnPool(t *testing.T, maxConnections int) *pgx.ConnPool {
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: maxConnections}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- return pool
-}
-
-func acquireAllConnections(t *testing.T, pool *pgx.ConnPool, maxConnections int) []*pgx.Conn {
- connections := make([]*pgx.Conn, maxConnections)
- for i := 0; i < maxConnections; i++ {
- var err error
- if connections[i], err = pool.Acquire(); err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- }
- return connections
-}
-
-func releaseAllConnections(pool *pgx.ConnPool, connections []*pgx.Conn) {
- for _, c := range connections {
- pool.Release(c)
- }
-}
-
-func acquireWithTimeTaken(pool *pgx.ConnPool) (*pgx.Conn, time.Duration, error) {
- startTime := time.Now()
- c, err := pool.Acquire()
- return c, time.Since(startTime), err
-}
-
-func TestNewConnPool(t *testing.T) {
- t.Parallel()
-
- var numCallbacks int
- afterConnect := func(c *pgx.Conn) error {
- numCallbacks++
- return nil
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 2, AfterConnect: afterConnect}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatal("Unable to establish connection pool")
- }
- defer pool.Close()
-
- // It initially connects once
- stat := pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Errorf("Expected 1 connection to be established immediately, but %v were", numCallbacks)
- }
-
- // Pool creation returns an error if any AfterConnect callback does
- errAfterConnect := errors.New("Some error")
- afterConnect = func(c *pgx.Conn) error {
- return errAfterConnect
- }
-
- config = pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig, MaxConnections: 2, AfterConnect: afterConnect}
- pool, err = pgx.NewConnPool(config)
- if err != errAfterConnect {
- t.Errorf("Expected errAfterConnect but received unexpected: %v", err)
- }
-}
-
-func TestNewConnPoolDefaultsTo5MaxConnections(t *testing.T) {
- t.Parallel()
-
- config := pgx.ConnPoolConfig{ConnConfig: *defaultConnConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatal("Unable to establish connection pool")
- }
- defer pool.Close()
-
- if n := pool.Stat().MaxConnections; n != 5 {
- t.Fatalf("Expected pool to default to 5 max connections, but it was %d", n)
- }
-}
-
-func TestPoolAcquireAndReleaseCycle(t *testing.T) {
- t.Parallel()
-
- maxConnections := 2
- incrementCount := int32(100)
- completeSync := make(chan int)
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- allConnections := acquireAllConnections(t, pool, maxConnections)
-
- for _, c := range allConnections {
- mustExec(t, c, "create temporary table t(counter integer not null)")
- mustExec(t, c, "insert into t(counter) values(0);")
- }
-
- releaseAllConnections(pool, allConnections)
-
- f := func() {
- conn, err := pool.Acquire()
- if err != nil {
- t.Fatal("Unable to acquire connection")
- }
- defer pool.Release(conn)
-
- // Increment counter...
- mustExec(t, conn, "update t set counter = counter + 1")
- completeSync <- 0
- }
-
- for i := int32(0); i < incrementCount; i++ {
- go f()
- }
-
- // Wait for all f() to complete
- for i := int32(0); i < incrementCount; i++ {
- <-completeSync
- }
-
- // Check that temp table in each connection has been incremented some number of times
- actualCount := int32(0)
- allConnections = acquireAllConnections(t, pool, maxConnections)
-
- for _, c := range allConnections {
- var n int32
- c.QueryRow("select counter from t").Scan(&n)
- if n == 0 {
- t.Error("A connection was never used")
- }
-
- actualCount += n
- }
-
- if actualCount != incrementCount {
- fmt.Println(actualCount)
- t.Error("Wrong number of increments")
- }
-
- releaseAllConnections(pool, allConnections)
-}
-
-func TestPoolNonBlockingConnections(t *testing.T) {
- t.Parallel()
-
- var dialCountLock sync.Mutex
- dialCount := 0
- openTimeout := 1 * time.Second
- testDialer := func(network, address string) (net.Conn, error) {
- var firstDial bool
- dialCountLock.Lock()
- dialCount++
- firstDial = dialCount == 1
- dialCountLock.Unlock()
-
- if firstDial {
- return net.Dial(network, address)
- } else {
- time.Sleep(openTimeout)
- return nil, &net.OpError{Op: "dial", Net: "tcp"}
- }
- }
-
- maxConnections := 3
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: maxConnections,
- }
- config.ConnConfig.Dial = testDialer
-
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool not to fail, instead it failed with: %v", err)
- }
- defer pool.Close()
-
- // NewConnPool establishes an initial connection
- // so we need to close that for the rest of the test
- if conn, err := pool.Acquire(); err == nil {
- conn.Close()
- pool.Release(conn)
- } else {
- t.Fatalf("pool.Acquire unexpectedly failed: %v", err)
- }
-
- var wg sync.WaitGroup
- wg.Add(maxConnections)
-
- startedAt := time.Now()
- for i := 0; i < maxConnections; i++ {
- go func() {
- _, err := pool.Acquire()
- wg.Done()
- if err == nil {
- t.Fatal("Acquire() expected to fail but it did not")
- }
- }()
- }
- wg.Wait()
-
- // Prior to createConnectionUnlocked() use the test took
- // maxConnections * openTimeout seconds to complete.
- // With createConnectionUnlocked() it takes ~ 1 * openTimeout seconds.
- timeTaken := time.Since(startedAt)
- if timeTaken > openTimeout+1*time.Second {
- t.Fatalf("Expected all Acquire() to run in parallel and take about %v, instead it took '%v'", openTimeout, timeTaken)
- }
-
-}
-
-func TestAcquireTimeoutSanity(t *testing.T) {
- t.Parallel()
-
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: 1,
- }
-
- // case 1: default 0 value
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool with default config.AcquireTimeout not to fail, instead it failed with '%v'", err)
- }
- pool.Close()
-
- // case 2: negative value
- config.AcquireTimeout = -1 * time.Second
- _, err = pgx.NewConnPool(config)
- if err == nil {
- t.Fatal("Expected NewConnPool with negative config.AcquireTimeout to fail, instead it did not")
- }
-
- // case 3: positive value
- config.AcquireTimeout = 1 * time.Second
- pool, err = pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Expected NewConnPool with positive config.AcquireTimeout not to fail, instead it failed with '%v'", err)
- }
- defer pool.Close()
-}
-
-func TestPoolWithAcquireTimeoutSet(t *testing.T) {
- t.Parallel()
-
- connAllocTimeout := 2 * time.Second
- config := pgx.ConnPoolConfig{
- ConnConfig: *defaultConnConfig,
- MaxConnections: 1,
- AcquireTimeout: connAllocTimeout,
- }
-
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- // Consume all connections ...
- allConnections := acquireAllConnections(t, pool, config.MaxConnections)
- defer releaseAllConnections(pool, allConnections)
-
- // ... then try to consume 1 more. It should fail after a short timeout.
- _, timeTaken, err := acquireWithTimeTaken(pool)
-
- if err == nil || err != pgx.ErrAcquireTimeout {
- t.Fatalf("Expected error to be pgx.ErrAcquireTimeout, instead it was '%v'", err)
- }
- if timeTaken < connAllocTimeout {
- t.Fatalf("Expected connection allocation time to be at least %v, instead it was '%v'", connAllocTimeout, timeTaken)
- }
-}
-
-func TestPoolWithoutAcquireTimeoutSet(t *testing.T) {
- t.Parallel()
-
- maxConnections := 1
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- // Consume all connections ...
- allConnections := acquireAllConnections(t, pool, maxConnections)
-
- // ... then try to consume 1 more. It should hang forever.
- // To unblock it we release the previously taken connection in a goroutine.
- stopDeadWaitTimeout := 5 * time.Second
- timer := time.AfterFunc(stopDeadWaitTimeout, func() {
- releaseAllConnections(pool, allConnections)
- })
- defer timer.Stop()
-
- conn, timeTaken, err := acquireWithTimeTaken(pool)
- if err == nil {
- pool.Release(conn)
- } else {
- t.Fatalf("Expected error to be nil, instead it was '%v'", err)
- }
- if timeTaken < stopDeadWaitTimeout {
- t.Fatalf("Expected connection allocation time to be at least %v, instead it was '%v'", stopDeadWaitTimeout, timeTaken)
- }
-}
-
-func TestPoolReleaseWithTransactions(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- conn, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- mustExec(t, conn, "begin")
- if _, err = conn.Exec("selct"); err == nil {
- t.Fatal("Did not receive expected error")
- }
-
- if conn.TxStatus != 'E' {
- t.Fatalf("Expected TxStatus to be 'E', instead it was '%c'", conn.TxStatus)
- }
-
- pool.Release(conn)
-
- if conn.TxStatus != 'I' {
- t.Fatalf("Expected release to rollback errored transaction, but it did not: '%c'", conn.TxStatus)
- }
-
- conn, err = pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- mustExec(t, conn, "begin")
- if conn.TxStatus != 'T' {
- t.Fatalf("Expected txStatus to be 'T', instead it was '%c'", conn.TxStatus)
- }
-
- pool.Release(conn)
-
- if conn.TxStatus != 'I' {
- t.Fatalf("Expected release to rollback uncommitted transaction, but it did not: '%c'", conn.TxStatus)
- }
-}
-
-func TestPoolAcquireAndReleaseCycleAutoConnect(t *testing.T) {
- t.Parallel()
-
- maxConnections := 3
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- doSomething := func() {
- c, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to Acquire: %v", err)
- }
- rows, _ := c.Query("select 1, pg_sleep(0.02)")
- rows.Close()
- pool.Release(c)
- }
-
- for i := 0; i < 10; i++ {
- doSomething()
- }
-
- stat := pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Fatalf("Pool shouldn't have established more connections when no contention: %v", stat.CurrentConnections)
- }
-
- var wg sync.WaitGroup
- for i := 0; i < 10; i++ {
- wg.Add(1)
- go func() {
- defer wg.Done()
- doSomething()
- }()
- }
- wg.Wait()
-
- stat = pool.Stat()
- if stat.CurrentConnections != stat.MaxConnections {
- t.Fatalf("Pool should have used all possible connections: %v", stat.CurrentConnections)
- }
-}
-
-func TestPoolReleaseDiscardsDeadConnections(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- maxConnections := 3
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- var c1, c2 *pgx.Conn
- var err error
- var stat pgx.ConnPoolStat
-
- if c1, err = pool.Acquire(); err != nil {
- t.Fatalf("Unexpected error acquiring connection: %v", err)
- }
- defer func() {
- if c1 != nil {
- pool.Release(c1)
- }
- }()
-
- if c2, err = pool.Acquire(); err != nil {
- t.Fatalf("Unexpected error acquiring connection: %v", err)
- }
- defer func() {
- if c2 != nil {
- pool.Release(c2)
- }
- }()
-
- if _, err = c2.Exec("select pg_terminate_backend($1)", c1.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- // do something with the connection so it knows it's dead
- rows, _ := c1.Query("select 1")
- rows.Close()
- if rows.Err() == nil {
- t.Fatal("Expected error but none occurred")
- }
-
- if c1.IsAlive() {
- t.Fatal("Expected connection to be dead but it wasn't")
- }
-
- stat = pool.Stat()
- if stat.CurrentConnections != 2 {
- t.Fatalf("Unexpected CurrentConnections: %v", stat.CurrentConnections)
- }
- if stat.AvailableConnections != 0 {
- t.Fatalf("Unexpected AvailableConnections: %v", stat.CurrentConnections)
- }
-
- pool.Release(c1)
- c1 = nil // so it doesn't get released again by the defer
-
- stat = pool.Stat()
- if stat.CurrentConnections != 1 {
- t.Fatalf("Unexpected CurrentConnections: %v", stat.CurrentConnections)
- }
- if stat.AvailableConnections != 0 {
- t.Fatalf("Unexpected AvailableConnections: %v", stat.CurrentConnections)
- }
- }()
- }
-}
-
-func TestConnPoolResetClosesCheckedOutConnectionsOnRelease(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- inProgressRows := []*pgx.Rows{}
- var inProgressPIDs []int32
-
- // Start some queries and reset pool while they are in progress
- for i := 0; i < 10; i++ {
- rows, err := pool.Query("select pg_backend_pid() union all select 1 union all select 2")
- if err != nil {
- t.Fatal(err)
- }
-
- rows.Next()
- var pid int32
- rows.Scan(&pid)
- inProgressPIDs = append(inProgressPIDs, pid)
-
- inProgressRows = append(inProgressRows, rows)
- pool.Reset()
- }
-
- // Check that the queries are completed
- for _, rows := range inProgressRows {
- var expectedN int32
-
- for rows.Next() {
- expectedN++
- var n int32
- err := rows.Scan(&n)
- if err != nil {
- t.Fatal(err)
- }
- if expectedN != n {
- t.Fatalf("Expected n to be %d, but it was %d", expectedN, n)
- }
- }
-
- if err := rows.Err(); err != nil {
- t.Fatal(err)
- }
- }
-
- // pool should be in fresh state due to previous reset
- stats := pool.Stat()
- if stats.CurrentConnections != 0 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- var connCount int
- err := pool.QueryRow("select count(*) from pg_stat_activity where pid = any($1::int4[])", inProgressPIDs).Scan(&connCount)
- if err != nil {
- t.Fatal(err)
- }
- if connCount != 0 {
- t.Fatalf("%d connections not closed", connCount)
- }
-}
-
-func TestConnPoolResetClosesCheckedInConnections(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- inProgressRows := []*pgx.Rows{}
- var inProgressPIDs []int32
-
- // Start some queries and reset pool while they are in progress
- for i := 0; i < 5; i++ {
- rows, err := pool.Query("select pg_backend_pid()")
- if err != nil {
- t.Fatal(err)
- }
-
- inProgressRows = append(inProgressRows, rows)
- }
-
- // Check that the queries are completed
- for _, rows := range inProgressRows {
- for rows.Next() {
- var pid int32
- err := rows.Scan(&pid)
- if err != nil {
- t.Fatal(err)
- }
- inProgressPIDs = append(inProgressPIDs, pid)
-
- }
-
- if err := rows.Err(); err != nil {
- t.Fatal(err)
- }
- }
-
- // Ensure pool is fully connected and available
- stats := pool.Stat()
- if stats.CurrentConnections != 5 || stats.AvailableConnections != 5 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- pool.Reset()
-
- // Pool should be empty after reset
- stats = pool.Stat()
- if stats.CurrentConnections != 0 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- var connCount int
- err := pool.QueryRow("select count(*) from pg_stat_activity where pid = any($1::int4[])", inProgressPIDs).Scan(&connCount)
- if err != nil {
- t.Fatal(err)
- }
- if connCount != 0 {
- t.Fatalf("%d connections not closed", connCount)
- }
-}
-
-func TestConnPoolTransaction(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- tx, err := pool.Begin()
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var n int32
- err = tx.QueryRow("select 40+$1", 2).Scan(&n)
- if err != nil {
- t.Fatalf("tx.QueryRow Scan failed: %v", err)
- }
- if n != 42 {
- t.Errorf("Expected 42, got %d", n)
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolTransactionIso(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- tx, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var level string
- err = tx.QueryRow("select current_setting('transaction_isolation')").Scan(&level)
- if err != nil {
- t.Fatalf("tx.QueryRow failed: %v", level)
- }
-
- if level != "serializable" {
- t.Errorf("Expected to be in isolation level %v but was %v", "serializable", level)
- }
-}
-
-func TestConnPoolBeginRetry(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- killerConn, err := pool.Acquire()
- if err != nil {
- t.Fatal(err)
- }
- defer pool.Release(killerConn)
-
- victimConn, err := pool.Acquire()
- if err != nil {
- t.Fatal(err)
- }
- pool.Release(victimConn)
-
- // Terminate connection that was released to pool
- if _, err = killerConn.Exec("select pg_terminate_backend($1)", victimConn.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- // Since victimConn is the only available connection in the pool, pool.Begin should
- // try to use it, fail, and allocate another connection
- tx, err := pool.Begin()
- if err != nil {
- t.Fatalf("pool.Begin failed: %v", err)
- }
- defer tx.Rollback()
-
- var txPid int32
- err = tx.QueryRow("select pg_backend_pid()").Scan(&txPid)
- if err != nil {
- t.Fatalf("tx.QueryRow Scan failed: %v", err)
- }
- if txPid == victimConn.Pid {
- t.Error("Expected txPid to defer from killed conn pid, but it didn't")
- }
- }()
- }
-}
-
-func TestConnPoolQuery(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- var sum, rowCount int32
-
- rows, err := pool.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("pool.Query failed: %v", err)
- }
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 0 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-
- stats = pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolQueryConcurrentLoad(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 10)
- defer pool.Close()
-
- n := 100
- done := make(chan bool)
-
- for i := 0; i < n; i++ {
- go func() {
- defer func() { done <- true }()
- var rowCount int32
-
- rows, err := pool.Query("select generate_series(1,$1)", 1000)
- if err != nil {
- t.Fatalf("pool.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- err = rows.Scan(&n)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- if n != rowCount+1 {
- t.Fatalf("Expected n to be %d, but it was %d", rowCount+1, n)
- }
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", rows.Err())
- }
-
- if rowCount != 1000 {
- t.Error("Select called onDataRow wrong number of times")
- }
-
- _, err = pool.Exec("--;")
- if err != nil {
- t.Fatalf("pool.Exec failed: %v", err)
- }
- }()
- }
-
- for i := 0; i < n; i++ {
- <-done
- }
-}
-
-func TestConnPoolQueryRow(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- var n int32
- err := pool.QueryRow("select 40+$1", 2).Scan(&n)
- if err != nil {
- t.Fatalf("pool.QueryRow Scan failed: %v", err)
- }
-
- if n != 42 {
- t.Errorf("Expected 42, got %d", n)
- }
-
- stats := pool.Stat()
- if stats.CurrentConnections != 1 || stats.AvailableConnections != 1 {
- t.Fatalf("Unexpected connection pool stats: %v", stats)
- }
-}
-
-func TestConnPoolExec(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- results, err := pool.Exec("create temporary table foo(id integer primary key);")
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "CREATE TABLE" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- results, err = pool.Exec("insert into foo(id) values($1)", 1)
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- results, err = pool.Exec("drop table foo;")
- if err != nil {
- t.Fatalf("Unexpected error from pool.Exec: %v", err)
- }
- if results != "DROP TABLE" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestConnPoolPrepare(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- _, err := pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = pool.Deallocate("test")
- if err != nil {
- t.Errorf("Deallocate failed: %v", err)
- }
-
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err, ok := err.(pgx.PgError); !(ok && err.Code == "42601") {
- t.Errorf("Expected error calling deallocated prepared statement, but got: %v", err)
- }
-}
-
-func TestConnPoolPrepareDeallocatePrepare(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- _, err := pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
- err = pool.Deallocate("test")
- if err != nil {
- t.Fatalf("Unable to deallocate statement: %v", err)
- }
- _, err = pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Fatalf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-}
-
-func TestConnPoolPrepareWhenConnIsAlreadyAcquired(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 2)
- defer pool.Close()
-
- testPreparedStatement := func(db queryRower, desc string) {
- var s string
- err := db.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Fatalf("%s. Executing prepared statement failed: %v", desc, err)
- }
-
- if s != "hello" {
- t.Fatalf("%s. Prepared statement did not return expected value: %v", desc, s)
- }
- }
-
- newReleaseOnce := func(c *pgx.Conn) func() {
- var once sync.Once
- return func() {
- once.Do(func() { pool.Release(c) })
- }
- }
-
- c1, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c1Release := newReleaseOnce(c1)
- defer c1Release()
-
- _, err = pool.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Fatalf("Unable to prepare statement: %v", err)
- }
-
- testPreparedStatement(pool, "pool")
-
- c1Release()
-
- c2, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c2Release := newReleaseOnce(c2)
- defer c2Release()
-
- // This conn will not be available and will be connection at this point
- c3, err := pool.Acquire()
- if err != nil {
- t.Fatalf("Unable to acquire connection: %v", err)
- }
- c3Release := newReleaseOnce(c3)
- defer c3Release()
-
- testPreparedStatement(c2, "c2")
- testPreparedStatement(c3, "c3")
-
- c2Release()
- c3Release()
-
- err = pool.Deallocate("test")
- if err != nil {
- t.Errorf("Deallocate failed: %v", err)
- }
-
- var s string
- err = pool.QueryRow("test", "hello").Scan(&s)
- if err, ok := err.(pgx.PgError); !(ok && err.Code == "42601") {
- t.Errorf("Expected error calling deallocated prepared statement, but got: %v", err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/conn_test.go b/vendor/github.com/jackc/pgx/conn_test.go
deleted file mode 100644
index cfb9956..0000000
--- a/vendor/github.com/jackc/pgx/conn_test.go
+++ /dev/null
@@ -1,1744 +0,0 @@
-package pgx_test
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "os"
- "reflect"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnect(t *testing.T) {
- t.Parallel()
-
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- if _, present := conn.RuntimeParams["server_version"]; !present {
- t.Error("Runtime parameters not stored")
- }
-
- if conn.Pid == 0 {
- t.Error("Backend PID not stored")
- }
-
- if conn.SecretKey == 0 {
- t.Error("Backend secret key not stored")
- }
-
- var currentDB string
- err = conn.QueryRow("select current_database()").Scan(&currentDB)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if currentDB != defaultConnConfig.Database {
- t.Errorf("Did not connect to specified database (%v)", defaultConnConfig.Database)
- }
-
- var user string
- err = conn.QueryRow("select current_user").Scan(&user)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if user != defaultConnConfig.User {
- t.Errorf("Did not connect as specified user (%v)", defaultConnConfig.User)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithUnixSocketDirectory(t *testing.T) {
- t.Parallel()
-
- // /.s.PGSQL.5432
- if unixSocketConnConfig == nil {
- t.Skip("Skipping due to undefined unixSocketConnConfig")
- }
-
- conn, err := pgx.Connect(*unixSocketConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithUnixSocketFile(t *testing.T) {
- t.Parallel()
-
- if unixSocketConnConfig == nil {
- t.Skip("Skipping due to undefined unixSocketConnConfig")
- }
-
- connParams := *unixSocketConnConfig
- connParams.Host = connParams.Host + "/.s.PGSQL.5432"
- conn, err := pgx.Connect(connParams)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTcp(t *testing.T) {
- t.Parallel()
-
- if tcpConnConfig == nil {
- t.Skip("Skipping due to undefined tcpConnConfig")
- }
-
- conn, err := pgx.Connect(*tcpConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTLS(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- conn, err := pgx.Connect(*tlsConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithInvalidUser(t *testing.T) {
- t.Parallel()
-
- if invalidUserConnConfig == nil {
- t.Skip("Skipping due to undefined invalidUserConnConfig")
- }
-
- _, err := pgx.Connect(*invalidUserConnConfig)
- pgErr, ok := err.(pgx.PgError)
- if !ok {
- t.Fatalf("Expected to receive a PgError with code 28000, instead received: %v", err)
- }
- if pgErr.Code != "28000" && pgErr.Code != "28P01" {
- t.Fatalf("Expected to receive a PgError with code 28000 or 28P01, instead received: %v", pgErr)
- }
-}
-
-func TestConnectWithPlainTextPassword(t *testing.T) {
- t.Parallel()
-
- if plainPasswordConnConfig == nil {
- t.Skip("Skipping due to undefined plainPasswordConnConfig")
- }
-
- conn, err := pgx.Connect(*plainPasswordConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithMD5Password(t *testing.T) {
- t.Parallel()
-
- if md5ConnConfig == nil {
- t.Skip("Skipping due to undefined md5ConnConfig")
- }
-
- conn, err := pgx.Connect(*md5ConnConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithTLSFallback(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- connConfig := *tlsConnConfig
- connConfig.TLSConfig = &tls.Config{ServerName: "bogus.local"} // bogus ServerName should ensure certificate validation failure
-
- conn, err := pgx.Connect(connConfig)
- if err == nil {
- t.Fatal("Expected failed connection, but succeeded")
- }
-
- connConfig.UseFallbackTLS = true
- connConfig.FallbackTLSConfig = &tls.Config{InsecureSkipVerify: true}
-
- conn, err = pgx.Connect(connConfig)
- if err != nil {
- t.Fatal("Unable to establish connection: " + err.Error())
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithConnectionRefused(t *testing.T) {
- t.Parallel()
-
- // Presumably nothing is listening on 127.0.0.1:1
- bad := *defaultConnConfig
- bad.Host = "127.0.0.1"
- bad.Port = 1
-
- _, err := pgx.Connect(bad)
- if err == nil {
- t.Fatal("Expected error establishing connection to bad port")
- }
-}
-
-func TestConnectCustomDialer(t *testing.T) {
- t.Parallel()
-
- if customDialerConnConfig == nil {
- t.Skip("Skipping due to undefined customDialerConnConfig")
- }
-
- dialled := false
- conf := *customDialerConnConfig
- conf.Dial = func(network, address string) (net.Conn, error) {
- dialled = true
- return net.Dial(network, address)
- }
-
- conn, err := pgx.Connect(conf)
- if err != nil {
- t.Fatalf("Unable to establish connection: %s", err)
- }
- if !dialled {
- t.Fatal("Connect did not use custom dialer")
- }
-
- err = conn.Close()
- if err != nil {
- t.Fatal("Unable to close connection")
- }
-}
-
-func TestConnectWithRuntimeParams(t *testing.T) {
- t.Parallel()
-
- connConfig := *defaultConnConfig
- connConfig.RuntimeParams = map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- }
-
- conn, err := pgx.Connect(connConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer conn.Close()
-
- var s string
- err = conn.QueryRow("show application_name").Scan(&s)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if s != "pgxtest" {
- t.Errorf("Expected application_name to be %s, but it was %s", "pgxtest", s)
- }
-
- err = conn.QueryRow("show search_path").Scan(&s)
- if err != nil {
- t.Fatalf("QueryRow Scan unexpectedly failed: %v", err)
- }
- if s != "myschema" {
- t.Errorf("Expected search_path to be %s, but it was %s", "myschema", s)
- }
-}
-
-func TestParseURI(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: nil,
- UseFallbackTLS: false,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgresql://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb?application_name=pgxtest&search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseURI(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseURL(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseDSN(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb application_name=pgxtest search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseDSN(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseDSN(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseConnectionString(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- url string
- connParams pgx.ConnConfig
- }{
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb?sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: nil,
- UseFallbackTLS: false,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgresql://jack:secret@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost:5432/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "postgres://jack@localhost/mydb?application_name=pgxtest&search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=disable",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb sslmode=prefer",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack password=secret host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Password: "secret",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost port=5432 dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Port: 5432,
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- url: "user=jack host=localhost dbname=mydb application_name=pgxtest search_path=myschema",
- connParams: pgx.ConnConfig{
- User: "jack",
- Host: "localhost",
- Database: "mydb",
- TLSConfig: &tls.Config{
- InsecureSkipVerify: true,
- },
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{
- "application_name": "pgxtest",
- "search_path": "myschema",
- },
- },
- },
- }
-
- for i, tt := range tests {
- connParams, err := pgx.ParseConnectionString(tt.url)
- if err != nil {
- t.Errorf("%d. Unexpected error from pgx.ParseDSN(%q) => %v", i, tt.url, err)
- continue
- }
-
- if !reflect.DeepEqual(connParams, tt.connParams) {
- t.Errorf("%d. expected %#v got %#v", i, tt.connParams, connParams)
- }
- }
-}
-
-func TestParseEnvLibpq(t *testing.T) {
- pgEnvvars := []string{"PGHOST", "PGPORT", "PGDATABASE", "PGUSER", "PGPASSWORD", "PGAPPNAME"}
-
- savedEnv := make(map[string]string)
- for _, n := range pgEnvvars {
- savedEnv[n] = os.Getenv(n)
- }
- defer func() {
- for k, v := range savedEnv {
- err := os.Setenv(k, v)
- if err != nil {
- t.Fatalf("Unable to restore environment: %v", err)
- }
- }
- }()
-
- tests := []struct {
- name string
- envvars map[string]string
- config pgx.ConnConfig
- }{
- {
- name: "No environment",
- envvars: map[string]string{},
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "Normal PG vars",
- envvars: map[string]string{
- "PGHOST": "123.123.123.123",
- "PGPORT": "7777",
- "PGDATABASE": "foo",
- "PGUSER": "bar",
- "PGPASSWORD": "baz",
- },
- config: pgx.ConnConfig{
- Host: "123.123.123.123",
- Port: 7777,
- Database: "foo",
- User: "bar",
- Password: "baz",
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "application_name",
- envvars: map[string]string{
- "PGAPPNAME": "pgxtest",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{"application_name": "pgxtest"},
- },
- },
- {
- name: "sslmode=disable",
- envvars: map[string]string{
- "PGSSLMODE": "disable",
- },
- config: pgx.ConnConfig{
- TLSConfig: nil,
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=allow",
- envvars: map[string]string{
- "PGSSLMODE": "allow",
- },
- config: pgx.ConnConfig{
- TLSConfig: nil,
- UseFallbackTLS: true,
- FallbackTLSConfig: &tls.Config{InsecureSkipVerify: true},
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=prefer",
- envvars: map[string]string{
- "PGSSLMODE": "prefer",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{InsecureSkipVerify: true},
- UseFallbackTLS: true,
- FallbackTLSConfig: nil,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=require",
- envvars: map[string]string{
- "PGSSLMODE": "require",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-ca",
- envvars: map[string]string{
- "PGSSLMODE": "verify-ca",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-full",
- envvars: map[string]string{
- "PGSSLMODE": "verify-full",
- },
- config: pgx.ConnConfig{
- TLSConfig: &tls.Config{},
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- {
- name: "sslmode=verify-full with host",
- envvars: map[string]string{
- "PGHOST": "pgx.example",
- "PGSSLMODE": "verify-full",
- },
- config: pgx.ConnConfig{
- Host: "pgx.example",
- TLSConfig: &tls.Config{
- ServerName: "pgx.example",
- },
- UseFallbackTLS: false,
- RuntimeParams: map[string]string{},
- },
- },
- }
-
- for _, tt := range tests {
- for _, n := range pgEnvvars {
- err := os.Unsetenv(n)
- if err != nil {
- t.Fatalf("%s: Unable to clear environment: %v", tt.name, err)
- }
- }
-
- for k, v := range tt.envvars {
- err := os.Setenv(k, v)
- if err != nil {
- t.Fatalf("%s: Unable to set environment: %v", tt.name, err)
- }
- }
-
- config, err := pgx.ParseEnvLibpq()
- if err != nil {
- t.Errorf("%s: Unexpected error from pgx.ParseLibpq() => %v", tt.name, err)
- continue
- }
-
- if config.Host != tt.config.Host {
- t.Errorf("%s: expected Host to be %v got %v", tt.name, tt.config.Host, config.Host)
- }
- if config.Port != tt.config.Port {
- t.Errorf("%s: expected Port to be %v got %v", tt.name, tt.config.Port, config.Port)
- }
- if config.Port != tt.config.Port {
- t.Errorf("%s: expected Port to be %v got %v", tt.name, tt.config.Port, config.Port)
- }
- if config.User != tt.config.User {
- t.Errorf("%s: expected User to be %v got %v", tt.name, tt.config.User, config.User)
- }
- if config.Password != tt.config.Password {
- t.Errorf("%s: expected Password to be %v got %v", tt.name, tt.config.Password, config.Password)
- }
-
- if !reflect.DeepEqual(config.RuntimeParams, tt.config.RuntimeParams) {
- t.Errorf("%s: expected RuntimeParams to be %#v got %#v", tt.name, tt.config.RuntimeParams, config.RuntimeParams)
- }
-
- tlsTests := []struct {
- name string
- expected *tls.Config
- actual *tls.Config
- }{
- {
- name: "TLSConfig",
- expected: tt.config.TLSConfig,
- actual: config.TLSConfig,
- },
- {
- name: "FallbackTLSConfig",
- expected: tt.config.FallbackTLSConfig,
- actual: config.FallbackTLSConfig,
- },
- }
- for _, tlsTest := range tlsTests {
- name := tlsTest.name
- expected := tlsTest.expected
- actual := tlsTest.actual
-
- if expected == nil && actual != nil {
- t.Errorf("%s / %s: expected nil, but it was set", tt.name, name)
- } else if expected != nil && actual == nil {
- t.Errorf("%s / %s: expected to be set, but got nil", tt.name, name)
- } else if expected != nil && actual != nil {
- if actual.InsecureSkipVerify != expected.InsecureSkipVerify {
- t.Errorf("%s / %s: expected InsecureSkipVerify to be %v got %v", tt.name, name, expected.InsecureSkipVerify, actual.InsecureSkipVerify)
- }
-
- if actual.ServerName != expected.ServerName {
- t.Errorf("%s / %s: expected ServerName to be %v got %v", tt.name, name, expected.ServerName, actual.ServerName)
- }
- }
- }
-
- if config.UseFallbackTLS != tt.config.UseFallbackTLS {
- t.Errorf("%s: expected UseFallbackTLS to be %v got %v", tt.name, tt.config.UseFallbackTLS, config.UseFallbackTLS)
- }
- }
-}
-
-func TestExec(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(id integer primary key);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(id) values($1)", 1); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- if results := mustExec(t, conn, "drop table foo;"); results != "DROP TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Multiple statements can be executed -- last command tag is returned
- if results := mustExec(t, conn, "create temporary table foo(id serial primary key); drop table foo;"); results != "DROP TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Can execute longer SQL strings than sharedBufferSize
- if results := mustExec(t, conn, strings.Repeat("select 42; ", 1000)); results != "SELECT 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-
- // Exec no-op which does not return a command tag
- if results := mustExec(t, conn, "--;"); results != "" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestExecFailure(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if _, err := conn.Exec("selct;"); err == nil {
- t.Fatal("Expected SQL syntax error")
- }
-
- rows, _ := conn.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Exec failure appears to have broken connection: %v", rows.Err())
- }
-}
-
-func TestPrepare(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- _, err := conn.Prepare("test", "select $1::varchar")
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var s string
- err = conn.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-
- // Create another prepared statement to ensure Deallocate left the connection
- // in a working state and that we can reuse the prepared statement name.
-
- _, err = conn.Prepare("test", "select $1::integer")
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var n int32
- err = conn.QueryRow("test", int32(1)).Scan(&n)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if n != 1 {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-}
-
-func TestPrepareBadSQLFailure(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if _, err := conn.Prepare("badSQL", "select foo"); err == nil {
- t.Fatal("Prepare should have failed with syntax error")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestPrepareQueryManyParameters(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- count int
- succeed bool
- }{
- {
- count: 65534,
- succeed: true,
- },
- {
- count: 65535,
- succeed: true,
- },
- {
- count: 65536,
- succeed: false,
- },
- {
- count: 65537,
- succeed: false,
- },
- }
-
- for i, tt := range tests {
- params := make([]string, 0, tt.count)
- args := make([]interface{}, 0, tt.count)
- for j := 0; j < tt.count; j++ {
- params = append(params, fmt.Sprintf("($%d::text)", j+1))
- args = append(args, strconv.Itoa(j))
- }
-
- sql := "values" + strings.Join(params, ", ")
-
- psName := fmt.Sprintf("manyParams%d", i)
- _, err := conn.Prepare(psName, sql)
- if err != nil {
- if tt.succeed {
- t.Errorf("%d. %v", i, err)
- }
- continue
- }
- if !tt.succeed {
- t.Errorf("%d. Expected error but succeeded", i)
- continue
- }
-
- rows, err := conn.Query(psName, args...)
- if err != nil {
- t.Errorf("conn.Query failed: %v", err)
- continue
- }
-
- for rows.Next() {
- var s string
- rows.Scan(&s)
- }
-
- if rows.Err() != nil {
- t.Errorf("Reading query result failed: %v", err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestPrepareIdempotency(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for i := 0; i < 2; i++ {
- _, err := conn.Prepare("test", "select 42::integer")
- if err != nil {
- t.Fatalf("%d. Unable to prepare statement: %v", i, err)
- }
-
- var n int32
- err = conn.QueryRow("test").Scan(&n)
- if err != nil {
- t.Errorf("%d. Executing prepared statement failed: %v", i, err)
- }
-
- if n != int32(42) {
- t.Errorf("%d. Prepared statement did not return expected value: %v", i, n)
- }
- }
-
- _, err := conn.Prepare("test", "select 'fail'::varchar")
- if err == nil {
- t.Fatalf("Prepare statement with same name but different SQL should have failed but it didn't")
- return
- }
-}
-
-func TestPrepareEx(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- _, err := conn.PrepareEx("test", "select $1", &pgx.PrepareExOptions{ParameterOids: []pgx.Oid{pgx.TextOid}})
- if err != nil {
- t.Errorf("Unable to prepare statement: %v", err)
- return
- }
-
- var s string
- err = conn.QueryRow("test", "hello").Scan(&s)
- if err != nil {
- t.Errorf("Executing prepared statement failed: %v", err)
- }
-
- if s != "hello" {
- t.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- err = conn.Deallocate("test")
- if err != nil {
- t.Errorf("conn.Deallocate failed: %v", err)
- }
-}
-
-func TestListenNotify(t *testing.T) {
- t.Parallel()
-
- listener := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, listener)
-
- if err := listener.Listen("chat"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- notifier := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, notifier)
-
- mustExec(t, notifier, "notify chat")
-
- // when notification is waiting on the socket to be read
- notification, err := listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // when notification has already been read during previous query
- mustExec(t, notifier, "notify chat")
- rows, _ := listener.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
- notification, err = listener.WaitForNotification(0)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // when timeout occurs
- notification, err = listener.WaitForNotification(time.Millisecond)
- if err != pgx.ErrNotificationTimeout {
- t.Errorf("WaitForNotification returned the wrong kind of error: %v", err)
- }
- if notification != nil {
- t.Errorf("WaitForNotification returned an unexpected notification: %v", notification)
- }
-
- // listener can listen again after a timeout
- mustExec(t, notifier, "notify chat")
- notification, err = listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "chat" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-}
-
-func TestUnlistenSpecificChannel(t *testing.T) {
- t.Parallel()
-
- listener := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, listener)
-
- if err := listener.Listen("unlisten_test"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- notifier := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, notifier)
-
- mustExec(t, notifier, "notify unlisten_test")
-
- // when notification is waiting on the socket to be read
- notification, err := listener.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "unlisten_test" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- err = listener.Unlisten("unlisten_test")
- if err != nil {
- t.Fatalf("Unexpected error on Unlisten: %v", err)
- }
-
- // when notification has already been read during previous query
- mustExec(t, notifier, "notify unlisten_test")
- rows, _ := listener.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
- notification, err = listener.WaitForNotification(100 * time.Millisecond)
- if err != pgx.ErrNotificationTimeout {
- t.Errorf("WaitForNotification returned the wrong kind of error: %v", err)
- }
-}
-
-func TestListenNotifyWhileBusyIsSafe(t *testing.T) {
- t.Parallel()
-
- listenerDone := make(chan bool)
- go func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
- defer func() {
- listenerDone <- true
- }()
-
- if err := conn.Listen("busysafe"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- for i := 0; i < 5000; i++ {
- var sum int32
- var rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 100)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if sum != 5050 {
- t.Fatalf("Wrong rows sum: %v", sum)
- }
-
- if rowCount != 100 {
- t.Fatalf("Wrong number of rows: %v", rowCount)
- }
-
- time.Sleep(1 * time.Microsecond)
- }
- }()
-
- notifierDone := make(chan bool)
- go func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
- defer func() {
- notifierDone <- true
- }()
-
- for i := 0; i < 100000; i++ {
- mustExec(t, conn, "notify busysafe, 'hello'")
- time.Sleep(1 * time.Microsecond)
- }
- }()
-
- <-listenerDone
-}
-
-func TestListenNotifySelfNotification(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if err := conn.Listen("self"); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- // Notify self and WaitForNotification immediately
- mustExec(t, conn, "notify self")
-
- notification, err := conn.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "self" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-
- // Notify self and do something else before WaitForNotification
- mustExec(t, conn, "notify self")
-
- rows, _ := conn.Query("select 1")
- rows.Close()
- if rows.Err() != nil {
- t.Fatalf("Unexpected error on Query: %v", rows.Err())
- }
-
- notification, err = conn.WaitForNotification(time.Second)
- if err != nil {
- t.Fatalf("Unexpected error on WaitForNotification: %v", err)
- }
- if notification.Channel != "self" {
- t.Errorf("Did not receive notification on expected channel: %v", notification.Channel)
- }
-}
-
-func TestListenUnlistenSpecialCharacters(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- chanName := "special characters !@#{$%^&*()}"
- if err := conn.Listen(chanName); err != nil {
- t.Fatalf("Unable to start listening: %v", err)
- }
-
- if err := conn.Unlisten(chanName); err != nil {
- t.Fatalf("Unable to stop listening: %v", err)
- }
-}
-
-func TestFatalRxError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- var n int32
- var s string
- err := conn.QueryRow("select 1::int4, pg_sleep(10)::varchar").Scan(&n, &s)
- if err == pgx.ErrDeadConn {
- } else if pgErr, ok := err.(pgx.PgError); ok && pgErr.Severity == "FATAL" {
- } else {
- t.Fatalf("Expected QueryRow Scan to return fatal PgError or ErrDeadConn, but instead received %v", err)
- }
- }()
-
- otherConn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer otherConn.Close()
-
- if _, err := otherConn.Exec("select pg_terminate_backend($1)", conn.Pid); err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- wg.Wait()
-
- if conn.IsAlive() {
- t.Fatal("Connection should not be live but was")
- }
-}
-
-func TestFatalTxError(t *testing.T) {
- t.Parallel()
-
- // Run timing sensitive test many times
- for i := 0; i < 50; i++ {
- func() {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- otherConn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer otherConn.Close()
-
- _, err = otherConn.Exec("select pg_terminate_backend($1)", conn.Pid)
- if err != nil {
- t.Fatalf("Unable to kill backend PostgreSQL process: %v", err)
- }
-
- _, err = conn.Query("select 1")
- if err == nil {
- t.Fatal("Expected error but none occurred")
- }
-
- if conn.IsAlive() {
- t.Fatalf("Connection should not be live but was. Previous Query err: %v", err)
- }
- }()
- }
-}
-
-func TestCommandTag(t *testing.T) {
- t.Parallel()
-
- var tests = []struct {
- commandTag pgx.CommandTag
- rowsAffected int64
- }{
- {commandTag: "INSERT 0 5", rowsAffected: 5},
- {commandTag: "UPDATE 0", rowsAffected: 0},
- {commandTag: "UPDATE 1", rowsAffected: 1},
- {commandTag: "DELETE 0", rowsAffected: 0},
- {commandTag: "DELETE 1", rowsAffected: 1},
- {commandTag: "CREATE TABLE", rowsAffected: 0},
- {commandTag: "ALTER TABLE", rowsAffected: 0},
- {commandTag: "DROP TABLE", rowsAffected: 0},
- }
-
- for i, tt := range tests {
- actual := tt.commandTag.RowsAffected()
- if tt.rowsAffected != actual {
- t.Errorf(`%d. "%s" should have affected %d rows but it was %d`, i, tt.commandTag, tt.rowsAffected, actual)
- }
- }
-}
-
-func TestInsertBoolArray(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(spice bool[]);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(spice) values($1)", []bool{true, false, true}); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestInsertTimestampArray(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- if results := mustExec(t, conn, "create temporary table foo(spice timestamp[]);"); results != "CREATE TABLE" {
- t.Error("Unexpected results from Exec")
- }
-
- // Accept parameters
- if results := mustExec(t, conn, "insert into foo(spice) values($1)", []time.Time{time.Unix(1419143667, 0), time.Unix(1419143672, 0)}); results != "INSERT 0 1" {
- t.Errorf("Unexpected results from Exec: %v", results)
- }
-}
-
-func TestCatchSimultaneousConnectionQueries(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows1, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows1.Close()
-
- _, err = conn.Query("select generate_series(1,$1)", 10)
- if err != pgx.ErrConnBusy {
- t.Fatalf("conn.Query should have failed with pgx.ErrConnBusy, but it was %v", err)
- }
-}
-
-func TestCatchSimultaneousConnectionQueryAndExec(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- _, err = conn.Exec("create temporary table foo(spice timestamp[])")
- if err != pgx.ErrConnBusy {
- t.Fatalf("conn.Exec should have failed with pgx.ErrConnBusy, but it was %v", err)
- }
-}
-
-type testLog struct {
- lvl int
- msg string
- ctx []interface{}
-}
-
-type testLogger struct {
- logs []testLog
-}
-
-func (l *testLogger) Debug(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelDebug, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Info(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelInfo, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Warn(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelWarn, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Error(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelError, msg: msg, ctx: ctx})
-}
-
-func TestSetLogger(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- l1 := &testLogger{}
- oldLogger := conn.SetLogger(l1)
- if oldLogger != nil {
- t.Fatalf("Expected conn.SetLogger to return %v, but it was %v", nil, oldLogger)
- }
-
- if err := conn.Listen("foo"); err != nil {
- t.Fatal(err)
- }
-
- if len(l1.logs) == 0 {
- t.Fatal("Expected new logger l1 to be called, but it wasn't")
- }
-
- l2 := &testLogger{}
- oldLogger = conn.SetLogger(l2)
- if oldLogger != l1 {
- t.Fatalf("Expected conn.SetLogger to return %v, but it was %v", l1, oldLogger)
- }
-
- if err := conn.Listen("bar"); err != nil {
- t.Fatal(err)
- }
-
- if len(l2.logs) == 0 {
- t.Fatal("Expected new logger l2 to be called, but it wasn't")
- }
-}
-
-func TestSetLogLevel(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- logger := &testLogger{}
- conn.SetLogger(logger)
-
- if _, err := conn.SetLogLevel(0); err != pgx.ErrInvalidLogLevel {
- t.Fatal("SetLogLevel with invalid level did not return error")
- }
-
- if _, err := conn.SetLogLevel(pgx.LogLevelNone); err != nil {
- t.Fatal(err)
- }
-
- if err := conn.Listen("foo"); err != nil {
- t.Fatal(err)
- }
-
- if len(logger.logs) != 0 {
- t.Fatalf("Expected logger not to be called, but it was: %v", logger.logs)
- }
-
- if _, err := conn.SetLogLevel(pgx.LogLevelTrace); err != nil {
- t.Fatal(err)
- }
-
- if err := conn.Listen("bar"); err != nil {
- t.Fatal(err)
- }
-
- if len(logger.logs) == 0 {
- t.Fatal("Expected logger to be called, but it wasn't")
- }
-}
-
-func TestIdentifierSanitize(t *testing.T) {
- t.Parallel()
-
- tests := []struct {
- ident pgx.Identifier
- expected string
- }{
- {
- ident: pgx.Identifier{`foo`},
- expected: `"foo"`,
- },
- {
- ident: pgx.Identifier{`select`},
- expected: `"select"`,
- },
- {
- ident: pgx.Identifier{`foo`, `bar`},
- expected: `"foo"."bar"`,
- },
- {
- ident: pgx.Identifier{`you should " not do this`},
- expected: `"you should "" not do this"`,
- },
- {
- ident: pgx.Identifier{`you should " not do this`, `please don't`},
- expected: `"you should "" not do this"."please don't"`,
- },
- }
-
- for i, tt := range tests {
- qval := tt.ident.Sanitize()
- if qval != tt.expected {
- t.Errorf("%d. Expected Sanitize %v to return %v but it was %v", i, tt.ident, tt.expected, qval)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/copy_from.go b/vendor/github.com/jackc/pgx/copy_from.go
index 1f8a230..8b7c3d5 100644
--- a/vendor/github.com/jackc/pgx/copy_from.go
+++ b/vendor/github.com/jackc/pgx/copy_from.go
@@ -3,6 +3,10 @@ package pgx
import (
"bytes"
"fmt"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/pkg/errors"
)
// CopyFromRows returns a CopyFromSource interface over the provided rows slice
@@ -54,25 +58,25 @@ type copyFrom struct {
func (ct *copyFrom) readUntilReadyForQuery() {
for {
- t, r, err := ct.conn.rxMsg()
+ msg, err := ct.conn.rxMsg()
if err != nil {
ct.readerErrChan <- err
close(ct.readerErrChan)
return
}
- switch t {
- case readyForQuery:
- ct.conn.rxReadyForQuery(r)
+ switch msg := msg.(type) {
+ case *pgproto3.ReadyForQuery:
+ ct.conn.rxReadyForQuery(msg)
close(ct.readerErrChan)
return
- case commandComplete:
- case errorResponse:
- ct.readerErrChan <- ct.conn.rxErrorResponse(r)
+ case *pgproto3.CommandComplete:
+ case *pgproto3.ErrorResponse:
+ ct.readerErrChan <- ct.conn.rxErrorResponse(msg)
default:
- err = ct.conn.processContextFreeMsg(t, r)
+ err = ct.conn.processContextFreeMsg(msg)
if err != nil {
- ct.readerErrChan <- ct.conn.processContextFreeMsg(t, r)
+ ct.readerErrChan <- ct.conn.processContextFreeMsg(msg)
}
}
}
@@ -87,14 +91,14 @@ func (ct *copyFrom) waitForReaderDone() error {
func (ct *copyFrom) run() (int, error) {
quotedTableName := ct.tableName.Sanitize()
- buf := &bytes.Buffer{}
+ cbuf := &bytes.Buffer{}
for i, cn := range ct.columnNames {
if i != 0 {
- buf.WriteString(", ")
+ cbuf.WriteString(", ")
}
- buf.WriteString(quoteIdentifier(cn))
+ cbuf.WriteString(quoteIdentifier(cn))
}
- quotedColumnNames := buf.String()
+ quotedColumnNames := cbuf.String()
ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
if err != nil {
@@ -114,11 +118,14 @@ func (ct *copyFrom) run() (int, error) {
go ct.readUntilReadyForQuery()
defer ct.waitForReaderDone()
- wbuf := newWriteBuf(ct.conn, copyData)
+ buf := ct.conn.wbuf
+ buf = append(buf, copyData)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
- wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000"))
- wbuf.WriteInt32(0)
- wbuf.WriteInt32(0)
+ buf = append(buf, "PGCOPY\n\377\r\n\000"...)
+ buf = pgio.AppendInt32(buf, 0)
+ buf = pgio.AppendInt32(buf, 0)
var sentCount int
@@ -129,18 +136,16 @@ func (ct *copyFrom) run() (int, error) {
default:
}
- if len(wbuf.buf) > 65536 {
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
+ if len(buf) > 65536 {
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+ _, err = ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return 0, err
}
// Directly manipulate wbuf to reset to reuse the same buffer
- wbuf.buf = wbuf.buf[0:5]
- wbuf.buf[0] = copyData
- wbuf.sizeIdx = 1
+ buf = buf[0:5]
}
sentCount++
@@ -152,12 +157,12 @@ func (ct *copyFrom) run() (int, error) {
}
if len(values) != len(ct.columnNames) {
ct.cancelCopyIn()
- return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
+ return 0, errors.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
}
- wbuf.WriteInt16(int16(len(ct.columnNames)))
+ buf = pgio.AppendInt16(buf, int16(len(ct.columnNames)))
for i, val := range values {
- err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val)
+ buf, err = encodePreparedStatementArgument(ct.conn.ConnInfo, buf, ps.FieldDescriptions[i].DataType, val)
if err != nil {
ct.cancelCopyIn()
return 0, err
@@ -171,11 +176,13 @@ func (ct *copyFrom) run() (int, error) {
return 0, ct.rowSrc.Err()
}
- wbuf.WriteInt16(-1) // terminate the copy stream
+ buf = pgio.AppendInt16(buf, -1) // terminate the copy stream
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ buf = append(buf, copyDone)
+ buf = pgio.AppendInt32(buf, 4)
- wbuf.startMsg(copyDone)
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
+ _, err = ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return 0, err
@@ -190,18 +197,16 @@ func (ct *copyFrom) run() (int, error) {
func (c *Conn) readUntilCopyInResponse() error {
for {
- var t byte
- var r *msgReader
- t, r, err := c.rxMsg()
+ msg, err := c.rxMsg()
if err != nil {
return err
}
- switch t {
- case copyInResponse:
+ switch msg := msg.(type) {
+ case *pgproto3.CopyInResponse:
return nil
default:
- err = c.processContextFreeMsg(t, r)
+ err = c.processContextFreeMsg(msg)
if err != nil {
return err
}
@@ -210,10 +215,15 @@ func (c *Conn) readUntilCopyInResponse() error {
}
func (ct *copyFrom) cancelCopyIn() error {
- wbuf := newWriteBuf(ct.conn, copyFail)
- wbuf.WriteCString("client error: abort")
- wbuf.closeMsg()
- _, err := ct.conn.conn.Write(wbuf.buf)
+ buf := ct.conn.wbuf
+ buf = append(buf, copyFail)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, "client error: abort"...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ _, err := ct.conn.conn.Write(buf)
if err != nil {
ct.conn.die(err)
return err
diff --git a/vendor/github.com/jackc/pgx/copy_from_test.go b/vendor/github.com/jackc/pgx/copy_from_test.go
deleted file mode 100644
index 54da698..0000000
--- a/vendor/github.com/jackc/pgx/copy_from_test.go
+++ /dev/null
@@ -1,428 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "reflect"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnCopyFromSmall(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz
- )`)
-
- inputRows := [][]interface{}{
- {int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)},
- {nil, nil, nil, nil, nil, nil, nil},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromLarge(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz,
- h bytea
- )`)
-
- inputRows := [][]interface{}{}
-
- for i := 0; i < 10000; i++ {
- inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local), []byte{111, 111, 111, 111}})
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromJSON(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
- }
-
- mustExec(t, conn, `create temporary table foo(
- a json,
- b jsonb
- )`)
-
- inputRows := [][]interface{}{
- {map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
- {nil, nil},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyFrom: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyFrom to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyFromFailServerSideMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int4,
- b varchar not null
- )`)
-
- inputRows := [][]interface{}{
- {int32(1), "abc"},
- {int32(2), nil}, // this row should trigger a failure
- {int32(3), "def"},
- }
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a", "b"}, pgx.CopyFromRows(inputRows))
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyFrom return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type failSource struct {
- count int
-}
-
-func (fs *failSource) Next() bool {
- time.Sleep(time.Millisecond * 100)
- fs.count++
- return fs.count < 100
-}
-
-func (fs *failSource) Values() ([]interface{}, error) {
- if fs.count == 3 {
- return []interface{}{nil}, nil
- }
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (fs *failSource) Err() error {
- return nil
-}
-
-func TestConnCopyFromFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- startTime := time.Now()
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &failSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyFrom return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- endTime := time.Now()
- copyTime := endTime.Sub(startTime)
- if copyTime > time.Second {
- t.Errorf("Failing CopyFrom shouldn't have taken so long: %v", copyTime)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type clientFailSource struct {
- count int
- err error
-}
-
-func (cfs *clientFailSource) Next() bool {
- cfs.count++
- return cfs.count < 100
-}
-
-func (cfs *clientFailSource) Values() ([]interface{}, error) {
- if cfs.count == 3 {
- cfs.err = fmt.Errorf("client error")
- return nil, cfs.err
- }
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (cfs *clientFailSource) Err() error {
- return cfs.err
-}
-
-func TestConnCopyFromCopyFromSourceErrorMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &clientFailSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-type clientFinalErrSource struct {
- count int
-}
-
-func (cfs *clientFinalErrSource) Next() bool {
- cfs.count++
- return cfs.count < 5
-}
-
-func (cfs *clientFinalErrSource) Values() ([]interface{}, error) {
- return []interface{}{make([]byte, 100000)}, nil
-}
-
-func (cfs *clientFinalErrSource) Err() error {
- return fmt.Errorf("final error")
-}
-
-func TestConnCopyFromCopyFromSourceErrorEnd(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyFrom(pgx.Identifier{"foo"}, []string{"a"}, &clientFinalErrSource{})
- if err == nil {
- t.Errorf("Expected CopyFrom return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyFrom to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/copy_to.go b/vendor/github.com/jackc/pgx/copy_to.go
deleted file mode 100644
index 229e9a4..0000000
--- a/vendor/github.com/jackc/pgx/copy_to.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package pgx
-
-import (
- "bytes"
- "fmt"
-)
-
-// Deprecated. Use CopyFromRows instead. CopyToRows returns a CopyToSource
-// interface over the provided rows slice making it usable by *Conn.CopyTo.
-func CopyToRows(rows [][]interface{}) CopyToSource {
- return &copyToRows{rows: rows, idx: -1}
-}
-
-type copyToRows struct {
- rows [][]interface{}
- idx int
-}
-
-func (ctr *copyToRows) Next() bool {
- ctr.idx++
- return ctr.idx < len(ctr.rows)
-}
-
-func (ctr *copyToRows) Values() ([]interface{}, error) {
- return ctr.rows[ctr.idx], nil
-}
-
-func (ctr *copyToRows) Err() error {
- return nil
-}
-
-// Deprecated. Use CopyFromSource instead. CopyToSource is the interface used by
-// *Conn.CopyTo as the source for copy data.
-type CopyToSource interface {
- // Next returns true if there is another row and makes the next row data
- // available to Values(). When there are no more rows available or an error
- // has occurred it returns false.
- Next() bool
-
- // Values returns the values for the current row.
- Values() ([]interface{}, error)
-
- // Err returns any error that has been encountered by the CopyToSource. If
- // this is not nil *Conn.CopyTo will abort the copy.
- Err() error
-}
-
-type copyTo struct {
- conn *Conn
- tableName string
- columnNames []string
- rowSrc CopyToSource
- readerErrChan chan error
-}
-
-func (ct *copyTo) readUntilReadyForQuery() {
- for {
- t, r, err := ct.conn.rxMsg()
- if err != nil {
- ct.readerErrChan <- err
- close(ct.readerErrChan)
- return
- }
-
- switch t {
- case readyForQuery:
- ct.conn.rxReadyForQuery(r)
- close(ct.readerErrChan)
- return
- case commandComplete:
- case errorResponse:
- ct.readerErrChan <- ct.conn.rxErrorResponse(r)
- default:
- err = ct.conn.processContextFreeMsg(t, r)
- if err != nil {
- ct.readerErrChan <- ct.conn.processContextFreeMsg(t, r)
- }
- }
- }
-}
-
-func (ct *copyTo) waitForReaderDone() error {
- var err error
- for err = range ct.readerErrChan {
- }
- return err
-}
-
-func (ct *copyTo) run() (int, error) {
- quotedTableName := quoteIdentifier(ct.tableName)
- buf := &bytes.Buffer{}
- for i, cn := range ct.columnNames {
- if i != 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(quoteIdentifier(cn))
- }
- quotedColumnNames := buf.String()
-
- ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
- if err != nil {
- return 0, err
- }
-
- err = ct.conn.sendSimpleQuery(fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames))
- if err != nil {
- return 0, err
- }
-
- err = ct.conn.readUntilCopyInResponse()
- if err != nil {
- return 0, err
- }
-
- go ct.readUntilReadyForQuery()
- defer ct.waitForReaderDone()
-
- wbuf := newWriteBuf(ct.conn, copyData)
-
- wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000"))
- wbuf.WriteInt32(0)
- wbuf.WriteInt32(0)
-
- var sentCount int
-
- for ct.rowSrc.Next() {
- select {
- case err = <-ct.readerErrChan:
- return 0, err
- default:
- }
-
- if len(wbuf.buf) > 65536 {
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return 0, err
- }
-
- // Directly manipulate wbuf to reset to reuse the same buffer
- wbuf.buf = wbuf.buf[0:5]
- wbuf.buf[0] = copyData
- wbuf.sizeIdx = 1
- }
-
- sentCount++
-
- values, err := ct.rowSrc.Values()
- if err != nil {
- ct.cancelCopyIn()
- return 0, err
- }
- if len(values) != len(ct.columnNames) {
- ct.cancelCopyIn()
- return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
- }
-
- wbuf.WriteInt16(int16(len(ct.columnNames)))
- for i, val := range values {
- err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val)
- if err != nil {
- ct.cancelCopyIn()
- return 0, err
- }
-
- }
- }
-
- if ct.rowSrc.Err() != nil {
- ct.cancelCopyIn()
- return 0, ct.rowSrc.Err()
- }
-
- wbuf.WriteInt16(-1) // terminate the copy stream
-
- wbuf.startMsg(copyDone)
- wbuf.closeMsg()
- _, err = ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return 0, err
- }
-
- err = ct.waitForReaderDone()
- if err != nil {
- return 0, err
- }
- return sentCount, nil
-}
-
-func (ct *copyTo) cancelCopyIn() error {
- wbuf := newWriteBuf(ct.conn, copyFail)
- wbuf.WriteCString("client error: abort")
- wbuf.closeMsg()
- _, err := ct.conn.conn.Write(wbuf.buf)
- if err != nil {
- ct.conn.die(err)
- return err
- }
-
- return nil
-}
-
-// Deprecated. Use CopyFrom instead. CopyTo uses the PostgreSQL copy protocol to
-// perform bulk data insertion. It returns the number of rows copied and an
-// error.
-//
-// CopyTo requires all values use the binary format. Almost all types
-// implemented by pgx use the binary format by default. Types implementing
-// Encoder can only be used if they encode to the binary format.
-func (c *Conn) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
- ct := &copyTo{
- conn: c,
- tableName: tableName,
- columnNames: columnNames,
- rowSrc: rowSrc,
- readerErrChan: make(chan error),
- }
-
- return ct.run()
-}
diff --git a/vendor/github.com/jackc/pgx/copy_to_test.go b/vendor/github.com/jackc/pgx/copy_to_test.go
deleted file mode 100644
index ac27042..0000000
--- a/vendor/github.com/jackc/pgx/copy_to_test.go
+++ /dev/null
@@ -1,367 +0,0 @@
-package pgx_test
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestConnCopyToSmall(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz
- )`)
-
- inputRows := [][]interface{}{
- {int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local)},
- {nil, nil, nil, nil, nil, nil, nil},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b", "c", "d", "e", "f", "g"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToLarge(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int2,
- b int4,
- c int8,
- d varchar,
- e text,
- f date,
- g timestamptz,
- h bytea
- )`)
-
- inputRows := [][]interface{}{}
-
- for i := 0; i < 10000; i++ {
- inputRows = append(inputRows, []interface{}{int16(0), int32(1), int64(2), "abc", "efg", time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local), time.Date(2010, 2, 3, 4, 5, 6, 0, time.Local), []byte{111, 111, 111, 111}})
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b", "c", "d", "e", "f", "g", "h"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToJSON(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
- }
-
- mustExec(t, conn, `create temporary table foo(
- a json,
- b jsonb
- )`)
-
- inputRows := [][]interface{}{
- {map[string]interface{}{"foo": "bar"}, map[string]interface{}{"bar": "quz"}},
- {nil, nil},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b"}, pgx.CopyToRows(inputRows))
- if err != nil {
- t.Errorf("Unexpected error for CopyTo: %v", err)
- }
- if copyCount != len(inputRows) {
- t.Errorf("Expected CopyTo to return %d copied rows, but got %d", len(inputRows), copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if !reflect.DeepEqual(inputRows, outputRows) {
- t.Errorf("Input rows and output rows do not equal: %v -> %v", inputRows, outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToFailServerSideMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a int4,
- b varchar not null
- )`)
-
- inputRows := [][]interface{}{
- {int32(1), "abc"},
- {int32(2), nil}, // this row should trigger a failure
- {int32(3), "def"},
- }
-
- copyCount, err := conn.CopyTo("foo", []string{"a", "b"}, pgx.CopyToRows(inputRows))
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyTo return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToFailServerSideMidwayAbortsWithoutWaiting(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- startTime := time.Now()
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &failSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if _, ok := err.(pgx.PgError); !ok {
- t.Errorf("Expected CopyTo return pgx.PgError, but instead it returned: %v", err)
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- endTime := time.Now()
- copyTime := endTime.Sub(startTime)
- if copyTime > time.Second {
- t.Errorf("Failing CopyTo shouldn't have taken so long: %v", copyTime)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToCopyToSourceErrorMidway(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &clientFailSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnCopyToCopyToSourceErrorEnd(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- mustExec(t, conn, `create temporary table foo(
- a bytea not null
- )`)
-
- copyCount, err := conn.CopyTo("foo", []string{"a"}, &clientFinalErrSource{})
- if err == nil {
- t.Errorf("Expected CopyTo return error, but it did not")
- }
- if copyCount != 0 {
- t.Errorf("Expected CopyTo to return 0 copied rows, but got %d", copyCount)
- }
-
- rows, err := conn.Query("select * from foo")
- if err != nil {
- t.Errorf("Unexpected error for Query: %v", err)
- }
-
- var outputRows [][]interface{}
- for rows.Next() {
- row, err := rows.Values()
- if err != nil {
- t.Errorf("Unexpected error for rows.Values(): %v", err)
- }
- outputRows = append(outputRows, row)
- }
-
- if rows.Err() != nil {
- t.Errorf("Unexpected error for rows.Err(): %v", rows.Err())
- }
-
- if len(outputRows) != 0 {
- t.Errorf("Expected 0 rows, but got %v", outputRows)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/doc.go b/vendor/github.com/jackc/pgx/doc.go
index f527d11..51f1edc 100644
--- a/vendor/github.com/jackc/pgx/doc.go
+++ b/vendor/github.com/jackc/pgx/doc.go
@@ -1,9 +1,9 @@
// Package pgx is a PostgreSQL database driver.
/*
-pgx provides lower level access to PostgreSQL than the standard database/sql
+pgx provides lower level access to PostgreSQL than the standard database/sql.
It remains as similar to the database/sql interface as possible while
providing better speed and access to PostgreSQL specific features. Import
-github.com/jack/pgx/stdlib to use pgx as a database/sql compatible driver.
+github.com/jackc/pgx/stdlib to use pgx as a database/sql compatible driver.
Query Interface
@@ -62,17 +62,15 @@ Use Exec to execute a query that does not return a result set.
Connection Pool
-Connection pool usage is explicit and configurable. In pgx, a connection can
-be created and managed directly, or a connection pool with a configurable
-maximum connections can be used. Also, the connection pool offers an after
-connect hook that allows every connection to be automatically setup before
-being made available in the connection pool. This is especially useful to
-ensure all connections have the same prepared statements available or to
-change any other connection settings.
+Connection pool usage is explicit and configurable. In pgx, a connection can be
+created and managed directly, or a connection pool with a configurable maximum
+connections can be used. The connection pool offers an after connect hook that
+allows every connection to be automatically setup before being made available in
+the connection pool.
-It delegates Query, QueryRow, Exec, and Begin functions to an automatically
-checked out and released connection so you can avoid manually acquiring and
-releasing connections when you do not need that level of control.
+It delegates methods such as QueryRow to an automatically checked out and
+released connection so you can avoid manually acquiring and releasing
+connections when you do not need that level of control.
var name string
var weight int64
@@ -117,11 +115,11 @@ particular:
Null Mapping
-pgx can map nulls in two ways. The first is Null* types that have a data field
-and a valid field. They work in a similar fashion to database/sql. The second
-is to use a pointer to a pointer.
+pgx can map nulls in two ways. The first is package pgtype provides types that
+have a data field and a status field. They work in a similar fashion to
+database/sql. The second is to use a pointer to a pointer.
- var foo pgx.NullString
+ var foo pgtype.Varchar
var bar *string
err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&a, &b)
if err != nil {
@@ -133,20 +131,15 @@ Array Mapping
pgx maps between int16, int32, int64, float32, float64, and string Go slices
and the equivalent PostgreSQL array type. Go slices of native types do not
support nulls, so if a PostgreSQL array that contains a null is read into a
-native Go slice an error will occur.
-
-Hstore Mapping
-
-pgx includes an Hstore type and a NullHstore type. Hstore is simply a
-map[string]string and is preferred when the hstore contains no nulls. NullHstore
-follows the Null* pattern and supports null values.
+native Go slice an error will occur. The pgtype package includes many more
+array types for PostgreSQL types that do not directly map to native Go types.
JSON and JSONB Mapping
pgx includes built-in support to marshal and unmarshal between Go types and
the PostgreSQL JSON and JSONB.
-Inet and Cidr Mapping
+Inet and CIDR Mapping
pgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In
addition, as a convenience pgx will encode from a net.IP; it will assume a /32
@@ -155,25 +148,10 @@ netmask for IPv4 and a /128 for IPv6.
Custom Type Support
pgx includes support for the common data types like integers, floats, strings,
-dates, and times that have direct mappings between Go and SQL. Support can be
-added for additional types like point, hstore, numeric, etc. that do not have
-direct mappings in Go by the types implementing ScannerPgx and Encoder.
-
-Custom types can support text or binary formats. Binary format can provide a
-large performance increase. The natural place for deciding the format for a
-value would be in ScannerPgx as it is responsible for decoding the returned
-data. However, that is impossible as the query has already been sent by the time
-the ScannerPgx is invoked. The solution to this is the global
-DefaultTypeFormats. If a custom type prefers binary format it should register it
-there.
-
- pgx.DefaultTypeFormats["point"] = pgx.BinaryFormatCode
-
-Note that the type is referred to by name, not by OID. This is because custom
-PostgreSQL types like hstore will have different OIDs on different servers. When
-pgx establishes a connection it queries the pg_type table for all types. It then
-matches the names in DefaultTypeFormats with the returned OIDs and stores it in
-Conn.PgTypes.
+dates, and times that have direct mappings between Go and SQL. In addition,
+pgx uses the github.com/jackc/pgx/pgtype library to support more types. See
+documention for that library for instructions on how to implement custom
+types.
See example_custom_type_test.go for an example of a custom type for the
PostgreSQL point type.
@@ -184,15 +162,12 @@ and database/sql/driver.Valuer interfaces.
Raw Bytes Mapping
[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified
-to PostgreSQL. In like manner, a *[]byte passed to Scan will be filled with
-the raw bytes returned by PostgreSQL. This can be especially useful for reading
-varchar, text, json, and jsonb values directly into a []byte and avoiding the
-type conversion from string.
+to PostgreSQL.
Transactions
-Transactions are started by calling Begin or BeginIso. The BeginIso variant
-creates a transaction with a specified isolation level.
+Transactions are started by calling Begin or BeginEx. The BeginEx variant
+can create a transaction with a specified isolation level.
tx, err := conn.Begin()
if err != nil {
@@ -225,7 +200,7 @@ implement CopyFromSource to avoid buffering the entire data set in memory.
}
copyCount, err := conn.CopyFrom(
- "people",
+ pgx.Identifier{"people"},
[]string{"first_name", "last_name", "age"},
pgx.CopyFromRows(rows),
)
@@ -257,9 +232,8 @@ connection.
Logging
pgx defines a simple logger interface. Connections optionally accept a logger
-that satisfies this interface. The log15 package
-(http://gopkg.in/inconshreveable/log15.v2) satisfies this interface and it is
-simple to define adapters for other loggers. Set LogLevel to control logging
-verbosity.
+that satisfies this interface. Set LogLevel to control logging verbosity.
+Adapters for github.com/inconshreveable/log15, github.com/sirupsen/logrus, and
+the testing log are provided in the log directory.
*/
package pgx
diff --git a/vendor/github.com/jackc/pgx/example_custom_type_test.go b/vendor/github.com/jackc/pgx/example_custom_type_test.go
deleted file mode 100644
index 34cc316..0000000
--- a/vendor/github.com/jackc/pgx/example_custom_type_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "github.com/jackc/pgx"
- "regexp"
- "strconv"
-)
-
-var pointRegexp *regexp.Regexp = regexp.MustCompile(`^\((.*),(.*)\)$`)
-
-// NullPoint represents a point that may be null.
-//
-// If Valid is false then the value is NULL.
-type NullPoint struct {
- X, Y float64 // Coordinates of point
- Valid bool // Valid is true if not NULL
-}
-
-func (p *NullPoint) ScanPgx(vr *pgx.ValueReader) error {
- if vr.Type().DataTypeName != "point" {
- return pgx.SerializationError(fmt.Sprintf("NullPoint.Scan cannot decode %s (OID %d)", vr.Type().DataTypeName, vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- p.X, p.Y, p.Valid = 0, 0, false
- return nil
- }
-
- switch vr.Type().FormatCode {
- case pgx.TextFormatCode:
- s := vr.ReadString(vr.Len())
- match := pointRegexp.FindStringSubmatch(s)
- if match == nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
-
- var err error
- p.X, err = strconv.ParseFloat(match[1], 64)
- if err != nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
- p.Y, err = strconv.ParseFloat(match[2], 64)
- if err != nil {
- return pgx.SerializationError(fmt.Sprintf("Received invalid point: %v", s))
- }
- case pgx.BinaryFormatCode:
- return errors.New("binary format not implemented")
- default:
- return fmt.Errorf("unknown format %v", vr.Type().FormatCode)
- }
-
- p.Valid = true
- return vr.Err()
-}
-
-func (p NullPoint) FormatCode() int16 { return pgx.BinaryFormatCode }
-
-func (p NullPoint) Encode(w *pgx.WriteBuf, oid pgx.Oid) error {
- if !p.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- s := fmt.Sprintf("point(%v,%v)", p.X, p.Y)
- w.WriteInt32(int32(len(s)))
- w.WriteBytes([]byte(s))
-
- return nil
-}
-
-func (p NullPoint) String() string {
- if p.Valid {
- return fmt.Sprintf("%v, %v", p.X, p.Y)
- }
- return "null point"
-}
-
-func Example_CustomType() {
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- fmt.Printf("Unable to establish connection: %v", err)
- return
- }
-
- var p NullPoint
- err = conn.QueryRow("select null::point").Scan(&p)
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(p)
-
- err = conn.QueryRow("select point(1.5,2.5)").Scan(&p)
- if err != nil {
- fmt.Println(err)
- return
- }
- fmt.Println(p)
- // Output:
- // null point
- // 1.5, 2.5
-}
diff --git a/vendor/github.com/jackc/pgx/example_json_test.go b/vendor/github.com/jackc/pgx/example_json_test.go
deleted file mode 100644
index c153415..0000000
--- a/vendor/github.com/jackc/pgx/example_json_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "github.com/jackc/pgx"
-)
-
-func Example_JSON() {
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- fmt.Printf("Unable to establish connection: %v", err)
- return
- }
-
- if _, ok := conn.PgTypes[pgx.JsonOid]; !ok {
- // No JSON type -- must be running against very old PostgreSQL
- // Pretend it works
- fmt.Println("John", 42)
- return
- }
-
- type person struct {
- Name string `json:"name"`
- Age int `json:"age"`
- }
-
- input := person{
- Name: "John",
- Age: 42,
- }
-
- var output person
-
- err = conn.QueryRow("select $1::json", input).Scan(&output)
- if err != nil {
- fmt.Println(err)
- return
- }
-
- fmt.Println(output.Name, output.Age)
- // Output:
- // John 42
-}
diff --git a/vendor/github.com/jackc/pgx/fastpath.go b/vendor/github.com/jackc/pgx/fastpath.go
index 19b9878..06e1354 100644
--- a/vendor/github.com/jackc/pgx/fastpath.go
+++ b/vendor/github.com/jackc/pgx/fastpath.go
@@ -2,29 +2,33 @@ package pgx
import (
"encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
)
func newFastpath(cn *Conn) *fastpath {
- return &fastpath{cn: cn, fns: make(map[string]Oid)}
+ return &fastpath{cn: cn, fns: make(map[string]pgtype.OID)}
}
type fastpath struct {
cn *Conn
- fns map[string]Oid
+ fns map[string]pgtype.OID
}
-func (f *fastpath) functionOID(name string) Oid {
+func (f *fastpath) functionOID(name string) pgtype.OID {
return f.fns[name]
}
-func (f *fastpath) addFunction(name string, oid Oid) {
+func (f *fastpath) addFunction(name string, oid pgtype.OID) {
f.fns[name] = oid
}
func (f *fastpath) addFunctions(rows *Rows) error {
for rows.Next() {
var name string
- var oid Oid
+ var oid pgtype.OID
if err := rows.Scan(&name, &oid); err != nil {
return err
}
@@ -47,41 +51,46 @@ func fpInt64Arg(n int64) fpArg {
return res
}
-func (f *fastpath) Call(oid Oid, args []fpArg) (res []byte, err error) {
- wbuf := newWriteBuf(f.cn, 'F') // function call
- wbuf.WriteInt32(int32(oid)) // function object id
- wbuf.WriteInt16(1) // # of argument format codes
- wbuf.WriteInt16(1) // format code: binary
- wbuf.WriteInt16(int16(len(args))) // # of arguments
+func (f *fastpath) Call(oid pgtype.OID, args []fpArg) (res []byte, err error) {
+ if err := f.cn.ensureConnectionReadyForQuery(); err != nil {
+ return nil, err
+ }
+
+ buf := f.cn.wbuf
+ buf = append(buf, 'F') // function call
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = pgio.AppendInt32(buf, int32(oid)) // function object id
+ buf = pgio.AppendInt16(buf, 1) // # of argument format codes
+ buf = pgio.AppendInt16(buf, 1) // format code: binary
+ buf = pgio.AppendInt16(buf, int16(len(args))) // # of arguments
for _, arg := range args {
- wbuf.WriteInt32(int32(len(arg))) // length of argument
- wbuf.WriteBytes(arg) // argument value
+ buf = pgio.AppendInt32(buf, int32(len(arg))) // length of argument
+ buf = append(buf, arg...) // argument value
}
- wbuf.WriteInt16(1) // response format code (binary)
- wbuf.closeMsg()
+ buf = pgio.AppendInt16(buf, 1) // response format code (binary)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- if _, err := f.cn.conn.Write(wbuf.buf); err != nil {
+ if _, err := f.cn.conn.Write(buf); err != nil {
return nil, err
}
for {
- var t byte
- var r *msgReader
- t, r, err = f.cn.rxMsg()
+ msg, err := f.cn.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case 'V': // FunctionCallResponse
- data := r.readBytes(r.readInt32())
- res = make([]byte, len(data))
- copy(res, data)
- case 'Z': // Ready for query
- f.cn.rxReadyForQuery(r)
+ switch msg := msg.(type) {
+ case *pgproto3.FunctionCallResponse:
+ res = make([]byte, len(msg.Result))
+ copy(res, msg.Result)
+ case *pgproto3.ReadyForQuery:
+ f.cn.rxReadyForQuery(msg)
// done
- return
+ return res, err
default:
- if err := f.cn.processContextFreeMsg(t, r); err != nil {
+ if err := f.cn.processContextFreeMsg(msg); err != nil {
return nil, err
}
}
diff --git a/vendor/github.com/jackc/pgx/go_stdlib.go b/vendor/github.com/jackc/pgx/go_stdlib.go
new file mode 100644
index 0000000..9372f9e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/go_stdlib.go
@@ -0,0 +1,61 @@
+package pgx
+
+import (
+ "database/sql/driver"
+ "reflect"
+)
+
+// This file contains code copied from the Go standard library due to the
+// required function not being public.
+
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// From database/sql/convert.go
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+// Issue 8415.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This function is mirrored in the database/sql/driver package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/jackc/pgx/helper_test.go b/vendor/github.com/jackc/pgx/helper_test.go
deleted file mode 100644
index eff731e..0000000
--- a/vendor/github.com/jackc/pgx/helper_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
-)
-
-func mustConnect(t testing.TB, config pgx.ConnConfig) *pgx.Conn {
- conn, err := pgx.Connect(config)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- return conn
-}
-
-func mustReplicationConnect(t testing.TB, config pgx.ConnConfig) *pgx.ReplicationConn {
- conn, err := pgx.ReplicationConnect(config)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- return conn
-}
-
-
-func closeConn(t testing.TB, conn *pgx.Conn) {
- err := conn.Close()
- if err != nil {
- t.Fatalf("conn.Close unexpectedly failed: %v", err)
- }
-}
-
-func closeReplicationConn(t testing.TB, conn *pgx.ReplicationConn) {
- err := conn.Close()
- if err != nil {
- t.Fatalf("conn.Close unexpectedly failed: %v", err)
- }
-}
-
-func mustExec(t testing.TB, conn *pgx.Conn, sql string, arguments ...interface{}) (commandTag pgx.CommandTag) {
- var err error
- if commandTag, err = conn.Exec(sql, arguments...); err != nil {
- t.Fatalf("Exec unexpectedly failed with %v: %v", sql, err)
- }
- return
-}
-
-// Do a simple query to ensure the connection is still usable
-func ensureConnValid(t *testing.T, conn *pgx.Conn) {
- var sum, rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/hstore.go b/vendor/github.com/jackc/pgx/hstore.go
deleted file mode 100644
index 0ab9f77..0000000
--- a/vendor/github.com/jackc/pgx/hstore.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package pgx
-
-import (
- "bytes"
- "errors"
- "fmt"
- "unicode"
- "unicode/utf8"
-)
-
-const (
- hsPre = iota
- hsKey
- hsSep
- hsVal
- hsNul
- hsNext
-)
-
-type hstoreParser struct {
- str string
- pos int
-}
-
-func newHSP(in string) *hstoreParser {
- return &hstoreParser{
- pos: 0,
- str: in,
- }
-}
-
-func (p *hstoreParser) Consume() (r rune, end bool) {
- if p.pos >= len(p.str) {
- end = true
- return
- }
- r, w := utf8.DecodeRuneInString(p.str[p.pos:])
- p.pos += w
- return
-}
-
-func (p *hstoreParser) Peek() (r rune, end bool) {
- if p.pos >= len(p.str) {
- end = true
- return
- }
- r, _ = utf8.DecodeRuneInString(p.str[p.pos:])
- return
-}
-
-func parseHstoreToMap(s string) (m map[string]string, err error) {
- keys, values, err := ParseHstore(s)
- if err != nil {
- return
- }
- m = make(map[string]string, len(keys))
- for i, key := range keys {
- if !values[i].Valid {
- err = fmt.Errorf("key '%s' has NULL value", key)
- m = nil
- return
- }
- m[key] = values[i].String
- }
- return
-}
-
-func parseHstoreToNullHstore(s string) (store map[string]NullString, err error) {
- keys, values, err := ParseHstore(s)
- if err != nil {
- return
- }
-
- store = make(map[string]NullString, len(keys))
-
- for i, key := range keys {
- store[key] = values[i]
- }
- return
-}
-
-// ParseHstore parses the string representation of an hstore column (the same
-// you would get from an ordinary SELECT) into two slices of keys and values. it
-// is used internally in the default parsing of hstores, but is exported for use
-// in handling custom data structures backed by an hstore column without the
-// overhead of creating a map[string]string
-func ParseHstore(s string) (k []string, v []NullString, err error) {
- if s == "" {
- return
- }
-
- buf := bytes.Buffer{}
- keys := []string{}
- values := []NullString{}
- p := newHSP(s)
-
- r, end := p.Consume()
- state := hsPre
-
- for !end {
- switch state {
- case hsPre:
- if r == '"' {
- state = hsKey
- } else {
- err = errors.New("String does not begin with \"")
- }
- case hsKey:
- switch r {
- case '"': //End of the key
- if buf.Len() == 0 {
- err = errors.New("Empty Key is invalid")
- } else {
- keys = append(keys, buf.String())
- buf = bytes.Buffer{}
- state = hsSep
- }
- case '\\': //Potential escaped character
- n, end := p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS in key, expecting character or \"")
- case n == '"', n == '\\':
- buf.WriteRune(n)
- default:
- buf.WriteRune(r)
- buf.WriteRune(n)
- }
- default: //Any other character
- buf.WriteRune(r)
- }
- case hsSep:
- if r == '=' {
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after '=', expecting '>'")
- case r == '>':
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
- case r == '"':
- state = hsVal
- case r == 'N':
- state = hsNul
- default:
- err = fmt.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
- }
- default:
- err = fmt.Errorf("Invalid character after '=', expecting '>'")
- }
- } else {
- err = fmt.Errorf("Invalid character '%c' after value, expecting '='", r)
- }
- case hsVal:
- switch r {
- case '"': //End of the value
- values = append(values, NullString{String: buf.String(), Valid: true})
- buf = bytes.Buffer{}
- state = hsNext
- case '\\': //Potential escaped character
- n, end := p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS in key, expecting character or \"")
- case n == '"', n == '\\':
- buf.WriteRune(n)
- default:
- buf.WriteRune(r)
- buf.WriteRune(n)
- }
- default: //Any other character
- buf.WriteRune(r)
- }
- case hsNul:
- nulBuf := make([]rune, 3)
- nulBuf[0] = r
- for i := 1; i < 3; i++ {
- r, end = p.Consume()
- if end {
- err = errors.New("Found EOS in NULL value")
- return
- }
- nulBuf[i] = r
- }
- if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
- values = append(values, NullString{String: "", Valid: false})
- state = hsNext
- } else {
- err = fmt.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
- }
- case hsNext:
- if r == ',' {
- r, end = p.Consume()
- switch {
- case end:
- err = errors.New("Found EOS after ',', expcting space")
- case (unicode.IsSpace(r)):
- r, end = p.Consume()
- state = hsKey
- default:
- err = fmt.Errorf("Invalid character '%c' after ', ', expecting \"", r)
- }
- } else {
- err = fmt.Errorf("Invalid character '%c' after value, expecting ','", r)
- }
- }
-
- if err != nil {
- return
- }
- r, end = p.Consume()
- }
- if state != hsNext {
- err = errors.New("Improperly formatted hstore")
- return
- }
- k = keys
- v = values
- return
-}
diff --git a/vendor/github.com/jackc/pgx/hstore_test.go b/vendor/github.com/jackc/pgx/hstore_test.go
deleted file mode 100644
index c948f0c..0000000
--- a/vendor/github.com/jackc/pgx/hstore_test.go
+++ /dev/null
@@ -1,181 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
-)
-
-func TestHstoreTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type test struct {
- hstore pgx.Hstore
- description string
- }
-
- tests := []test{
- {pgx.Hstore{}, "empty"},
- {pgx.Hstore{"foo": "bar"}, "single key/value"},
- {pgx.Hstore{"foo": "bar", "baz": "quz"}, "multiple key/values"},
- {pgx.Hstore{"NULL": "bar"}, `string "NULL" key`},
- {pgx.Hstore{"foo": "NULL"}, `string "NULL" value`},
- }
-
- specialStringTests := []struct {
- input string
- description string
- }{
- {`"`, `double quote (")`},
- {`'`, `single quote (')`},
- {`\`, `backslash (\)`},
- {`\\`, `multiple backslashes (\\)`},
- {`=>`, `separator (=>)`},
- {` `, `space`},
- {`\ / / \\ => " ' " '`, `multiple special characters`},
- }
- for _, sst := range specialStringTests {
- tests = append(tests, test{pgx.Hstore{sst.input + "foo": "bar"}, "key with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.Hstore{"foo" + sst.input + "foo": "bar"}, "key with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.Hstore{"foo" + sst.input: "bar"}, "key with " + sst.description + " at end"})
- tests = append(tests, test{pgx.Hstore{sst.input: "bar"}, "key is " + sst.description})
-
- tests = append(tests, test{pgx.Hstore{"foo": sst.input + "bar"}, "value with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.Hstore{"foo": "bar" + sst.input + "bar"}, "value with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.Hstore{"foo": "bar" + sst.input}, "value with " + sst.description + " at end"})
- tests = append(tests, test{pgx.Hstore{"foo": sst.input}, "value is " + sst.description})
- }
-
- for _, tt := range tests {
- var result pgx.Hstore
- err := conn.QueryRow("select $1::hstore", tt.hstore).Scan(&result)
- if err != nil {
- t.Errorf(`%s: QueryRow.Scan returned an error: %v`, tt.description, err)
- }
-
- for key, inValue := range tt.hstore {
- outValue, ok := result[key]
- if ok {
- if inValue != outValue {
- t.Errorf(`%s: Key %s mismatch - expected %s, received %s`, tt.description, key, inValue, outValue)
- }
- } else {
- t.Errorf(`%s: Missing key %s`, tt.description, key)
- }
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestNullHstoreTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type test struct {
- nullHstore pgx.NullHstore
- description string
- }
-
- tests := []test{
- {pgx.NullHstore{}, "null"},
- {pgx.NullHstore{Valid: true}, "empty"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar", Valid: true}},
- Valid: true},
- "single key/value"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar", Valid: true}, "baz": {String: "quz", Valid: true}},
- Valid: true},
- "multiple key/values"},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"NULL": {String: "bar", Valid: true}},
- Valid: true},
- `string "NULL" key`},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "NULL", Valid: true}},
- Valid: true},
- `string "NULL" value`},
- {pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "", Valid: false}},
- Valid: true},
- `NULL value`},
- }
-
- specialStringTests := []struct {
- input string
- description string
- }{
- {`"`, `double quote (")`},
- {`'`, `single quote (')`},
- {`\`, `backslash (\)`},
- {`\\`, `multiple backslashes (\\)`},
- {`=>`, `separator (=>)`},
- {` `, `space`},
- {`\ / / \\ => " ' " '`, `multiple special characters`},
- }
- for _, sst := range specialStringTests {
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{sst.input + "foo": {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo" + sst.input + "foo": {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo" + sst.input: {String: "bar", Valid: true}},
- Valid: true},
- "key with " + sst.description + " at end"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{sst.input: {String: "bar", Valid: true}},
- Valid: true},
- "key is " + sst.description})
-
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: sst.input + "bar", Valid: true}},
- Valid: true},
- "value with " + sst.description + " at beginning"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar" + sst.input + "bar", Valid: true}},
- Valid: true},
- "value with " + sst.description + " in middle"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: "bar" + sst.input, Valid: true}},
- Valid: true},
- "value with " + sst.description + " at end"})
- tests = append(tests, test{pgx.NullHstore{
- Hstore: map[string]pgx.NullString{"foo": {String: sst.input, Valid: true}},
- Valid: true},
- "value is " + sst.description})
- }
-
- for _, tt := range tests {
- var result pgx.NullHstore
- err := conn.QueryRow("select $1::hstore", tt.nullHstore).Scan(&result)
- if err != nil {
- t.Errorf(`%s: QueryRow.Scan returned an error: %v`, tt.description, err)
- }
-
- if result.Valid != tt.nullHstore.Valid {
- t.Errorf(`%s: Valid mismatch - expected %v, received %v`, tt.description, tt.nullHstore.Valid, result.Valid)
- }
-
- for key, inValue := range tt.nullHstore.Hstore {
- outValue, ok := result.Hstore[key]
- if ok {
- if inValue != outValue {
- t.Errorf(`%s: Key %s mismatch - expected %v, received %v`, tt.description, key, inValue, outValue)
- }
- } else {
- t.Errorf(`%s: Missing key %s`, tt.description, key)
- }
- }
-
- ensureConnValid(t, conn)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go b/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go
new file mode 100644
index 0000000..53543b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/internal/sanitize/sanitize.go
@@ -0,0 +1,237 @@
+package sanitize
+
+import (
+ "bytes"
+ "encoding/hex"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+)
+
+// Part is either a string or an int. A string is raw SQL. An int is an
+// argument placeholder.
+type Part interface{}
+
+type Query struct {
+ Parts []Part
+}
+
+func (q *Query) Sanitize(args ...interface{}) (string, error) {
+ argUse := make([]bool, len(args))
+ buf := &bytes.Buffer{}
+
+ for _, part := range q.Parts {
+ var str string
+ switch part := part.(type) {
+ case string:
+ str = part
+ case int:
+ argIdx := part - 1
+ if argIdx >= len(args) {
+ return "", errors.Errorf("insufficient arguments")
+ }
+ arg := args[argIdx]
+ switch arg := arg.(type) {
+ case nil:
+ str = "null"
+ case int64:
+ str = strconv.FormatInt(arg, 10)
+ case float64:
+ str = strconv.FormatFloat(arg, 'f', -1, 64)
+ case bool:
+ str = strconv.FormatBool(arg)
+ case []byte:
+ str = QuoteBytes(arg)
+ case string:
+ str = QuoteString(arg)
+ case time.Time:
+ str = arg.Format("'2006-01-02 15:04:05.999999999Z07:00:00'")
+ default:
+ return "", errors.Errorf("invalid arg type: %T", arg)
+ }
+ argUse[argIdx] = true
+ default:
+ return "", errors.Errorf("invalid Part type: %T", part)
+ }
+ buf.WriteString(str)
+ }
+
+ for i, used := range argUse {
+ if !used {
+ return "", errors.Errorf("unused argument: %d", i)
+ }
+ }
+ return buf.String(), nil
+}
+
+func NewQuery(sql string) (*Query, error) {
+ l := &sqlLexer{
+ src: sql,
+ stateFn: rawState,
+ }
+
+ for l.stateFn != nil {
+ l.stateFn = l.stateFn(l)
+ }
+
+ query := &Query{Parts: l.parts}
+
+ return query, nil
+}
+
+func QuoteString(str string) string {
+ return "'" + strings.Replace(str, "'", "''", -1) + "'"
+}
+
+func QuoteBytes(buf []byte) string {
+ return `'\x` + hex.EncodeToString(buf) + "'"
+}
+
+type sqlLexer struct {
+ src string
+ start int
+ pos int
+ stateFn stateFn
+ parts []Part
+}
+
+type stateFn func(*sqlLexer) stateFn
+
+func rawState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case 'e', 'E':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune == '\'' {
+ l.pos += width
+ return escapeStringState
+ }
+ case '\'':
+ return singleQuoteState
+ case '"':
+ return doubleQuoteState
+ case '$':
+ nextRune, _ := utf8.DecodeRuneInString(l.src[l.pos:])
+ if '0' <= nextRune && nextRune <= '9' {
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos-width])
+ }
+ l.start = l.pos
+ return placeholderState
+ }
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func singleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+func doubleQuoteState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '"':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '"' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+// placeholderState consumes a placeholder value. The $ must already have
+// been consumed. The first rune must be a digit.
+func placeholderState(l *sqlLexer) stateFn {
+ num := 0
+
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ if '0' <= r && r <= '9' {
+ num *= 10
+ num += int(r - '0')
+ } else {
+ l.parts = append(l.parts, num)
+ l.pos -= width
+ l.start = l.pos
+ return rawState
+ }
+ }
+}
+
+func escapeStringState(l *sqlLexer) stateFn {
+ for {
+ r, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+
+ switch r {
+ case '\\':
+ _, width = utf8.DecodeRuneInString(l.src[l.pos:])
+ l.pos += width
+ case '\'':
+ nextRune, width := utf8.DecodeRuneInString(l.src[l.pos:])
+ if nextRune != '\'' {
+ return rawState
+ }
+ l.pos += width
+ case utf8.RuneError:
+ if l.pos-l.start > 0 {
+ l.parts = append(l.parts, l.src[l.start:l.pos])
+ l.start = l.pos
+ }
+ return nil
+ }
+ }
+}
+
+// SanitizeSQL replaces placeholder values with args. It quotes and escapes args
+// as necessary. This function is only safe when standard_conforming_strings is
+// on.
+func SanitizeSQL(sql string, args ...interface{}) (string, error) {
+ query, err := NewQuery(sql)
+ if err != nil {
+ return "", err
+ }
+ return query.Sanitize(args...)
+}
diff --git a/vendor/github.com/jackc/pgx/large_objects.go b/vendor/github.com/jackc/pgx/large_objects.go
index a4922ef..e109bce 100644
--- a/vendor/github.com/jackc/pgx/large_objects.go
+++ b/vendor/github.com/jackc/pgx/large_objects.go
@@ -2,6 +2,8 @@ package pgx
import (
"io"
+
+ "github.com/jackc/pgx/pgtype"
)
// LargeObjects is a structure used to access the large objects API. It is only
@@ -14,20 +16,20 @@ type LargeObjects struct {
fp *fastpath
}
-const largeObjectFns = `select proname, oid from pg_catalog.pg_proc
+const largeObjectFns = `select proname, oid from pg_catalog.pg_proc
where proname in (
-'lo_open',
-'lo_close',
-'lo_create',
-'lo_unlink',
-'lo_lseek',
-'lo_lseek64',
-'lo_tell',
-'lo_tell64',
-'lo_truncate',
-'lo_truncate64',
-'loread',
-'lowrite')
+'lo_open',
+'lo_close',
+'lo_create',
+'lo_unlink',
+'lo_lseek',
+'lo_lseek64',
+'lo_tell',
+'lo_tell64',
+'lo_truncate',
+'lo_truncate64',
+'loread',
+'lowrite')
and pronamespace = (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog')`
// LargeObjects returns a LargeObjects instance for the transaction.
@@ -60,19 +62,19 @@ const (
// Create creates a new large object. If id is zero, the server assigns an
// unused OID.
-func (o *LargeObjects) Create(id Oid) (Oid, error) {
- newOid, err := fpInt32(o.fp.CallFn("lo_create", []fpArg{fpIntArg(int32(id))}))
- return Oid(newOid), err
+func (o *LargeObjects) Create(id pgtype.OID) (pgtype.OID, error) {
+ newOID, err := fpInt32(o.fp.CallFn("lo_create", []fpArg{fpIntArg(int32(id))}))
+ return pgtype.OID(newOID), err
}
// Open opens an existing large object with the given mode.
-func (o *LargeObjects) Open(oid Oid, mode LargeObjectMode) (*LargeObject, error) {
+func (o *LargeObjects) Open(oid pgtype.OID, mode LargeObjectMode) (*LargeObject, error) {
fd, err := fpInt32(o.fp.CallFn("lo_open", []fpArg{fpIntArg(int32(oid)), fpIntArg(int32(mode))}))
return &LargeObject{fd: fd, lo: o}, err
}
// Unlink removes a large object from the database.
-func (o *LargeObjects) Unlink(oid Oid) error {
+func (o *LargeObjects) Unlink(oid pgtype.OID) error {
_, err := o.fp.CallFn("lo_unlink", []fpArg{fpIntArg(int32(oid))})
return err
}
diff --git a/vendor/github.com/jackc/pgx/large_objects_test.go b/vendor/github.com/jackc/pgx/large_objects_test.go
deleted file mode 100644
index a19c851..0000000
--- a/vendor/github.com/jackc/pgx/large_objects_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package pgx_test
-
-import (
- "io"
- "testing"
-
- "github.com/jackc/pgx"
-)
-
-func TestLargeObjects(t *testing.T) {
- t.Parallel()
-
- conn, err := pgx.Connect(*defaultConnConfig)
- if err != nil {
- t.Fatal(err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- lo, err := tx.LargeObjects()
- if err != nil {
- t.Fatal(err)
- }
-
- id, err := lo.Create(0)
- if err != nil {
- t.Fatal(err)
- }
-
- obj, err := lo.Open(id, pgx.LargeObjectModeRead|pgx.LargeObjectModeWrite)
- if err != nil {
- t.Fatal(err)
- }
-
- n, err := obj.Write([]byte("testing"))
- if err != nil {
- t.Fatal(err)
- }
- if n != 7 {
- t.Errorf("Expected n to be 7, got %d", n)
- }
-
- pos, err := obj.Seek(1, 0)
- if err != nil {
- t.Fatal(err)
- }
- if pos != 1 {
- t.Errorf("Expected pos to be 1, got %d", pos)
- }
-
- res := make([]byte, 6)
- n, err = obj.Read(res)
- if err != nil {
- t.Fatal(err)
- }
- if string(res) != "esting" {
- t.Errorf(`Expected res to be "esting", got %q`, res)
- }
- if n != 6 {
- t.Errorf("Expected n to be 6, got %d", n)
- }
-
- n, err = obj.Read(res)
- if err != io.EOF {
- t.Error("Expected io.EOF, go nil")
- }
- if n != 0 {
- t.Errorf("Expected n to be 0, got %d", n)
- }
-
- pos, err = obj.Tell()
- if err != nil {
- t.Fatal(err)
- }
- if pos != 7 {
- t.Errorf("Expected pos to be 7, got %d", pos)
- }
-
- err = obj.Truncate(1)
- if err != nil {
- t.Fatal(err)
- }
-
- pos, err = obj.Seek(-1, 2)
- if err != nil {
- t.Fatal(err)
- }
- if pos != 0 {
- t.Errorf("Expected pos to be 0, got %d", pos)
- }
-
- res = make([]byte, 2)
- n, err = obj.Read(res)
- if err != io.EOF {
- t.Errorf("Expected err to be io.EOF, got %v", err)
- }
- if n != 1 {
- t.Errorf("Expected n to be 1, got %d", n)
- }
- if res[0] != 't' {
- t.Errorf("Expected res[0] to be 't', got %v", res[0])
- }
-
- err = obj.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- err = lo.Unlink(id)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = lo.Open(id, pgx.LargeObjectModeRead)
- if e, ok := err.(pgx.PgError); !ok || e.Code != "42704" {
- t.Errorf("Expected undefined_object error (42704), got %#v", err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/logger.go b/vendor/github.com/jackc/pgx/logger.go
index 4423325..528698b 100644
--- a/vendor/github.com/jackc/pgx/logger.go
+++ b/vendor/github.com/jackc/pgx/logger.go
@@ -2,13 +2,13 @@ package pgx
import (
"encoding/hex"
- "errors"
"fmt"
+
+ "github.com/pkg/errors"
)
// The values for log levels are chosen such that the zero value means that no
-// log level was specified and we can default to LogLevelDebug to preserve
-// the behavior that existed prior to log level introduction.
+// log level was specified.
const (
LogLevelTrace = 6
LogLevelDebug = 5
@@ -18,16 +18,33 @@ const (
LogLevelNone = 1
)
+// LogLevel represents the pgx logging level. See LogLevel* constants for
+// possible values.
+type LogLevel int
+
+func (ll LogLevel) String() string {
+ switch ll {
+ case LogLevelTrace:
+ return "trace"
+ case LogLevelDebug:
+ return "debug"
+ case LogLevelInfo:
+ return "info"
+ case LogLevelWarn:
+ return "warn"
+ case LogLevelError:
+ return "error"
+ case LogLevelNone:
+ return "none"
+ default:
+ return fmt.Sprintf("invalid level %d", ll)
+ }
+}
+
// Logger is the interface used to get logging from pgx internals.
-// https://github.com/inconshreveable/log15 is the recommended logging package.
-// This logging interface was extracted from there. However, it should be simple
-// to adapt any logger to this interface.
type Logger interface {
- // Log a message at the given level with context key/value pairs
- Debug(msg string, ctx ...interface{})
- Info(msg string, ctx ...interface{})
- Warn(msg string, ctx ...interface{})
- Error(msg string, ctx ...interface{})
+ // Log a message at the given level with data key/value pairs. data may be nil.
+ Log(level LogLevel, msg string, data map[string]interface{})
}
// LogLevelFromString converts log level string to constant
@@ -39,7 +56,7 @@ type Logger interface {
// warn
// error
// none
-func LogLevelFromString(s string) (int, error) {
+func LogLevelFromString(s string) (LogLevel, error) {
switch s {
case "trace":
return LogLevelTrace, nil
diff --git a/vendor/github.com/jackc/pgx/messages.go b/vendor/github.com/jackc/pgx/messages.go
index 317ba27..97e8929 100644
--- a/vendor/github.com/jackc/pgx/messages.go
+++ b/vendor/github.com/jackc/pgx/messages.go
@@ -1,67 +1,76 @@
package pgx
import (
- "encoding/binary"
-)
+ "math"
+ "reflect"
+ "time"
-const (
- protocolVersionNumber = 196608 // 3.0
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgtype"
)
const (
- backendKeyData = 'K'
- authenticationX = 'R'
- readyForQuery = 'Z'
- rowDescription = 'T'
- dataRow = 'D'
- commandComplete = 'C'
- errorResponse = 'E'
- noticeResponse = 'N'
- parseComplete = '1'
- parameterDescription = 't'
- bindComplete = '2'
- notificationResponse = 'A'
- emptyQueryResponse = 'I'
- noData = 'n'
- closeComplete = '3'
- flush = 'H'
- copyInResponse = 'G'
- copyData = 'd'
- copyFail = 'f'
- copyDone = 'c'
+ copyData = 'd'
+ copyFail = 'f'
+ copyDone = 'c'
+ varHeaderSize = 4
)
-type startupMessage struct {
- options map[string]string
+type FieldDescription struct {
+ Name string
+ Table pgtype.OID
+ AttributeNumber uint16
+ DataType pgtype.OID
+ DataTypeSize int16
+ DataTypeName string
+ Modifier uint32
+ FormatCode int16
}
-func newStartupMessage() *startupMessage {
- return &startupMessage{map[string]string{}}
+func (fd FieldDescription) Length() (int64, bool) {
+ switch fd.DataType {
+ case pgtype.TextOID, pgtype.ByteaOID:
+ return math.MaxInt64, true
+ case pgtype.VarcharOID, pgtype.BPCharArrayOID:
+ return int64(fd.Modifier - varHeaderSize), true
+ default:
+ return 0, false
+ }
}
-func (s *startupMessage) Bytes() (buf []byte) {
- buf = make([]byte, 8, 128)
- binary.BigEndian.PutUint32(buf[4:8], uint32(protocolVersionNumber))
- for key, value := range s.options {
- buf = append(buf, key...)
- buf = append(buf, 0)
- buf = append(buf, value...)
- buf = append(buf, 0)
+func (fd FieldDescription) PrecisionScale() (precision, scale int64, ok bool) {
+ switch fd.DataType {
+ case pgtype.NumericOID:
+ mod := fd.Modifier - varHeaderSize
+ precision = int64((mod >> 16) & 0xffff)
+ scale = int64(mod & 0xffff)
+ return precision, scale, true
+ default:
+ return 0, 0, false
}
- buf = append(buf, ("\000")...)
- binary.BigEndian.PutUint32(buf[0:4], uint32(len(buf)))
- return buf
}
-type FieldDescription struct {
- Name string
- Table Oid
- AttributeNumber int16
- DataType Oid
- DataTypeSize int16
- DataTypeName string
- Modifier int32
- FormatCode int16
+func (fd FieldDescription) Type() reflect.Type {
+ switch fd.DataType {
+ case pgtype.Int8OID:
+ return reflect.TypeOf(int64(0))
+ case pgtype.Int4OID:
+ return reflect.TypeOf(int32(0))
+ case pgtype.Int2OID:
+ return reflect.TypeOf(int16(0))
+ case pgtype.VarcharOID, pgtype.BPCharArrayOID, pgtype.TextOID:
+ return reflect.TypeOf("")
+ case pgtype.BoolOID:
+ return reflect.TypeOf(false)
+ case pgtype.NumericOID:
+ return reflect.TypeOf(float64(0))
+ case pgtype.DateOID, pgtype.TimestampOID, pgtype.TimestamptzOID:
+ return reflect.TypeOf(time.Time{})
+ case pgtype.ByteaOID:
+ return reflect.TypeOf([]byte(nil))
+ default:
+ return reflect.TypeOf(new(interface{})).Elem()
+ }
}
// PgError represents an error reported by the PostgreSQL server. See
@@ -91,69 +100,114 @@ func (pe PgError) Error() string {
return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
}
-func newWriteBuf(c *Conn, t byte) *WriteBuf {
- buf := append(c.wbuf[0:0], t, 0, 0, 0, 0)
- c.writeBuf = WriteBuf{buf: buf, sizeIdx: 1, conn: c}
- return &c.writeBuf
-}
+// Notice represents a notice response message reported by the PostgreSQL
+// server. Be aware that this is distinct from LISTEN/NOTIFY notification.
+type Notice PgError
-// WriteBuf is used build messages to send to the PostgreSQL server. It is used
-// by the Encoder interface when implementing custom encoders.
-type WriteBuf struct {
- buf []byte
- sizeIdx int
- conn *Conn
-}
+// appendParse appends a PostgreSQL wire protocol parse message to buf and returns it.
+func appendParse(buf []byte, name string, query string, parameterOIDs []pgtype.OID) []byte {
+ buf = append(buf, 'P')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ buf = append(buf, query...)
+ buf = append(buf, 0)
-func (wb *WriteBuf) startMsg(t byte) {
- wb.closeMsg()
- wb.buf = append(wb.buf, t, 0, 0, 0, 0)
- wb.sizeIdx = len(wb.buf) - 4
-}
+ buf = pgio.AppendInt16(buf, int16(len(parameterOIDs)))
+ for _, oid := range parameterOIDs {
+ buf = pgio.AppendUint32(buf, uint32(oid))
+ }
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) closeMsg() {
- binary.BigEndian.PutUint32(wb.buf[wb.sizeIdx:wb.sizeIdx+4], uint32(len(wb.buf)-wb.sizeIdx))
+ return buf
}
-func (wb *WriteBuf) WriteByte(b byte) {
- wb.buf = append(wb.buf, b)
-}
+// appendDescribe appends a PostgreSQL wire protocol describe message to buf and returns it.
+func appendDescribe(buf []byte, objectType byte, name string) []byte {
+ buf = append(buf, 'D')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, objectType)
+ buf = append(buf, name...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) WriteCString(s string) {
- wb.buf = append(wb.buf, []byte(s)...)
- wb.buf = append(wb.buf, 0)
+ return buf
}
-func (wb *WriteBuf) WriteInt16(n int16) {
- b := make([]byte, 2)
- binary.BigEndian.PutUint16(b, uint16(n))
- wb.buf = append(wb.buf, b...)
-}
+// appendSync appends a PostgreSQL wire protocol sync message to buf and returns it.
+func appendSync(buf []byte) []byte {
+ buf = append(buf, 'S')
+ buf = pgio.AppendInt32(buf, 4)
-func (wb *WriteBuf) WriteUint16(n uint16) {
- b := make([]byte, 2)
- binary.BigEndian.PutUint16(b, n)
- wb.buf = append(wb.buf, b...)
+ return buf
}
-func (wb *WriteBuf) WriteInt32(n int32) {
- b := make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(n))
- wb.buf = append(wb.buf, b...)
-}
+// appendBind appends a PostgreSQL wire protocol bind message to buf and returns it.
+func appendBind(
+ buf []byte,
+ destinationPortal,
+ preparedStatement string,
+ connInfo *pgtype.ConnInfo,
+ parameterOIDs []pgtype.OID,
+ arguments []interface{},
+ resultFormatCodes []int16,
+) ([]byte, error) {
+ buf = append(buf, 'B')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, destinationPortal...)
+ buf = append(buf, 0)
+ buf = append(buf, preparedStatement...)
+ buf = append(buf, 0)
+
+ buf = pgio.AppendInt16(buf, int16(len(parameterOIDs)))
+ for i, oid := range parameterOIDs {
+ buf = pgio.AppendInt16(buf, chooseParameterFormatCode(connInfo, oid, arguments[i]))
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(arguments)))
+ for i, oid := range parameterOIDs {
+ var err error
+ buf, err = encodePreparedStatementArgument(connInfo, buf, oid, arguments[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ buf = pgio.AppendInt16(buf, int16(len(resultFormatCodes)))
+ for _, fc := range resultFormatCodes {
+ buf = pgio.AppendInt16(buf, fc)
+ }
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
-func (wb *WriteBuf) WriteUint32(n uint32) {
- b := make([]byte, 4)
- binary.BigEndian.PutUint32(b, n)
- wb.buf = append(wb.buf, b...)
+ return buf, nil
}
-func (wb *WriteBuf) WriteInt64(n int64) {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(n))
- wb.buf = append(wb.buf, b...)
+// appendExecute appends a PostgreSQL wire protocol execute message to buf and returns it.
+func appendExecute(buf []byte, portal string, maxRows uint32) []byte {
+ buf = append(buf, 'E')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = append(buf, portal...)
+ buf = append(buf, 0)
+ buf = pgio.AppendUint32(buf, maxRows)
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ return buf
}
-func (wb *WriteBuf) WriteBytes(b []byte) {
- wb.buf = append(wb.buf, b...)
+// appendQuery appends a PostgreSQL wire protocol query message to buf and returns it.
+func appendQuery(buf []byte, query string) []byte {
+ buf = append(buf, 'Q')
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ buf = append(buf, query...)
+ buf = append(buf, 0)
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
+
+ return buf
}
diff --git a/vendor/github.com/jackc/pgx/msg_reader.go b/vendor/github.com/jackc/pgx/msg_reader.go
deleted file mode 100644
index 21db5d2..0000000
--- a/vendor/github.com/jackc/pgx/msg_reader.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package pgx
-
-import (
- "bufio"
- "encoding/binary"
- "errors"
- "io"
-)
-
-// msgReader is a helper that reads values from a PostgreSQL message.
-type msgReader struct {
- reader *bufio.Reader
- msgBytesRemaining int32
- err error
- log func(lvl int, msg string, ctx ...interface{})
- shouldLog func(lvl int) bool
-}
-
-// Err returns any error that the msgReader has experienced
-func (r *msgReader) Err() error {
- return r.err
-}
-
-// fatal tells rc that a Fatal error has occurred
-func (r *msgReader) fatal(err error) {
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.fatal", "error", err, "msgBytesRemaining", r.msgBytesRemaining)
- }
- r.err = err
-}
-
-// rxMsg reads the type and size of the next message.
-func (r *msgReader) rxMsg() (byte, error) {
- if r.err != nil {
- return 0, r.err
- }
-
- if r.msgBytesRemaining > 0 {
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.rxMsg discarding unread previous message", "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- _, err := r.reader.Discard(int(r.msgBytesRemaining))
- if err != nil {
- return 0, err
- }
- }
-
- b, err := r.reader.Peek(5)
- if err != nil {
- r.fatal(err)
- return 0, err
- }
- msgType := b[0]
- r.msgBytesRemaining = int32(binary.BigEndian.Uint32(b[1:])) - 4
- r.reader.Discard(5)
- return msgType, nil
-}
-
-func (r *msgReader) readByte() byte {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining--
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.ReadByte()
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readByte", "value", b, "byteAsString", string(b), "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return b
-}
-
-func (r *msgReader) readInt16() int16 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 2
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(2)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int16(binary.BigEndian.Uint16(b))
-
- r.reader.Discard(2)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt16", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readInt32() int32 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 4
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(4)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int32(binary.BigEndian.Uint32(b))
-
- r.reader.Discard(4)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt32", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readUint16() uint16 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 2
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(2)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := uint16(binary.BigEndian.Uint16(b))
-
- r.reader.Discard(2)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readUint16", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readUint32() uint32 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 4
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(4)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := uint32(binary.BigEndian.Uint32(b))
-
- r.reader.Discard(4)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readUint32", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readInt64() int64 {
- if r.err != nil {
- return 0
- }
-
- r.msgBytesRemaining -= 8
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return 0
- }
-
- b, err := r.reader.Peek(8)
- if err != nil {
- r.fatal(err)
- return 0
- }
-
- n := int64(binary.BigEndian.Uint64(b))
-
- r.reader.Discard(8)
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readInt64", "value", n, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return n
-}
-
-func (r *msgReader) readOid() Oid {
- return Oid(r.readInt32())
-}
-
-// readCString reads a null terminated string
-func (r *msgReader) readCString() string {
- if r.err != nil {
- return ""
- }
-
- b, err := r.reader.ReadBytes(0)
- if err != nil {
- r.fatal(err)
- return ""
- }
-
- r.msgBytesRemaining -= int32(len(b))
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return ""
- }
-
- s := string(b[0 : len(b)-1])
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readCString", "value", s, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return s
-}
-
-// readString reads count bytes and returns as string
-func (r *msgReader) readString(countI32 int32) string {
- if r.err != nil {
- return ""
- }
-
- r.msgBytesRemaining -= countI32
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return ""
- }
-
- count := int(countI32)
- var s string
-
- if r.reader.Buffered() >= count {
- buf, _ := r.reader.Peek(count)
- s = string(buf)
- r.reader.Discard(count)
- } else {
- buf := make([]byte, count)
- _, err := io.ReadFull(r.reader, buf)
- if err != nil {
- r.fatal(err)
- return ""
- }
- s = string(buf)
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readString", "value", s, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return s
-}
-
-// readBytes reads count bytes and returns as []byte
-func (r *msgReader) readBytes(count int32) []byte {
- if r.err != nil {
- return nil
- }
-
- r.msgBytesRemaining -= count
- if r.msgBytesRemaining < 0 {
- r.fatal(errors.New("read past end of message"))
- return nil
- }
-
- b := make([]byte, int(count))
-
- _, err := io.ReadFull(r.reader, b)
- if err != nil {
- r.fatal(err)
- return nil
- }
-
- if r.shouldLog(LogLevelTrace) {
- r.log(LogLevelTrace, "msgReader.readBytes", "value", b, "msgBytesRemaining", r.msgBytesRemaining)
- }
-
- return b
-}
diff --git a/vendor/github.com/jackc/pgx/pgio/doc.go b/vendor/github.com/jackc/pgx/pgio/doc.go
new file mode 100644
index 0000000..ef2dcc7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgio/doc.go
@@ -0,0 +1,6 @@
+// Package pgio is a low-level toolkit for building messages in the PostgreSQL wire protocol.
+/*
+pgio provides functions for appending integers to a []byte while doing byte
+order conversion.
+*/
+package pgio
diff --git a/vendor/github.com/jackc/pgx/pgio/write.go b/vendor/github.com/jackc/pgx/pgio/write.go
new file mode 100644
index 0000000..96aedf9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgio/write.go
@@ -0,0 +1,40 @@
+package pgio
+
+import "encoding/binary"
+
+func AppendUint16(buf []byte, n uint16) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0)
+ binary.BigEndian.PutUint16(buf[wp:], n)
+ return buf
+}
+
+func AppendUint32(buf []byte, n uint32) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0)
+ binary.BigEndian.PutUint32(buf[wp:], n)
+ return buf
+}
+
+func AppendUint64(buf []byte, n uint64) []byte {
+ wp := len(buf)
+ buf = append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
+ binary.BigEndian.PutUint64(buf[wp:], n)
+ return buf
+}
+
+func AppendInt16(buf []byte, n int16) []byte {
+ return AppendUint16(buf, uint16(n))
+}
+
+func AppendInt32(buf []byte, n int32) []byte {
+ return AppendUint32(buf, uint32(n))
+}
+
+func AppendInt64(buf []byte, n int64) []byte {
+ return AppendUint64(buf, uint64(n))
+}
+
+func SetInt32(buf []byte, n int32) {
+ binary.BigEndian.PutUint32(buf, uint32(n))
+}
diff --git a/vendor/github.com/jackc/pgx/pgpass_test.go b/vendor/github.com/jackc/pgx/pgpass_test.go
deleted file mode 100644
index f6094c8..0000000
--- a/vendor/github.com/jackc/pgx/pgpass_test.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package pgx
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "strings"
- "testing"
-)
-
-func unescape(s string) string {
- s = strings.Replace(s, `\:`, `:`, -1)
- s = strings.Replace(s, `\\`, `\`, -1)
- return s
-}
-
-var passfile = [][]string{
- []string{"test1", "5432", "larrydb", "larry", "whatstheidea"},
- []string{"test1", "5432", "moedb", "moe", "imbecile"},
- []string{"test1", "5432", "curlydb", "curly", "nyuknyuknyuk"},
- []string{"test2", "5432", "*", "shemp", "heymoe"},
- []string{"test2", "5432", "*", "*", `test\\ing\:`},
-}
-
-func TestPGPass(t *testing.T) {
- tf, err := ioutil.TempFile("", "")
- if err != nil {
- t.Fatal(err)
- }
- defer tf.Close()
- defer os.Remove(tf.Name())
- os.Setenv("PGPASSFILE", tf.Name())
- for _, l := range passfile {
- _, err := fmt.Fprintln(tf, strings.Join(l, `:`))
- if err != nil {
- t.Fatal(err)
- }
- }
- if err = tf.Close(); err != nil {
- t.Fatal(err)
- }
- for i, l := range passfile {
- cfg := ConnConfig{Host: l[0], Database: l[2], User: l[3]}
- found := pgpass(&cfg)
- if !found {
- t.Fatalf("Entry %v not found", i)
- }
- if cfg.Password != unescape(l[4]) {
- t.Fatalf(`Password mismatch entry %v want %s got %s`, i, unescape(l[4]), cfg.Password)
- }
- }
- cfg := ConnConfig{Host: "derp", Database: "herp", User: "joe"}
- found := pgpass(&cfg)
- if found {
- t.Fatal("bad found")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/authentication.go b/vendor/github.com/jackc/pgx/pgproto3/authentication.go
new file mode 100644
index 0000000..77750b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/authentication.go
@@ -0,0 +1,54 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const (
+ AuthTypeOk = 0
+ AuthTypeCleartextPassword = 3
+ AuthTypeMD5Password = 5
+)
+
+type Authentication struct {
+ Type uint32
+
+ // MD5Password fields
+ Salt [4]byte
+}
+
+func (*Authentication) Backend() {}
+
+func (dst *Authentication) Decode(src []byte) error {
+ *dst = Authentication{Type: binary.BigEndian.Uint32(src[:4])}
+
+ switch dst.Type {
+ case AuthTypeOk:
+ case AuthTypeCleartextPassword:
+ case AuthTypeMD5Password:
+ copy(dst.Salt[:], src[4:8])
+ default:
+ return errors.Errorf("unknown authentication type: %d", dst.Type)
+ }
+
+ return nil
+}
+
+func (src *Authentication) Encode(dst []byte) []byte {
+ dst = append(dst, 'R')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+ dst = pgio.AppendUint32(dst, src.Type)
+
+ switch src.Type {
+ case AuthTypeMD5Password:
+ dst = append(dst, src.Salt[:]...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/backend.go b/vendor/github.com/jackc/pgx/pgproto3/backend.go
new file mode 100644
index 0000000..8f3c347
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/backend.go
@@ -0,0 +1,110 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/jackc/pgx/chunkreader"
+ "github.com/pkg/errors"
+)
+
+type Backend struct {
+ cr *chunkreader.ChunkReader
+ w io.Writer
+
+ // Frontend message flyweights
+ bind Bind
+ _close Close
+ describe Describe
+ execute Execute
+ flush Flush
+ parse Parse
+ passwordMessage PasswordMessage
+ query Query
+ startupMessage StartupMessage
+ sync Sync
+ terminate Terminate
+
+ bodyLen int
+ msgType byte
+ partialMsg bool
+}
+
+func NewBackend(r io.Reader, w io.Writer) (*Backend, error) {
+ cr := chunkreader.NewChunkReader(r)
+ return &Backend{cr: cr, w: w}, nil
+}
+
+func (b *Backend) Send(msg BackendMessage) error {
+ _, err := b.w.Write(msg.Encode(nil))
+ return err
+}
+
+func (b *Backend) ReceiveStartupMessage() (*StartupMessage, error) {
+ buf, err := b.cr.Next(4)
+ if err != nil {
+ return nil, err
+ }
+ msgSize := int(binary.BigEndian.Uint32(buf) - 4)
+
+ buf, err = b.cr.Next(msgSize)
+ if err != nil {
+ return nil, err
+ }
+
+ err = b.startupMessage.Decode(buf)
+ if err != nil {
+ return nil, err
+ }
+
+ return &b.startupMessage, nil
+}
+
+func (b *Backend) Receive() (FrontendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, err
+ }
+
+ b.msgType = header[0]
+ b.bodyLen = int(binary.BigEndian.Uint32(header[1:])) - 4
+ b.partialMsg = true
+ }
+
+ var msg FrontendMessage
+ switch b.msgType {
+ case 'B':
+ msg = &b.bind
+ case 'C':
+ msg = &b._close
+ case 'D':
+ msg = &b.describe
+ case 'E':
+ msg = &b.execute
+ case 'H':
+ msg = &b.flush
+ case 'P':
+ msg = &b.parse
+ case 'p':
+ msg = &b.passwordMessage
+ case 'Q':
+ msg = &b.query
+ case 'S':
+ msg = &b.sync
+ case 'X':
+ msg = &b.terminate
+ default:
+ return nil, errors.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, err
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ return msg, err
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go b/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go
new file mode 100644
index 0000000..5a478f1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/backend_key_data.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type BackendKeyData struct {
+ ProcessID uint32
+ SecretKey uint32
+}
+
+func (*BackendKeyData) Backend() {}
+
+func (dst *BackendKeyData) Decode(src []byte) error {
+ if len(src) != 8 {
+ return &invalidMessageLenErr{messageType: "BackendKeyData", expectedLen: 8, actualLen: len(src)}
+ }
+
+ dst.ProcessID = binary.BigEndian.Uint32(src[:4])
+ dst.SecretKey = binary.BigEndian.Uint32(src[4:])
+
+ return nil
+}
+
+func (src *BackendKeyData) Encode(dst []byte) []byte {
+ dst = append(dst, 'K')
+ dst = pgio.AppendUint32(dst, 12)
+ dst = pgio.AppendUint32(dst, src.ProcessID)
+ dst = pgio.AppendUint32(dst, src.SecretKey)
+ return dst
+}
+
+func (src *BackendKeyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProcessID uint32
+ SecretKey uint32
+ }{
+ Type: "BackendKeyData",
+ ProcessID: src.ProcessID,
+ SecretKey: src.SecretKey,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/big_endian.go b/vendor/github.com/jackc/pgx/pgproto3/big_endian.go
new file mode 100644
index 0000000..f7bdb97
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/big_endian.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+)
+
+type BigEndianBuf [8]byte
+
+func (b BigEndianBuf) Int16(n int16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, uint16(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint16(n uint16) []byte {
+ buf := b[0:2]
+ binary.BigEndian.PutUint16(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int32(n int32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, uint32(n))
+ return buf
+}
+
+func (b BigEndianBuf) Uint32(n uint32) []byte {
+ buf := b[0:4]
+ binary.BigEndian.PutUint32(buf, n)
+ return buf
+}
+
+func (b BigEndianBuf) Int64(n int64) []byte {
+ buf := b[0:8]
+ binary.BigEndian.PutUint64(buf, uint64(n))
+ return buf
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/bind.go b/vendor/github.com/jackc/pgx/pgproto3/bind.go
new file mode 100644
index 0000000..cceee6a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/bind.go
@@ -0,0 +1,171 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Bind struct {
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters [][]byte
+ ResultFormatCodes []int16
+}
+
+func (*Bind) Frontend() {}
+
+func (dst *Bind) Decode(src []byte) error {
+ *dst = Bind{}
+
+ idx := bytes.IndexByte(src, 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.DestinationPortal = string(src[:idx])
+ rp := idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ dst.PreparedStatement = string(src[rp : rp+idx])
+ rp += idx + 1
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterFormatCodeCount > 0 {
+ dst.ParameterFormatCodes = make([]int16, parameterFormatCodeCount)
+
+ if len(src[rp:]) < len(dst.ParameterFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < parameterFormatCodeCount; i++ {
+ dst.ParameterFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ parameterCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ if parameterCount > 0 {
+ dst.Parameters = make([][]byte, parameterCount)
+
+ for i := 0; i < parameterCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if msgSize == -1 {
+ continue
+ }
+
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+
+ dst.Parameters[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ if len(src[rp:]) < 2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ resultFormatCodeCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ dst.ResultFormatCodes = make([]int16, resultFormatCodeCount)
+ if len(src[rp:]) < len(dst.ResultFormatCodes)*2 {
+ return &invalidMessageFormatErr{messageType: "Bind"}
+ }
+ for i := 0; i < resultFormatCodeCount; i++ {
+ dst.ResultFormatCodes[i] = int16(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+ }
+
+ return nil
+}
+
+func (src *Bind) Encode(dst []byte) []byte {
+ dst = append(dst, 'B')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.DestinationPortal...)
+ dst = append(dst, 0)
+ dst = append(dst, src.PreparedStatement...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterFormatCodes)))
+ for _, fc := range src.ParameterFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Parameters)))
+ for _, p := range src.Parameters {
+ if p == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(p)))
+ dst = append(dst, p...)
+ }
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ResultFormatCodes)))
+ for _, fc := range src.ResultFormatCodes {
+ dst = pgio.AppendInt16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Bind) MarshalJSON() ([]byte, error) {
+ formattedParameters := make([]map[string]string, len(src.Parameters))
+ for i, p := range src.Parameters {
+ if p == nil {
+ continue
+ }
+
+ if src.ParameterFormatCodes[i] == 0 {
+ formattedParameters[i] = map[string]string{"text": string(p)}
+ } else {
+ formattedParameters[i] = map[string]string{"binary": hex.EncodeToString(p)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ DestinationPortal string
+ PreparedStatement string
+ ParameterFormatCodes []int16
+ Parameters []map[string]string
+ ResultFormatCodes []int16
+ }{
+ Type: "Bind",
+ DestinationPortal: src.DestinationPortal,
+ PreparedStatement: src.PreparedStatement,
+ ParameterFormatCodes: src.ParameterFormatCodes,
+ Parameters: formattedParameters,
+ ResultFormatCodes: src.ResultFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go b/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go
new file mode 100644
index 0000000..6036051
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/bind_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type BindComplete struct{}
+
+func (*BindComplete) Backend() {}
+
+func (dst *BindComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "BindComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *BindComplete) Encode(dst []byte) []byte {
+ return append(dst, '2', 0, 0, 0, 4)
+}
+
+func (src *BindComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "BindComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/close.go b/vendor/github.com/jackc/pgx/pgproto3/close.go
new file mode 100644
index 0000000..5ff4c88
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/close.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Close struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+func (*Close) Frontend() {}
+
+func (dst *Close) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Close"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+func (src *Close) Encode(dst []byte) []byte {
+ dst = append(dst, 'C')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Close) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Close",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/close_complete.go b/vendor/github.com/jackc/pgx/pgproto3/close_complete.go
new file mode 100644
index 0000000..db793c9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/close_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type CloseComplete struct{}
+
+func (*CloseComplete) Backend() {}
+
+func (dst *CloseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "CloseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *CloseComplete) Encode(dst []byte) []byte {
+ return append(dst, '3', 0, 0, 0, 4)
+}
+
+func (src *CloseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "CloseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/command_complete.go b/vendor/github.com/jackc/pgx/pgproto3/command_complete.go
new file mode 100644
index 0000000..8584853
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/command_complete.go
@@ -0,0 +1,48 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CommandComplete struct {
+ CommandTag string
+}
+
+func (*CommandComplete) Backend() {}
+
+func (dst *CommandComplete) Decode(src []byte) error {
+ idx := bytes.IndexByte(src, 0)
+ if idx != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "CommandComplete"}
+ }
+
+ dst.CommandTag = string(src[:idx])
+
+ return nil
+}
+
+func (src *CommandComplete) Encode(dst []byte) []byte {
+ dst = append(dst, 'C')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.CommandTag...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CommandComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ CommandTag string
+ }{
+ Type: "CommandComplete",
+ CommandTag: src.CommandTag,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go
new file mode 100644
index 0000000..2862a34
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_both_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyBothResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyBothResponse) Backend() {}
+
+func (dst *CopyBothResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyBothResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyBothResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyBothResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'W')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyBothResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyBothResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_data.go b/vendor/github.com/jackc/pgx/pgproto3/copy_data.go
new file mode 100644
index 0000000..fab139e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_data.go
@@ -0,0 +1,37 @@
+package pgproto3
+
+import (
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyData struct {
+ Data []byte
+}
+
+func (*CopyData) Backend() {}
+func (*CopyData) Frontend() {}
+
+func (dst *CopyData) Decode(src []byte) error {
+ dst.Data = src
+ return nil
+}
+
+func (src *CopyData) Encode(dst []byte) []byte {
+ dst = append(dst, 'd')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.Data)))
+ dst = append(dst, src.Data...)
+ return dst
+}
+
+func (src *CopyData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Data string
+ }{
+ Type: "CopyData",
+ Data: hex.EncodeToString(src.Data),
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go
new file mode 100644
index 0000000..54083cd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_in_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyInResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyInResponse) Backend() {}
+
+func (dst *CopyInResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyInResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyInResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyInResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'G')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyInResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyInResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go b/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go
new file mode 100644
index 0000000..eaa33b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/copy_out_response.go
@@ -0,0 +1,65 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type CopyOutResponse struct {
+ OverallFormat byte
+ ColumnFormatCodes []uint16
+}
+
+func (*CopyOutResponse) Backend() {}
+
+func (dst *CopyOutResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 3 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ overallFormat := buf.Next(1)[0]
+
+ columnCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+ if buf.Len() != columnCount*2 {
+ return &invalidMessageFormatErr{messageType: "CopyOutResponse"}
+ }
+
+ columnFormatCodes := make([]uint16, columnCount)
+ for i := 0; i < columnCount; i++ {
+ columnFormatCodes[i] = binary.BigEndian.Uint16(buf.Next(2))
+ }
+
+ *dst = CopyOutResponse{OverallFormat: overallFormat, ColumnFormatCodes: columnFormatCodes}
+
+ return nil
+}
+
+func (src *CopyOutResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'H')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ColumnFormatCodes)))
+ for _, fc := range src.ColumnFormatCodes {
+ dst = pgio.AppendUint16(dst, fc)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *CopyOutResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ColumnFormatCodes []uint16
+ }{
+ Type: "CopyOutResponse",
+ ColumnFormatCodes: src.ColumnFormatCodes,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/data_row.go b/vendor/github.com/jackc/pgx/pgproto3/data_row.go
new file mode 100644
index 0000000..e46d3cc
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/data_row.go
@@ -0,0 +1,112 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type DataRow struct {
+ Values [][]byte
+}
+
+func (*DataRow) Backend() {}
+
+func (dst *DataRow) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+ rp := 0
+ fieldCount := int(binary.BigEndian.Uint16(src[rp:]))
+ rp += 2
+
+ // If the capacity of the values slice is too small OR substantially too
+ // large, reallocate. This is to prevent one row with many columns from
+ // permanently allocating memory.
+ if cap(dst.Values) < fieldCount || cap(dst.Values)-fieldCount > 32 {
+ newCap := 32
+ if newCap < fieldCount {
+ newCap = fieldCount
+ }
+ dst.Values = make([][]byte, fieldCount, newCap)
+ } else {
+ dst.Values = dst.Values[:fieldCount]
+ }
+
+ for i := 0; i < fieldCount; i++ {
+ if len(src[rp:]) < 4 {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ msgSize := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ // null
+ if msgSize == -1 {
+ dst.Values[i] = nil
+ } else {
+ if len(src[rp:]) < msgSize {
+ return &invalidMessageFormatErr{messageType: "DataRow"}
+ }
+
+ dst.Values[i] = src[rp : rp+msgSize]
+ rp += msgSize
+ }
+ }
+
+ return nil
+}
+
+func (src *DataRow) Encode(dst []byte) []byte {
+ dst = append(dst, 'D')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Values)))
+ for _, v := range src.Values {
+ if v == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ continue
+ }
+
+ dst = pgio.AppendInt32(dst, int32(len(v)))
+ dst = append(dst, v...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *DataRow) MarshalJSON() ([]byte, error) {
+ formattedValues := make([]map[string]string, len(src.Values))
+ for i, v := range src.Values {
+ if v == nil {
+ continue
+ }
+
+ var hasNonPrintable bool
+ for _, b := range v {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValues[i] = map[string]string{"binary": hex.EncodeToString(v)}
+ } else {
+ formattedValues[i] = map[string]string{"text": string(v)}
+ }
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Values []map[string]string
+ }{
+ Type: "DataRow",
+ Values: formattedValues,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/describe.go b/vendor/github.com/jackc/pgx/pgproto3/describe.go
new file mode 100644
index 0000000..bb7bc05
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/describe.go
@@ -0,0 +1,59 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Describe struct {
+ ObjectType byte // 'S' = prepared statement, 'P' = portal
+ Name string
+}
+
+func (*Describe) Frontend() {}
+
+func (dst *Describe) Decode(src []byte) error {
+ if len(src) < 2 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.ObjectType = src[0]
+ rp := 1
+
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx != len(src[rp:])-1 {
+ return &invalidMessageFormatErr{messageType: "Describe"}
+ }
+
+ dst.Name = string(src[rp : len(src)-1])
+
+ return nil
+}
+
+func (src *Describe) Encode(dst []byte) []byte {
+ dst = append(dst, 'D')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.ObjectType)
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Describe) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ObjectType string
+ Name string
+ }{
+ Type: "Describe",
+ ObjectType: string(src.ObjectType),
+ Name: src.Name,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go b/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go
new file mode 100644
index 0000000..d283b06
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/empty_query_response.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type EmptyQueryResponse struct{}
+
+func (*EmptyQueryResponse) Backend() {}
+
+func (dst *EmptyQueryResponse) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "EmptyQueryResponse", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *EmptyQueryResponse) Encode(dst []byte) []byte {
+ return append(dst, 'I', 0, 0, 0, 4)
+}
+
+func (src *EmptyQueryResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "EmptyQueryResponse",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/error_response.go b/vendor/github.com/jackc/pgx/pgproto3/error_response.go
new file mode 100644
index 0000000..160234f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/error_response.go
@@ -0,0 +1,197 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "strconv"
+)
+
+type ErrorResponse struct {
+ Severity string
+ Code string
+ Message string
+ Detail string
+ Hint string
+ Position int32
+ InternalPosition int32
+ InternalQuery string
+ Where string
+ SchemaName string
+ TableName string
+ ColumnName string
+ DataTypeName string
+ ConstraintName string
+ File string
+ Line int32
+ Routine string
+
+ UnknownFields map[byte]string
+}
+
+func (*ErrorResponse) Backend() {}
+
+func (dst *ErrorResponse) Decode(src []byte) error {
+ *dst = ErrorResponse{}
+
+ buf := bytes.NewBuffer(src)
+
+ for {
+ k, err := buf.ReadByte()
+ if err != nil {
+ return err
+ }
+ if k == 0 {
+ break
+ }
+
+ vb, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ v := string(vb[:len(vb)-1])
+
+ switch k {
+ case 'S':
+ dst.Severity = v
+ case 'C':
+ dst.Code = v
+ case 'M':
+ dst.Message = v
+ case 'D':
+ dst.Detail = v
+ case 'H':
+ dst.Hint = v
+ case 'P':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Position = int32(n)
+ case 'p':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.InternalPosition = int32(n)
+ case 'q':
+ dst.InternalQuery = v
+ case 'W':
+ dst.Where = v
+ case 's':
+ dst.SchemaName = v
+ case 't':
+ dst.TableName = v
+ case 'c':
+ dst.ColumnName = v
+ case 'd':
+ dst.DataTypeName = v
+ case 'n':
+ dst.ConstraintName = v
+ case 'F':
+ dst.File = v
+ case 'L':
+ s := v
+ n, _ := strconv.ParseInt(s, 10, 32)
+ dst.Line = int32(n)
+ case 'R':
+ dst.Routine = v
+
+ default:
+ if dst.UnknownFields == nil {
+ dst.UnknownFields = make(map[byte]string)
+ }
+ dst.UnknownFields[k] = v
+ }
+ }
+
+ return nil
+}
+
+func (src *ErrorResponse) Encode(dst []byte) []byte {
+ return append(dst, src.marshalBinary('E')...)
+}
+
+func (src *ErrorResponse) marshalBinary(typeByte byte) []byte {
+ var bigEndian BigEndianBuf
+ buf := &bytes.Buffer{}
+
+ buf.WriteByte(typeByte)
+ buf.Write(bigEndian.Uint32(0))
+
+ if src.Severity != "" {
+ buf.WriteString(src.Severity)
+ buf.WriteByte(0)
+ }
+ if src.Code != "" {
+ buf.WriteString(src.Code)
+ buf.WriteByte(0)
+ }
+ if src.Message != "" {
+ buf.WriteString(src.Message)
+ buf.WriteByte(0)
+ }
+ if src.Detail != "" {
+ buf.WriteString(src.Detail)
+ buf.WriteByte(0)
+ }
+ if src.Hint != "" {
+ buf.WriteString(src.Hint)
+ buf.WriteByte(0)
+ }
+ if src.Position != 0 {
+ buf.WriteString(strconv.Itoa(int(src.Position)))
+ buf.WriteByte(0)
+ }
+ if src.InternalPosition != 0 {
+ buf.WriteString(strconv.Itoa(int(src.InternalPosition)))
+ buf.WriteByte(0)
+ }
+ if src.InternalQuery != "" {
+ buf.WriteString(src.InternalQuery)
+ buf.WriteByte(0)
+ }
+ if src.Where != "" {
+ buf.WriteString(src.Where)
+ buf.WriteByte(0)
+ }
+ if src.SchemaName != "" {
+ buf.WriteString(src.SchemaName)
+ buf.WriteByte(0)
+ }
+ if src.TableName != "" {
+ buf.WriteString(src.TableName)
+ buf.WriteByte(0)
+ }
+ if src.ColumnName != "" {
+ buf.WriteString(src.ColumnName)
+ buf.WriteByte(0)
+ }
+ if src.DataTypeName != "" {
+ buf.WriteString(src.DataTypeName)
+ buf.WriteByte(0)
+ }
+ if src.ConstraintName != "" {
+ buf.WriteString(src.ConstraintName)
+ buf.WriteByte(0)
+ }
+ if src.File != "" {
+ buf.WriteString(src.File)
+ buf.WriteByte(0)
+ }
+ if src.Line != 0 {
+ buf.WriteString(strconv.Itoa(int(src.Line)))
+ buf.WriteByte(0)
+ }
+ if src.Routine != "" {
+ buf.WriteString(src.Routine)
+ buf.WriteByte(0)
+ }
+
+ for k, v := range src.UnknownFields {
+ buf.WriteByte(k)
+ buf.WriteByte(0)
+ buf.WriteString(v)
+ buf.WriteByte(0)
+ }
+ buf.WriteByte(0)
+
+ binary.BigEndian.PutUint32(buf.Bytes()[1:5], uint32(buf.Len()-1))
+
+ return buf.Bytes()
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/execute.go b/vendor/github.com/jackc/pgx/pgproto3/execute.go
new file mode 100644
index 0000000..76da994
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/execute.go
@@ -0,0 +1,60 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Execute struct {
+ Portal string
+ MaxRows uint32
+}
+
+func (*Execute) Frontend() {}
+
+func (dst *Execute) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Portal = string(b[:len(b)-1])
+
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Execute"}
+ }
+ dst.MaxRows = binary.BigEndian.Uint32(buf.Next(4))
+
+ return nil
+}
+
+func (src *Execute) Encode(dst []byte) []byte {
+ dst = append(dst, 'E')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Portal...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, src.MaxRows)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Execute) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Portal string
+ MaxRows uint32
+ }{
+ Type: "Execute",
+ Portal: src.Portal,
+ MaxRows: src.MaxRows,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/flush.go b/vendor/github.com/jackc/pgx/pgproto3/flush.go
new file mode 100644
index 0000000..7fd5e98
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/flush.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Flush struct{}
+
+func (*Flush) Frontend() {}
+
+func (dst *Flush) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Flush", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Flush) Encode(dst []byte) []byte {
+ return append(dst, 'H', 0, 0, 0, 4)
+}
+
+func (src *Flush) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Flush",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/frontend.go b/vendor/github.com/jackc/pgx/pgproto3/frontend.go
new file mode 100644
index 0000000..d803d36
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/frontend.go
@@ -0,0 +1,122 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/jackc/pgx/chunkreader"
+ "github.com/pkg/errors"
+)
+
+type Frontend struct {
+ cr *chunkreader.ChunkReader
+ w io.Writer
+
+ // Backend message flyweights
+ authentication Authentication
+ backendKeyData BackendKeyData
+ bindComplete BindComplete
+ closeComplete CloseComplete
+ commandComplete CommandComplete
+ copyBothResponse CopyBothResponse
+ copyData CopyData
+ copyInResponse CopyInResponse
+ copyOutResponse CopyOutResponse
+ dataRow DataRow
+ emptyQueryResponse EmptyQueryResponse
+ errorResponse ErrorResponse
+ functionCallResponse FunctionCallResponse
+ noData NoData
+ noticeResponse NoticeResponse
+ notificationResponse NotificationResponse
+ parameterDescription ParameterDescription
+ parameterStatus ParameterStatus
+ parseComplete ParseComplete
+ readyForQuery ReadyForQuery
+ rowDescription RowDescription
+
+ bodyLen int
+ msgType byte
+ partialMsg bool
+}
+
+func NewFrontend(r io.Reader, w io.Writer) (*Frontend, error) {
+ cr := chunkreader.NewChunkReader(r)
+ return &Frontend{cr: cr, w: w}, nil
+}
+
+func (b *Frontend) Send(msg FrontendMessage) error {
+ _, err := b.w.Write(msg.Encode(nil))
+ return err
+}
+
+func (b *Frontend) Receive() (BackendMessage, error) {
+ if !b.partialMsg {
+ header, err := b.cr.Next(5)
+ if err != nil {
+ return nil, err
+ }
+
+ b.msgType = header[0]
+ b.bodyLen = int(binary.BigEndian.Uint32(header[1:])) - 4
+ b.partialMsg = true
+ }
+
+ var msg BackendMessage
+ switch b.msgType {
+ case '1':
+ msg = &b.parseComplete
+ case '2':
+ msg = &b.bindComplete
+ case '3':
+ msg = &b.closeComplete
+ case 'A':
+ msg = &b.notificationResponse
+ case 'C':
+ msg = &b.commandComplete
+ case 'd':
+ msg = &b.copyData
+ case 'D':
+ msg = &b.dataRow
+ case 'E':
+ msg = &b.errorResponse
+ case 'G':
+ msg = &b.copyInResponse
+ case 'H':
+ msg = &b.copyOutResponse
+ case 'I':
+ msg = &b.emptyQueryResponse
+ case 'K':
+ msg = &b.backendKeyData
+ case 'n':
+ msg = &b.noData
+ case 'N':
+ msg = &b.noticeResponse
+ case 'R':
+ msg = &b.authentication
+ case 'S':
+ msg = &b.parameterStatus
+ case 't':
+ msg = &b.parameterDescription
+ case 'T':
+ msg = &b.rowDescription
+ case 'V':
+ msg = &b.functionCallResponse
+ case 'W':
+ msg = &b.copyBothResponse
+ case 'Z':
+ msg = &b.readyForQuery
+ default:
+ return nil, errors.Errorf("unknown message type: %c", b.msgType)
+ }
+
+ msgBody, err := b.cr.Next(b.bodyLen)
+ if err != nil {
+ return nil, err
+ }
+
+ b.partialMsg = false
+
+ err = msg.Decode(msgBody)
+ return msg, err
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go b/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go
new file mode 100644
index 0000000..bb325b6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/function_call_response.go
@@ -0,0 +1,78 @@
+package pgproto3
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type FunctionCallResponse struct {
+ Result []byte
+}
+
+func (*FunctionCallResponse) Backend() {}
+
+func (dst *FunctionCallResponse) Decode(src []byte) error {
+ if len(src) < 4 {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+ rp := 0
+ resultSize := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ if resultSize == -1 {
+ dst.Result = nil
+ return nil
+ }
+
+ if len(src[rp:]) != resultSize {
+ return &invalidMessageFormatErr{messageType: "FunctionCallResponse"}
+ }
+
+ dst.Result = src[rp:]
+ return nil
+}
+
+func (src *FunctionCallResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'V')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ if src.Result == nil {
+ dst = pgio.AppendInt32(dst, -1)
+ } else {
+ dst = pgio.AppendInt32(dst, int32(len(src.Result)))
+ dst = append(dst, src.Result...)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *FunctionCallResponse) MarshalJSON() ([]byte, error) {
+ var formattedValue map[string]string
+ var hasNonPrintable bool
+ for _, b := range src.Result {
+ if b < 32 {
+ hasNonPrintable = true
+ break
+ }
+ }
+
+ if hasNonPrintable {
+ formattedValue = map[string]string{"binary": hex.EncodeToString(src.Result)}
+ } else {
+ formattedValue = map[string]string{"text": string(src.Result)}
+ }
+
+ return json.Marshal(struct {
+ Type string
+ Result map[string]string
+ }{
+ Type: "FunctionCallResponse",
+ Result: formattedValue,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/no_data.go b/vendor/github.com/jackc/pgx/pgproto3/no_data.go
new file mode 100644
index 0000000..1fb47c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/no_data.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type NoData struct{}
+
+func (*NoData) Backend() {}
+
+func (dst *NoData) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "NoData", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *NoData) Encode(dst []byte) []byte {
+ return append(dst, 'n', 0, 0, 0, 4)
+}
+
+func (src *NoData) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "NoData",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/notice_response.go b/vendor/github.com/jackc/pgx/pgproto3/notice_response.go
new file mode 100644
index 0000000..e4595aa
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/notice_response.go
@@ -0,0 +1,13 @@
+package pgproto3
+
+type NoticeResponse ErrorResponse
+
+func (*NoticeResponse) Backend() {}
+
+func (dst *NoticeResponse) Decode(src []byte) error {
+ return (*ErrorResponse)(dst).Decode(src)
+}
+
+func (src *NoticeResponse) Encode(dst []byte) []byte {
+ return append(dst, (*ErrorResponse)(src).marshalBinary('N')...)
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/notification_response.go b/vendor/github.com/jackc/pgx/pgproto3/notification_response.go
new file mode 100644
index 0000000..b14007b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/notification_response.go
@@ -0,0 +1,67 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type NotificationResponse struct {
+ PID uint32
+ Channel string
+ Payload string
+}
+
+func (*NotificationResponse) Backend() {}
+
+func (dst *NotificationResponse) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ pid := binary.BigEndian.Uint32(buf.Next(4))
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ channel := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ payload := string(b[:len(b)-1])
+
+ *dst = NotificationResponse{PID: pid, Channel: channel, Payload: payload}
+ return nil
+}
+
+func (src *NotificationResponse) Encode(dst []byte) []byte {
+ dst = append(dst, 'A')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Channel...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Payload...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *NotificationResponse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ PID uint32
+ Channel string
+ Payload string
+ }{
+ Type: "NotificationResponse",
+ PID: src.PID,
+ Channel: src.Channel,
+ Payload: src.Payload,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go b/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go
new file mode 100644
index 0000000..1fa3c92
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parameter_description.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type ParameterDescription struct {
+ ParameterOIDs []uint32
+}
+
+func (*ParameterDescription) Backend() {}
+
+func (dst *ParameterDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "ParameterDescription"}
+ }
+
+ // Reported parameter count will be incorrect when number of args is greater than uint16
+ buf.Next(2)
+ // Instead infer parameter count by remaining size of message
+ parameterCount := buf.Len() / 4
+
+ *dst = ParameterDescription{ParameterOIDs: make([]uint32, parameterCount)}
+
+ for i := 0; i < parameterCount; i++ {
+ dst.ParameterOIDs[i] = binary.BigEndian.Uint32(buf.Next(4))
+ }
+
+ return nil
+}
+
+func (src *ParameterDescription) Encode(dst []byte) []byte {
+ dst = append(dst, 't')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *ParameterDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ParameterOIDs []uint32
+ }{
+ Type: "ParameterDescription",
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go b/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go
new file mode 100644
index 0000000..b3bac33
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parameter_status.go
@@ -0,0 +1,61 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type ParameterStatus struct {
+ Name string
+ Value string
+}
+
+func (*ParameterStatus) Backend() {}
+
+func (dst *ParameterStatus) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ name := string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ value := string(b[:len(b)-1])
+
+ *dst = ParameterStatus{Name: name, Value: value}
+ return nil
+}
+
+func (src *ParameterStatus) Encode(dst []byte) []byte {
+ dst = append(dst, 'S')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Value...)
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (ps *ParameterStatus) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Value string
+ }{
+ Type: "ParameterStatus",
+ Name: ps.Name,
+ Value: ps.Value,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parse.go b/vendor/github.com/jackc/pgx/pgproto3/parse.go
new file mode 100644
index 0000000..ca4834c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parse.go
@@ -0,0 +1,83 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Parse struct {
+ Name string
+ Query string
+ ParameterOIDs []uint32
+}
+
+func (*Parse) Frontend() {}
+
+func (dst *Parse) Decode(src []byte) error {
+ *dst = Parse{}
+
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Name = string(b[:len(b)-1])
+
+ b, err = buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Query = string(b[:len(b)-1])
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ parameterOIDCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ for i := 0; i < parameterOIDCount; i++ {
+ if buf.Len() < 4 {
+ return &invalidMessageFormatErr{messageType: "Parse"}
+ }
+ dst.ParameterOIDs = append(dst.ParameterOIDs, binary.BigEndian.Uint32(buf.Next(4)))
+ }
+
+ return nil
+}
+
+func (src *Parse) Encode(dst []byte) []byte {
+ dst = append(dst, 'P')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = append(dst, src.Name...)
+ dst = append(dst, 0)
+ dst = append(dst, src.Query...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.ParameterOIDs)))
+ for _, oid := range src.ParameterOIDs {
+ dst = pgio.AppendUint32(dst, oid)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *Parse) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Name string
+ Query string
+ ParameterOIDs []uint32
+ }{
+ Type: "Parse",
+ Name: src.Name,
+ Query: src.Query,
+ ParameterOIDs: src.ParameterOIDs,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go b/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go
new file mode 100644
index 0000000..462a89b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/parse_complete.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ParseComplete struct{}
+
+func (*ParseComplete) Backend() {}
+
+func (dst *ParseComplete) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "ParseComplete", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *ParseComplete) Encode(dst []byte) []byte {
+ return append(dst, '1', 0, 0, 0, 4)
+}
+
+func (src *ParseComplete) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "ParseComplete",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/password_message.go b/vendor/github.com/jackc/pgx/pgproto3/password_message.go
new file mode 100644
index 0000000..2ad3fe4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/password_message.go
@@ -0,0 +1,46 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type PasswordMessage struct {
+ Password string
+}
+
+func (*PasswordMessage) Frontend() {}
+
+func (dst *PasswordMessage) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ b, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ dst.Password = string(b[:len(b)-1])
+
+ return nil
+}
+
+func (src *PasswordMessage) Encode(dst []byte) []byte {
+ dst = append(dst, 'p')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.Password)+1))
+
+ dst = append(dst, src.Password...)
+ dst = append(dst, 0)
+
+ return dst
+}
+
+func (src *PasswordMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Password string
+ }{
+ Type: "PasswordMessage",
+ Password: src.Password,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go b/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go
new file mode 100644
index 0000000..fe7b085
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/pgproto3.go
@@ -0,0 +1,42 @@
+package pgproto3
+
+import "fmt"
+
+// Message is the interface implemented by an object that can decode and encode
+// a particular PostgreSQL message.
+type Message interface {
+ // Decode is allowed and expected to retain a reference to data after
+ // returning (unlike encoding.BinaryUnmarshaler).
+ Decode(data []byte) error
+
+ // Encode appends itself to dst and returns the new buffer.
+ Encode(dst []byte) []byte
+}
+
+type FrontendMessage interface {
+ Message
+ Frontend() // no-op method to distinguish frontend from backend methods
+}
+
+type BackendMessage interface {
+ Message
+ Backend() // no-op method to distinguish frontend from backend methods
+}
+
+type invalidMessageLenErr struct {
+ messageType string
+ expectedLen int
+ actualLen int
+}
+
+func (e *invalidMessageLenErr) Error() string {
+ return fmt.Sprintf("%s body must have length of %d, but it is %d", e.messageType, e.expectedLen, e.actualLen)
+}
+
+type invalidMessageFormatErr struct {
+ messageType string
+}
+
+func (e *invalidMessageFormatErr) Error() string {
+ return fmt.Sprintf("%s body is invalid", e.messageType)
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/query.go b/vendor/github.com/jackc/pgx/pgproto3/query.go
new file mode 100644
index 0000000..d80c0fb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/query.go
@@ -0,0 +1,45 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type Query struct {
+ String string
+}
+
+func (*Query) Frontend() {}
+
+func (dst *Query) Decode(src []byte) error {
+ i := bytes.IndexByte(src, 0)
+ if i != len(src)-1 {
+ return &invalidMessageFormatErr{messageType: "Query"}
+ }
+
+ dst.String = string(src[:i])
+
+ return nil
+}
+
+func (src *Query) Encode(dst []byte) []byte {
+ dst = append(dst, 'Q')
+ dst = pgio.AppendInt32(dst, int32(4+len(src.String)+1))
+
+ dst = append(dst, src.String...)
+ dst = append(dst, 0)
+
+ return dst
+}
+
+func (src *Query) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ String string
+ }{
+ Type: "Query",
+ String: src.String,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go b/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go
new file mode 100644
index 0000000..63b902b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/ready_for_query.go
@@ -0,0 +1,35 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type ReadyForQuery struct {
+ TxStatus byte
+}
+
+func (*ReadyForQuery) Backend() {}
+
+func (dst *ReadyForQuery) Decode(src []byte) error {
+ if len(src) != 1 {
+ return &invalidMessageLenErr{messageType: "ReadyForQuery", expectedLen: 1, actualLen: len(src)}
+ }
+
+ dst.TxStatus = src[0]
+
+ return nil
+}
+
+func (src *ReadyForQuery) Encode(dst []byte) []byte {
+ return append(dst, 'Z', 0, 0, 0, 5, src.TxStatus)
+}
+
+func (src *ReadyForQuery) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ TxStatus string
+ }{
+ Type: "ReadyForQuery",
+ TxStatus: string(src.TxStatus),
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/row_description.go b/vendor/github.com/jackc/pgx/pgproto3/row_description.go
new file mode 100644
index 0000000..d0df11b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/row_description.go
@@ -0,0 +1,100 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+const (
+ TextFormat = 0
+ BinaryFormat = 1
+)
+
+type FieldDescription struct {
+ Name string
+ TableOID uint32
+ TableAttributeNumber uint16
+ DataTypeOID uint32
+ DataTypeSize int16
+ TypeModifier uint32
+ Format int16
+}
+
+type RowDescription struct {
+ Fields []FieldDescription
+}
+
+func (*RowDescription) Backend() {}
+
+func (dst *RowDescription) Decode(src []byte) error {
+ buf := bytes.NewBuffer(src)
+
+ if buf.Len() < 2 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+ fieldCount := int(binary.BigEndian.Uint16(buf.Next(2)))
+
+ *dst = RowDescription{Fields: make([]FieldDescription, fieldCount)}
+
+ for i := 0; i < fieldCount; i++ {
+ var fd FieldDescription
+ bName, err := buf.ReadBytes(0)
+ if err != nil {
+ return err
+ }
+ fd.Name = string(bName[:len(bName)-1])
+
+ // Since buf.Next() doesn't return an error if we hit the end of the buffer
+ // check Len ahead of time
+ if buf.Len() < 18 {
+ return &invalidMessageFormatErr{messageType: "RowDescription"}
+ }
+
+ fd.TableOID = binary.BigEndian.Uint32(buf.Next(4))
+ fd.TableAttributeNumber = binary.BigEndian.Uint16(buf.Next(2))
+ fd.DataTypeOID = binary.BigEndian.Uint32(buf.Next(4))
+ fd.DataTypeSize = int16(binary.BigEndian.Uint16(buf.Next(2)))
+ fd.TypeModifier = binary.BigEndian.Uint32(buf.Next(4))
+ fd.Format = int16(binary.BigEndian.Uint16(buf.Next(2)))
+
+ dst.Fields[i] = fd
+ }
+
+ return nil
+}
+
+func (src *RowDescription) Encode(dst []byte) []byte {
+ dst = append(dst, 'T')
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint16(dst, uint16(len(src.Fields)))
+ for _, fd := range src.Fields {
+ dst = append(dst, fd.Name...)
+ dst = append(dst, 0)
+
+ dst = pgio.AppendUint32(dst, fd.TableOID)
+ dst = pgio.AppendUint16(dst, fd.TableAttributeNumber)
+ dst = pgio.AppendUint32(dst, fd.DataTypeOID)
+ dst = pgio.AppendInt16(dst, fd.DataTypeSize)
+ dst = pgio.AppendUint32(dst, fd.TypeModifier)
+ dst = pgio.AppendInt16(dst, fd.Format)
+ }
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *RowDescription) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ Fields []FieldDescription
+ }{
+ Type: "RowDescription",
+ Fields: src.Fields,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/startup_message.go b/vendor/github.com/jackc/pgx/pgproto3/startup_message.go
new file mode 100644
index 0000000..6c5d4f9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/startup_message.go
@@ -0,0 +1,97 @@
+package pgproto3
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/json"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const (
+ ProtocolVersionNumber = 196608 // 3.0
+ sslRequestNumber = 80877103
+)
+
+type StartupMessage struct {
+ ProtocolVersion uint32
+ Parameters map[string]string
+}
+
+func (*StartupMessage) Frontend() {}
+
+func (dst *StartupMessage) Decode(src []byte) error {
+ if len(src) < 4 {
+ return errors.Errorf("startup message too short")
+ }
+
+ dst.ProtocolVersion = binary.BigEndian.Uint32(src)
+ rp := 4
+
+ if dst.ProtocolVersion == sslRequestNumber {
+ return errors.Errorf("can't handle ssl connection request")
+ }
+
+ if dst.ProtocolVersion != ProtocolVersionNumber {
+ return errors.Errorf("Bad startup message version number. Expected %d, got %d", ProtocolVersionNumber, dst.ProtocolVersion)
+ }
+
+ dst.Parameters = make(map[string]string)
+ for {
+ idx := bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMesage"}
+ }
+ key := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ idx = bytes.IndexByte(src[rp:], 0)
+ if idx < 0 {
+ return &invalidMessageFormatErr{messageType: "StartupMesage"}
+ }
+ value := string(src[rp : rp+idx])
+ rp += idx + 1
+
+ dst.Parameters[key] = value
+
+ if len(src[rp:]) == 1 {
+ if src[rp] != 0 {
+ return errors.Errorf("Bad startup message last byte. Expected 0, got %d", src[rp])
+ }
+ break
+ }
+ }
+
+ return nil
+}
+
+func (src *StartupMessage) Encode(dst []byte) []byte {
+ sp := len(dst)
+ dst = pgio.AppendInt32(dst, -1)
+
+ dst = pgio.AppendUint32(dst, src.ProtocolVersion)
+ for k, v := range src.Parameters {
+ dst = append(dst, k...)
+ dst = append(dst, 0)
+ dst = append(dst, v...)
+ dst = append(dst, 0)
+ }
+ dst = append(dst, 0)
+
+ pgio.SetInt32(dst[sp:], int32(len(dst[sp:])))
+
+ return dst
+}
+
+func (src *StartupMessage) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ ProtocolVersion uint32
+ Parameters map[string]string
+ }{
+ Type: "StartupMessage",
+ ProtocolVersion: src.ProtocolVersion,
+ Parameters: src.Parameters,
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/sync.go b/vendor/github.com/jackc/pgx/pgproto3/sync.go
new file mode 100644
index 0000000..85f4749
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/sync.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Sync struct{}
+
+func (*Sync) Frontend() {}
+
+func (dst *Sync) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Sync", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Sync) Encode(dst []byte) []byte {
+ return append(dst, 'S', 0, 0, 0, 4)
+}
+
+func (src *Sync) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Sync",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgproto3/terminate.go b/vendor/github.com/jackc/pgx/pgproto3/terminate.go
new file mode 100644
index 0000000..0a3310d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgproto3/terminate.go
@@ -0,0 +1,29 @@
+package pgproto3
+
+import (
+ "encoding/json"
+)
+
+type Terminate struct{}
+
+func (*Terminate) Frontend() {}
+
+func (dst *Terminate) Decode(src []byte) error {
+ if len(src) != 0 {
+ return &invalidMessageLenErr{messageType: "Terminate", expectedLen: 0, actualLen: len(src)}
+ }
+
+ return nil
+}
+
+func (src *Terminate) Encode(dst []byte) []byte {
+ return append(dst, 'X', 0, 0, 0, 4)
+}
+
+func (src *Terminate) MarshalJSON() ([]byte, error) {
+ return json.Marshal(struct {
+ Type string
+ }{
+ Type: "Terminate",
+ })
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/aclitem.go b/vendor/github.com/jackc/pgx/pgtype/aclitem.go
new file mode 100644
index 0000000..35269e9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/aclitem.go
@@ -0,0 +1,126 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+// ACLItem is used for PostgreSQL's aclitem data type. A sample aclitem
+// might look like this:
+//
+// postgres=arwdDxt/postgres
+//
+// Note, however, that because the user/role name part of an aclitem is
+// an identifier, it follows all the usual formatting rules for SQL
+// identifiers: if it contains spaces and other special characters,
+// it should appear in double-quotes:
+//
+// postgres=arwdDxt/"role with spaces"
+//
+type ACLItem struct {
+ String string
+ Status Status
+}
+
+func (dst *ACLItem) Set(src interface{}) error {
+ switch value := src.(type) {
+ case string:
+ *dst = ACLItem{String: value, Status: Present}
+ case *string:
+ if value == nil {
+ *dst = ACLItem{Status: Null}
+ } else {
+ *dst = ACLItem{String: *value, Status: Present}
+ }
+ default:
+ if originalSrc, ok := underlyingStringType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ACLItem", value)
+ }
+
+ return nil
+}
+
+func (dst *ACLItem) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.String
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ACLItem) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *string:
+ *v = src.String
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ACLItem) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ACLItem{Status: Null}
+ return nil
+ }
+
+ *dst = ACLItem{String: string(src), Status: Present}
+ return nil
+}
+
+func (src *ACLItem) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.String...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ACLItem) Scan(src interface{}) error {
+ if src == nil {
+ *dst = ACLItem{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ACLItem) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.String, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go b/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go
new file mode 100644
index 0000000..0a82929
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/aclitem_array.go
@@ -0,0 +1,212 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+type ACLItemArray struct {
+ Elements []ACLItem
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *ACLItemArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = ACLItemArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []string:
+ if value == nil {
+ *dst = ACLItemArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = ACLItemArray{Status: Present}
+ } else {
+ elements := make([]ACLItem, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = ACLItemArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ACLItemArray", value)
+ }
+
+ return nil
+}
+
+func (dst *ACLItemArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ACLItemArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ACLItemArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ACLItemArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []ACLItem
+
+ if len(uta.Elements) > 0 {
+ elements = make([]ACLItem, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem ACLItem
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = ACLItemArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (src *ACLItemArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ACLItemArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ACLItemArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/array.go b/vendor/github.com/jackc/pgx/pgtype/array.go
new file mode 100644
index 0000000..5b852ed
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/array.go
@@ -0,0 +1,352 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Information on the internals of PostgreSQL arrays can be found in
+// src/include/utils/array.h and src/backend/utils/adt/arrayfuncs.c. Of
+// particular interest is the array_send function.
+
// ArrayHeader is the fixed portion of the PostgreSQL binary array wire
// format: whether any element is NULL, the element type OID, and one
// ArrayDimension per dimension.
type ArrayHeader struct {
	ContainsNull bool
	ElementOID   int32
	Dimensions   []ArrayDimension
}

// ArrayDimension describes a single array dimension: its length and the
// index of its first element (PostgreSQL's default lower bound is 1).
type ArrayDimension struct {
	Length     int32
	LowerBound int32
}
+
+func (dst *ArrayHeader) DecodeBinary(ci *ConnInfo, src []byte) (int, error) {
+ if len(src) < 12 {
+ return 0, errors.Errorf("array header too short: %d", len(src))
+ }
+
+ rp := 0
+
+ numDims := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.ContainsNull = binary.BigEndian.Uint32(src[rp:]) == 1
+ rp += 4
+
+ dst.ElementOID = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ if numDims > 0 {
+ dst.Dimensions = make([]ArrayDimension, numDims)
+ }
+ if len(src) < 12+numDims*8 {
+ return 0, errors.Errorf("array header too short for %d dimensions: %d", numDims, len(src))
+ }
+ for i := range dst.Dimensions {
+ dst.Dimensions[i].Length = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ dst.Dimensions[i].LowerBound = int32(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ }
+
+ return rp, nil
+}
+
// EncodeBinary appends the wire-format array header to buf: dimension
// count, hasnull flag (as int32 0/1), element OID, then each dimension's
// length and lower bound.
func (src *ArrayHeader) EncodeBinary(ci *ConnInfo, buf []byte) []byte {
	buf = pgio.AppendInt32(buf, int32(len(src.Dimensions)))

	var containsNull int32
	if src.ContainsNull {
		containsNull = 1
	}
	buf = pgio.AppendInt32(buf, containsNull)

	buf = pgio.AppendInt32(buf, src.ElementOID)

	for i := range src.Dimensions {
		buf = pgio.AppendInt32(buf, src.Dimensions[i].Length)
		buf = pgio.AppendInt32(buf, src.Dimensions[i].LowerBound)
	}

	return buf
}
+
// UntypedTextArray is the raw result of parsing a text-format array
// literal: the elements as uninterpreted strings plus the dimensions.
// Interpreting element values is left to the caller.
type UntypedTextArray struct {
	Elements   []string
	Dimensions []ArrayDimension
}
+
+func ParseUntypedTextArray(src string) (*UntypedTextArray, error) {
+ dst := &UntypedTextArray{}
+
+ buf := bytes.NewBufferString(src)
+
+ skipWhitespace(buf)
+
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ var explicitDimensions []ArrayDimension
+
+ // Array has explicit dimensions
+ if r == '[' {
+ buf.UnreadRune()
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r == '=' {
+ break
+ } else if r != '[' {
+ return nil, errors.Errorf("invalid array, expected '[' or '=' got %v", r)
+ }
+
+ lower, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r != ':' {
+ return nil, errors.Errorf("invalid array, expected ':' got %v", r)
+ }
+
+ upper, err := arrayParseInteger(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r != ']' {
+ return nil, errors.Errorf("invalid array, expected ']' got %v", r)
+ }
+
+ explicitDimensions = append(explicitDimensions, ArrayDimension{LowerBound: lower, Length: upper - lower + 1})
+ }
+
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+ }
+
+ if r != '{' {
+ return nil, errors.Errorf("invalid array, expected '{': %v", err)
+ }
+
+ implicitDimensions := []ArrayDimension{{LowerBound: 1, Length: 0}}
+
+ // Consume all initial opening brackets. This provides number of dimensions.
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ if r == '{' {
+ implicitDimensions[len(implicitDimensions)-1].Length = 1
+ implicitDimensions = append(implicitDimensions, ArrayDimension{LowerBound: 1})
+ } else {
+ buf.UnreadRune()
+ break
+ }
+ }
+ currentDim := len(implicitDimensions) - 1
+ counterDim := currentDim
+
+ for {
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return nil, errors.Errorf("invalid array: %v", err)
+ }
+
+ switch r {
+ case '{':
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ currentDim++
+ case ',':
+ case '}':
+ currentDim--
+ if currentDim < counterDim {
+ counterDim = currentDim
+ }
+ default:
+ buf.UnreadRune()
+ value, err := arrayParseValue(buf)
+ if err != nil {
+ return nil, errors.Errorf("invalid array value: %v", err)
+ }
+ if currentDim == counterDim {
+ implicitDimensions[currentDim].Length++
+ }
+ dst.Elements = append(dst.Elements, value)
+ }
+
+ if currentDim < 0 {
+ break
+ }
+ }
+
+ skipWhitespace(buf)
+
+ if buf.Len() > 0 {
+ return nil, errors.Errorf("unexpected trailing data: %v", buf.String())
+ }
+
+ if len(dst.Elements) == 0 {
+ dst.Dimensions = nil
+ } else if len(explicitDimensions) > 0 {
+ dst.Dimensions = explicitDimensions
+ } else {
+ dst.Dimensions = implicitDimensions
+ }
+
+ return dst, nil
+}
+
+func skipWhitespace(buf *bytes.Buffer) {
+ var r rune
+ var err error
+ for r, _, _ = buf.ReadRune(); unicode.IsSpace(r); r, _, _ = buf.ReadRune() {
+ }
+
+ if err != io.EOF {
+ buf.UnreadRune()
+ }
+}
+
+func arrayParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return arrayParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case ',', '}':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ s.WriteRune(r)
+ }
+}
+
+func arrayParseInteger(buf *bytes.Buffer) (int32, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return 0, err
+ }
+
+ if '0' <= r && r <= '9' {
+ s.WriteRune(r)
+ } else {
+ buf.UnreadRune()
+ n, err := strconv.ParseInt(s.String(), 10, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(n), nil
+ }
+ }
+}
+
+func EncodeTextArrayDimensions(buf []byte, dimensions []ArrayDimension) []byte {
+ var customDimensions bool
+ for _, dim := range dimensions {
+ if dim.LowerBound != 1 {
+ customDimensions = true
+ }
+ }
+
+ if !customDimensions {
+ return buf
+ }
+
+ for _, dim := range dimensions {
+ buf = append(buf, '[')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound), 10)...)
+ buf = append(buf, ':')
+ buf = append(buf, strconv.FormatInt(int64(dim.LowerBound+dim.Length-1), 10)...)
+ buf = append(buf, ']')
+ }
+
+ return append(buf, '=')
+}
+
// quoteArrayReplacer escapes the two characters that must be
// backslash-escaped inside a double-quoted array element.
var quoteArrayReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)

// quoteArrayElement wraps src in double quotes, escaping embedded quotes
// and backslashes.
func quoteArrayElement(src string) string {
	var b strings.Builder
	b.Grow(len(src) + 2)
	b.WriteByte('"')
	b.WriteString(quoteArrayReplacer.Replace(src))
	b.WriteByte('"')
	return b.String()
}

// QuoteArrayElementIfNeeded quotes src when its raw form would be
// ambiguous in array text syntax: the empty string, the word NULL in any
// case, leading/trailing spaces, or any of the characters {},"\.
func QuoteArrayElementIfNeeded(src string) string {
	switch {
	case src == "":
		return quoteArrayElement(src)
	case len(src) == 4 && strings.ToLower(src) == "null":
		return quoteArrayElement(src)
	case src[0] == ' ' || src[len(src)-1] == ' ':
		return quoteArrayElement(src)
	case strings.ContainsAny(src, `{},"\`):
		return quoteArrayElement(src)
	}
	return src
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bit.go b/vendor/github.com/jackc/pgx/pgtype/bit.go
new file mode 100644
index 0000000..f892cee
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bit.go
@@ -0,0 +1,37 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// Bit is the fixed-length PostgreSQL bit(n) type. It shares its
// representation with Varbit and every method delegates to it.
// NOTE(review): no text-format (DecodeText/EncodeText) methods are
// delegated here — confirm that is intentional for this type.
type Bit Varbit

// Set delegates to Varbit.Set.
func (dst *Bit) Set(src interface{}) error {
	return (*Varbit)(dst).Set(src)
}

// Get delegates to Varbit.Get.
func (dst *Bit) Get() interface{} {
	return (*Varbit)(dst).Get()
}

// AssignTo delegates to Varbit.AssignTo.
func (src *Bit) AssignTo(dst interface{}) error {
	return (*Varbit)(src).AssignTo(dst)
}

// DecodeBinary delegates to Varbit.DecodeBinary.
func (dst *Bit) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Varbit)(dst).DecodeBinary(ci, src)
}

// EncodeBinary delegates to Varbit.EncodeBinary.
func (src *Bit) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Varbit)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *Bit) Scan(src interface{}) error {
	return (*Varbit)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *Bit) Value() (driver.Value, error) {
	return (*Varbit)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bool.go b/vendor/github.com/jackc/pgx/pgtype/bool.go
new file mode 100644
index 0000000..3a3eef4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bool.go
@@ -0,0 +1,159 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
// Bool represents a PostgreSQL boolean, with NULL/undefined tracked in
// Status.
type Bool struct {
	Bool   bool
	Status Status
}

// Set converts src into dst. Accepted inputs: bool, any string accepted
// by strconv.ParseBool, and named types reducible to those.
func (dst *Bool) Set(src interface{}) error {
	switch value := src.(type) {
	case bool:
		*dst = Bool{Bool: value, Status: Present}
	case string:
		bb, err := strconv.ParseBool(value)
		if err != nil {
			return err
		}
		*dst = Bool{Bool: bb, Status: Present}
	default:
		// Retry with the underlying type for named bool types.
		if originalSrc, ok := underlyingBoolType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Bool", value)
	}

	return nil
}

// Get returns the bool when present, nil when NULL, or the raw Status
// otherwise (e.g. Undefined).
func (dst *Bool) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst.Bool
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo assigns the value to dst, which must be a *bool or a type
// GetAssignToDstType can reduce to one.
func (src *Bool) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *bool:
			*v = src.Bool
			return nil
		default:
			// Dereference pointers / named types and retry.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText decodes PostgreSQL's one-byte text format: 't' is true.
// Any other single byte decodes as false rather than erroring.
func (dst *Bool) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Bool{Status: Null}
		return nil
	}

	if len(src) != 1 {
		return errors.Errorf("invalid length for bool: %v", len(src))
	}

	*dst = Bool{Bool: src[0] == 't', Status: Present}
	return nil
}

// DecodeBinary decodes the one-byte binary format: 1 is true. Any other
// single byte decodes as false rather than erroring.
func (dst *Bool) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Bool{Status: Null}
		return nil
	}

	if len(src) != 1 {
		return errors.Errorf("invalid length for bool: %v", len(src))
	}

	*dst = Bool{Bool: src[0] == 1, Status: Present}
	return nil
}
+
+func (src *Bool) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if src.Bool {
+ buf = append(buf, 't')
+ } else {
+ buf = append(buf, 'f')
+ }
+
+ return buf, nil
+}
+
+func (src *Bool) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if src.Bool {
+ buf = append(buf, 1)
+ } else {
+ buf = append(buf, 0)
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bool) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Bool{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case bool:
+ *dst = Bool{Bool: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Bool) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Bool, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bool_array.go b/vendor/github.com/jackc/pgx/pgtype/bool_array.go
new file mode 100644
index 0000000..67dd92a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bool_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// BoolArray represents a PostgreSQL boolean array (bool[]).
type BoolArray struct {
	Elements   []Bool
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src (nil or []bool, including named slice types) into dst.
func (dst *BoolArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []bool:
		if value == nil {
			*dst = BoolArray{Status: Null}
		} else if len(value) == 0 {
			*dst = BoolArray{Status: Present}
		} else {
			elements := make([]Bool, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// A flat slice becomes a one-dimensional array with the
			// PostgreSQL-default lower bound of 1.
			*dst = BoolArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		// Named types with an underlying slice type are retried as that type.
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to BoolArray", value)
	}

	return nil
}

// Get returns the array itself when present, nil when NULL, or the raw
// Status otherwise.
func (dst *BoolArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the array into dst, which must be a *[]bool or a type
// GetAssignToDstType can reduce to one.
func (src *BoolArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]bool:
			*v = make([]bool, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			// Dereference pointers / named types and retry.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText decodes from the PostgreSQL text array format (e.g. "{t,f}").
func (dst *BoolArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Bool

	if len(uta.Elements) > 0 {
		elements = make([]Bool, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Bool
			var elemSrc []byte
			// The bare word NULL denotes a NULL element (nil elemSrc).
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = BoolArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary decodes from the PostgreSQL binary array format: an
// ArrayHeader followed by length-prefixed elements (-1 length = NULL).
func (dst *BoolArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BoolArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = BoolArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Bool, elementCount)

	for i := range elements {
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		// NOTE(review): elemLen comes straight off the wire; a malformed
		// message could make src[rp : rp+elemLen] panic — confirm message
		// framing is validated upstream.
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = BoolArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText renders the array in PostgreSQL text format, including an
// explicit dimensions prefix when any lower bound differs from 1.
func (src *BoolArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension boundary this index starts.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension boundary this index ends.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary renders the array in PostgreSQL binary format: a header,
// then each element as a 4-byte length (-1 for NULL) plus payload.
func (src *BoolArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("bool"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "bool")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		sp := len(buf)
		// Write a -1 length placeholder, then patch in the real length once
		// the element payload has been appended. NULL elements keep the -1.
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *BoolArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *BoolArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/box.go b/vendor/github.com/jackc/pgx/pgtype/box.go
new file mode 100644
index 0000000..83df049
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/box.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Box represents a PostgreSQL box, stored as its two corner points.
type Box struct {
	P      [2]Vec2
	Status Status
}

// Set is not supported for Box; no Go source types convert to it.
func (dst *Box) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to Box", src)
}

// Get returns the box itself when present, nil when NULL, or the raw
// Status otherwise.
func (dst *Box) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo is not supported for Box; no Go destination types are
// assignable from it.
func (src *Box) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}
+
+func (dst *Box) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Box{Status: Null}
+ return nil
+ }
+
+ if len(src) < 11 {
+ return errors.Errorf("invalid length for Box: %v", len(src))
+ }
+
+ str := string(src[1:])
+
+ var end int
+ end = strings.IndexByte(str, ',')
+
+ x1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y1, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+3:]
+ end = strings.IndexByte(str, ',')
+
+ x2, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1 : len(str)-1]
+
+ y2, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Box{P: [2]Vec2{{x1, y1}, {x2, y2}}, Status: Present}
+ return nil
+}
+
// DecodeBinary decodes the 32-byte binary format: four big-endian
// IEEE-754 float64s (x1, y1, x2, y2).
func (dst *Box) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Box{Status: Null}
		return nil
	}

	if len(src) != 32 {
		return errors.Errorf("invalid length for Box: %v", len(src))
	}

	x1 := binary.BigEndian.Uint64(src)
	y1 := binary.BigEndian.Uint64(src[8:])
	x2 := binary.BigEndian.Uint64(src[16:])
	y2 := binary.BigEndian.Uint64(src[24:])

	*dst = Box{
		P: [2]Vec2{
			{math.Float64frombits(x1), math.Float64frombits(y1)},
			{math.Float64frombits(x2), math.Float64frombits(y2)},
		},
		Status: Present,
	}
	return nil
}

// EncodeText renders "(x1,y1),(x2,y2)".
// NOTE(review): %f prints a fixed 6 decimal places, so the text form
// loses float64 precision relative to the binary form — confirm this is
// acceptable for round-tripping.
func (src *Box) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = append(buf, fmt.Sprintf(`(%f,%f),(%f,%f)`,
		src.P[0].X, src.P[0].Y, src.P[1].X, src.P[1].Y)...)
	return buf, nil
}

// EncodeBinary writes the four coordinates as big-endian float64 bits.
func (src *Box) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].X))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].Y))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].X))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].Y))

	return buf, nil
}
+
// Scan implements the database/sql Scanner interface.
func (dst *Box) Scan(src interface{}) error {
	if src == nil {
		*dst = Box{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy first: the driver may reuse its buffer after Scan returns.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *Box) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bpchar.go b/vendor/github.com/jackc/pgx/pgtype/bpchar.go
new file mode 100644
index 0000000..2126318
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bpchar.go
@@ -0,0 +1,68 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// BPChar is fixed-length, blank padded char type
// character(n), char(n)
type BPChar Text

// Set converts from src to dst.
func (dst *BPChar) Set(src interface{}) error {
	return (*Text)(dst).Set(src)
}

// Get returns underlying value
func (dst *BPChar) Get() interface{} {
	return (*Text)(dst).Get()
}

// AssignTo assigns from src to dst. In addition to the conversions Text
// supports, a value containing exactly one rune can be assigned to a
// *rune.
func (src *BPChar) AssignTo(dst interface{}) error {
	if src.Status == Present {
		switch v := dst.(type) {
		case *rune:
			runes := []rune(src.String)
			if len(runes) == 1 {
				*v = runes[0]
				return nil
			}
		}
	}
	return (*Text)(src).AssignTo(dst)
}

// DecodeText delegates to Text.DecodeText.
func (dst *BPChar) DecodeText(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeText(ci, src)
}

// DecodeBinary delegates to Text.DecodeBinary.
func (dst *BPChar) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeBinary(ci, src)
}

// EncodeText delegates to Text.EncodeText.
func (src *BPChar) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeText(ci, buf)
}

// EncodeBinary delegates to Text.EncodeBinary.
func (src *BPChar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *BPChar) Scan(src interface{}) error {
	return (*Text)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *BPChar) Value() (driver.Value, error) {
	return (*Text)(src).Value()
}

// MarshalJSON delegates to Text.MarshalJSON.
func (src *BPChar) MarshalJSON() ([]byte, error) {
	return (*Text)(src).MarshalJSON()
}

// UnmarshalJSON delegates to Text.UnmarshalJSON.
func (dst *BPChar) UnmarshalJSON(b []byte) error {
	return (*Text)(dst).UnmarshalJSON(b)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go b/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go
new file mode 100644
index 0000000..1e6220f
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bpchar_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// BPCharArray represents a PostgreSQL character(n) array.
type BPCharArray struct {
	Elements   []BPChar
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src (nil or []string, including named slice types) into dst.
func (dst *BPCharArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []string:
		if value == nil {
			*dst = BPCharArray{Status: Null}
		} else if len(value) == 0 {
			*dst = BPCharArray{Status: Present}
		} else {
			elements := make([]BPChar, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// A flat slice becomes a one-dimensional array with the
			// PostgreSQL-default lower bound of 1.
			*dst = BPCharArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		// Named types with an underlying slice type are retried as that type.
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to BPCharArray", value)
	}

	return nil
}

// Get returns the array itself when present, nil when NULL, or the raw
// Status otherwise.
func (dst *BPCharArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the array into dst, which must be a *[]string or a
// type GetAssignToDstType can reduce to one.
func (src *BPCharArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]string:
			*v = make([]string, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			// Dereference pointers / named types and retry.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText decodes from the PostgreSQL text array format.
func (dst *BPCharArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []BPChar

	if len(uta.Elements) > 0 {
		elements = make([]BPChar, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem BPChar
			var elemSrc []byte
			// The bare word NULL denotes a NULL element (nil elemSrc).
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = BPCharArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary decodes from the PostgreSQL binary array format: an
// ArrayHeader followed by length-prefixed elements (-1 length = NULL).
func (dst *BPCharArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = BPCharArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = BPCharArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]BPChar, elementCount)

	for i := range elements {
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		// NOTE(review): elemLen comes straight off the wire; a malformed
		// message could make src[rp : rp+elemLen] panic — confirm message
		// framing is validated upstream.
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = BPCharArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText renders the array in PostgreSQL text format, including an
// explicit dimensions prefix when any lower bound differs from 1.
func (src *BPCharArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension boundary this index starts.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension boundary this index ends.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary renders the array in PostgreSQL binary format: a header,
// then each element as a 4-byte length (-1 for NULL) plus payload.
func (src *BPCharArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("bpchar"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "bpchar")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		sp := len(buf)
		// Write a -1 length placeholder, then patch in the real length once
		// the element payload has been appended. NULL elements keep the -1.
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *BPCharArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *BPCharArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bytea.go b/vendor/github.com/jackc/pgx/pgtype/bytea.go
new file mode 100644
index 0000000..c7117f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bytea.go
@@ -0,0 +1,156 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+
+ "github.com/pkg/errors"
+)
+
+type Bytea struct {
+ Bytes []byte
+ Status Status
+}
+
+func (dst *Bytea) Set(src interface{}) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case []byte:
+ if value != nil {
+ *dst = Bytea{Bytes: value, Status: Present}
+ } else {
+ *dst = Bytea{Status: Null}
+ }
+ default:
+ if originalSrc, ok := underlyingBytesType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Bytea", value)
+ }
+
+ return nil
+}
+
+func (dst *Bytea) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Bytes
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Bytea) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *[]byte:
+ buf := make([]byte, len(src.Bytes))
+ copy(buf, src.Bytes)
+ *v = buf
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText only supports the hex format. This has been the default since
+// PostgreSQL 9.0.
+func (dst *Bytea) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ if len(src) < 2 || src[0] != '\\' || src[1] != 'x' {
+ return errors.Errorf("invalid hex format")
+ }
+
+ buf := make([]byte, (len(src)-2)/2)
+ _, err := hex.Decode(buf, src[2:])
+ if err != nil {
+ return err
+ }
+
+ *dst = Bytea{Bytes: buf, Status: Present}
+ return nil
+}
+
+func (dst *Bytea) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ *dst = Bytea{Bytes: src, Status: Present}
+ return nil
+}
+
+func (src *Bytea) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, `\x`...)
+ buf = append(buf, hex.EncodeToString(src.Bytes)...)
+ return buf, nil
+}
+
+func (src *Bytea) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Bytes...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Bytea) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Bytea{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ buf := make([]byte, len(src))
+ copy(buf, src)
+ *dst = Bytea{Bytes: buf, Status: Present}
+ return nil
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Bytea) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Bytes, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/bytea_array.go b/vendor/github.com/jackc/pgx/pgtype/bytea_array.go
new file mode 100644
index 0000000..c8eb566
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/bytea_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type ByteaArray struct {
+ Elements []Bytea
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *ByteaArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case [][]byte:
+ if value == nil {
+ *dst = ByteaArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = ByteaArray{Status: Present}
+ } else {
+ elements := make([]Bytea, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = ByteaArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to ByteaArray", value)
+ }
+
+ return nil
+}
+
+func (dst *ByteaArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *ByteaArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[][]byte:
+ *v = make([][]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *ByteaArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Bytea
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Bytea, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Bytea
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = ByteaArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *ByteaArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = ByteaArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = ByteaArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Bytea, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = ByteaArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *ByteaArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *ByteaArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("bytea"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "bytea")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *ByteaArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *ByteaArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cid.go b/vendor/github.com/jackc/pgx/pgtype/cid.go
new file mode 100644
index 0000000..0ed54f4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cid.go
@@ -0,0 +1,61 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// CID is PostgreSQL's Command Identifier type.
+//
+// When one does
+//
+// select cmin, cmax, * from some_table;
+//
+// it is the data type of the cmin and cmax hidden system columns.
+//
+// It is currently implemented as an unsigned four byte integer.
+// Its definition can be found in src/include/c.h as CommandId
+// in the PostgreSQL sources.
+type CID pguint32
+
+// Set converts from src to dst. Note that as CID is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *CID) Set(src interface{}) error {
+ return (*pguint32)(dst).Set(src)
+}
+
+func (dst *CID) Get() interface{} {
+ return (*pguint32)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as CID is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *CID) AssignTo(dst interface{}) error {
+ return (*pguint32)(src).AssignTo(dst)
+}
+
+func (dst *CID) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeText(ci, src)
+}
+
+func (dst *CID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeBinary(ci, src)
+}
+
+func (src *CID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeText(ci, buf)
+}
+
+func (src *CID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *CID) Scan(src interface{}) error {
+ return (*pguint32)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *CID) Value() (driver.Value, error) {
+ return (*pguint32)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cidr.go b/vendor/github.com/jackc/pgx/pgtype/cidr.go
new file mode 100644
index 0000000..519b9ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cidr.go
@@ -0,0 +1,31 @@
+package pgtype
+
+type CIDR Inet
+
+func (dst *CIDR) Set(src interface{}) error {
+ return (*Inet)(dst).Set(src)
+}
+
+func (dst *CIDR) Get() interface{} {
+ return (*Inet)(dst).Get()
+}
+
+func (src *CIDR) AssignTo(dst interface{}) error {
+ return (*Inet)(src).AssignTo(dst)
+}
+
+func (dst *CIDR) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Inet)(dst).DecodeText(ci, src)
+}
+
+func (dst *CIDR) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Inet)(dst).DecodeBinary(ci, src)
+}
+
+func (src *CIDR) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Inet)(src).EncodeText(ci, buf)
+}
+
+func (src *CIDR) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Inet)(src).EncodeBinary(ci, buf)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/cidr_array.go b/vendor/github.com/jackc/pgx/pgtype/cidr_array.go
new file mode 100644
index 0000000..e4bb761
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/cidr_array.go
@@ -0,0 +1,329 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "net"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type CIDRArray struct {
+ Elements []CIDR
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *CIDRArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []*net.IPNet:
+ if value == nil {
+ *dst = CIDRArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = CIDRArray{Status: Present}
+ } else {
+ elements := make([]CIDR, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = CIDRArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []net.IP:
+ if value == nil {
+ *dst = CIDRArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = CIDRArray{Status: Present}
+ } else {
+ elements := make([]CIDR, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = CIDRArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to CIDRArray", value)
+ }
+
+ return nil
+}
+
+func (dst *CIDRArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *CIDRArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]*net.IPNet:
+ *v = make([]*net.IPNet, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]net.IP:
+ *v = make([]net.IP, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *CIDRArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []CIDR
+
+ if len(uta.Elements) > 0 {
+ elements = make([]CIDR, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem CIDR
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = CIDRArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *CIDRArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = CIDRArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = CIDRArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]CIDR, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = CIDRArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *CIDRArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *CIDRArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("cidr"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "cidr")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *CIDRArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *CIDRArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/circle.go b/vendor/github.com/jackc/pgx/pgtype/circle.go
new file mode 100644
index 0000000..97ecbf3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/circle.go
@@ -0,0 +1,146 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Circle struct {
+ P Vec2
+ R float64
+ Status Status
+}
+
+func (dst *Circle) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Circle", src)
+}
+
+func (dst *Circle) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Circle) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Circle) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ if len(src) < 9 {
+ return errors.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ str := string(src[2:])
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+2 : len(str)-1]
+
+ r, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Circle{P: Vec2{x, y}, R: r, Status: Present}
+ return nil
+}
+
+func (dst *Circle) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ if len(src) != 24 {
+ return errors.Errorf("invalid length for Circle: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+ r := binary.BigEndian.Uint64(src[16:])
+
+ *dst = Circle{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ R: math.Float64frombits(r),
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Circle) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`<(%f,%f),%f>`, src.P.X, src.P.Y, src.R)...)
+ return buf, nil
+}
+
+func (src *Circle) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.Y))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.R))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Circle) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Circle{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Circle) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/convert.go b/vendor/github.com/jackc/pgx/pgtype/convert.go
new file mode 100644
index 0000000..5dfb738
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/convert.go
@@ -0,0 +1,424 @@
+package pgtype
+
+import (
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+const maxUint = ^uint(0)
+const maxInt = int(maxUint >> 1)
+const minInt = -maxInt - 1
+
+// underlyingNumberType gets the underlying type that can be converted to Int2, Int4, Int8, Float4, or Float8
+func underlyingNumberType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Int:
+ convVal := int(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int8:
+ convVal := int8(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int16:
+ convVal := int16(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int32:
+ convVal := int32(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Int64:
+ convVal := int64(refVal.Int())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint:
+ convVal := uint(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint8:
+ convVal := uint8(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint16:
+ convVal := uint16(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint32:
+ convVal := uint32(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Uint64:
+ convVal := uint64(refVal.Uint())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Float32:
+ convVal := float32(refVal.Float())
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.Float64:
+ convVal := refVal.Float()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ case reflect.String:
+ convVal := refVal.String()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingBoolType gets the underlying type that can be converted to Bool
+func underlyingBoolType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Bool:
+ convVal := refVal.Bool()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingBytesType gets the underlying type that can be converted to []byte
+func underlyingBytesType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Slice:
+ if refVal.Type().Elem().Kind() == reflect.Uint8 {
+ convVal := refVal.Bytes()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+ }
+
+ return nil, false
+}
+
+// underlyingStringType gets the underlying type that can be converted to String
+func underlyingStringType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.String:
+ convVal := refVal.String()
+ return convVal, reflect.TypeOf(convVal) != refVal.Type()
+ }
+
+ return nil, false
+}
+
+// underlyingPtrType dereferences a pointer
+func underlyingPtrType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ }
+
+ return nil, false
+}
+
+// underlyingTimeType gets the underlying type that can be converted to time.Time
+func underlyingTimeType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return time.Time{}, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ }
+
+ timeType := reflect.TypeOf(time.Time{})
+ if refVal.Type().ConvertibleTo(timeType) {
+ return refVal.Convert(timeType).Interface(), true
+ }
+
+ return time.Time{}, false
+}
+
+// underlyingSliceType gets the underlying slice type
+func underlyingSliceType(val interface{}) (interface{}, bool) {
+ refVal := reflect.ValueOf(val)
+
+ switch refVal.Kind() {
+ case reflect.Ptr:
+ if refVal.IsNil() {
+ return nil, false
+ }
+ convVal := refVal.Elem().Interface()
+ return convVal, true
+ case reflect.Slice:
+ baseSliceType := reflect.SliceOf(refVal.Type().Elem())
+ if refVal.Type().ConvertibleTo(baseSliceType) {
+ convVal := refVal.Convert(baseSliceType)
+ return convVal.Interface(), reflect.TypeOf(convVal.Interface()) != refVal.Type()
+ }
+ }
+
+ return nil, false
+}
+
+func int64AssignTo(srcVal int64, srcStatus Status, dst interface{}) error {
+ if srcStatus == Present {
+ switch v := dst.(type) {
+ case *int:
+ if srcVal < int64(minInt) {
+ return errors.Errorf("%d is less than minimum value for int", srcVal)
+ } else if srcVal > int64(maxInt) {
+ return errors.Errorf("%d is greater than maximum value for int", srcVal)
+ }
+ *v = int(srcVal)
+ case *int8:
+ if srcVal < math.MinInt8 {
+ return errors.Errorf("%d is less than minimum value for int8", srcVal)
+ } else if srcVal > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for int8", srcVal)
+ }
+ *v = int8(srcVal)
+ case *int16:
+ if srcVal < math.MinInt16 {
+ return errors.Errorf("%d is less than minimum value for int16", srcVal)
+ } else if srcVal > math.MaxInt16 {
+ return errors.Errorf("%d is greater than maximum value for int16", srcVal)
+ }
+ *v = int16(srcVal)
+ case *int32:
+ if srcVal < math.MinInt32 {
+ return errors.Errorf("%d is less than minimum value for int32", srcVal)
+ } else if srcVal > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for int32", srcVal)
+ }
+ *v = int32(srcVal)
+ case *int64:
+ if srcVal < math.MinInt64 {
+ return errors.Errorf("%d is less than minimum value for int64", srcVal)
+ } else if srcVal > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for int64", srcVal)
+ }
+ *v = int64(srcVal)
+ case *uint:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint", srcVal)
+ } else if uint64(srcVal) > uint64(maxUint) {
+ return errors.Errorf("%d is greater than maximum value for uint", srcVal)
+ }
+ *v = uint(srcVal)
+ case *uint8:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint8", srcVal)
+ } else if srcVal > math.MaxUint8 {
+ return errors.Errorf("%d is greater than maximum value for uint8", srcVal)
+ }
+ *v = uint8(srcVal)
+ case *uint16:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint32", srcVal)
+ } else if srcVal > math.MaxUint16 {
+ return errors.Errorf("%d is greater than maximum value for uint16", srcVal)
+ }
+ *v = uint16(srcVal)
+ case *uint32:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint32", srcVal)
+ } else if srcVal > math.MaxUint32 {
+ return errors.Errorf("%d is greater than maximum value for uint32", srcVal)
+ }
+ *v = uint32(srcVal)
+ case *uint64:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for uint64", srcVal)
+ }
+ *v = uint64(srcVal)
+ default:
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ switch el.Kind() {
+ // if dst is a pointer to pointer, strip the pointer and try again
+ case reflect.Ptr:
+ if el.IsNil() {
+ // allocate destination
+ el.Set(reflect.New(el.Type().Elem()))
+ }
+ return int64AssignTo(srcVal, srcStatus, el.Interface())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if el.OverflowInt(int64(srcVal)) {
+ return errors.Errorf("cannot put %d into %T", srcVal, dst)
+ }
+ el.SetInt(int64(srcVal))
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if srcVal < 0 {
+ return errors.Errorf("%d is less than zero for %T", srcVal, dst)
+ }
+ if el.OverflowUint(uint64(srcVal)) {
+ return errors.Errorf("cannot put %d into %T", srcVal, dst)
+ }
+ el.SetUint(uint64(srcVal))
+ return nil
+ }
+ }
+ return errors.Errorf("cannot assign %v into %T", srcVal, dst)
+ }
+ return nil
+ }
+
+ // if dst is a pointer to pointer and srcStatus is not Present, nil it out
+ if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+ el := v.Elem()
+ if el.Kind() == reflect.Ptr {
+ el.Set(reflect.Zero(el.Type()))
+ return nil
+ }
+ }
+
+ return errors.Errorf("cannot assign %v %v into %T", srcVal, srcStatus, dst)
+}
+
+// float64AssignTo assigns srcVal into dst. dst must be a *float32 or
+// *float64, or, via reflection, a pointer to pointer (which is allocated and
+// retried) or a pointer to an integer kind when srcVal is exactly
+// representable as an integer. When srcStatus is not Present, only a
+// pointer-to-pointer dst is accepted and it is set to nil.
+func float64AssignTo(srcVal float64, srcStatus Status, dst interface{}) error {
+	if srcStatus == Present {
+		switch v := dst.(type) {
+		case *float32:
+			*v = float32(srcVal)
+		case *float64:
+			*v = srcVal
+		default:
+			if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+				el := v.Elem()
+				switch el.Kind() {
+				// if dst is a pointer to pointer, strip the pointer and try again
+				case reflect.Ptr:
+					if el.IsNil() {
+						// allocate destination
+						el.Set(reflect.New(el.Type().Elem()))
+					}
+					return float64AssignTo(srcVal, srcStatus, el.Interface())
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					// only convert to an integer kind when no fractional part is lost
+					i64 := int64(srcVal)
+					if float64(i64) == srcVal {
+						return int64AssignTo(i64, srcStatus, dst)
+					}
+				}
+			}
+			return errors.Errorf("cannot assign %v into %T", srcVal, dst)
+		}
+		return nil
+	}
+
+	// if dst is a pointer to pointer and srcStatus is not Present, nil it out
+	if v := reflect.ValueOf(dst); v.Kind() == reflect.Ptr {
+		el := v.Elem()
+		if el.Kind() == reflect.Ptr {
+			el.Set(reflect.Zero(el.Type()))
+			return nil
+		}
+	}
+
+	return errors.Errorf("cannot assign %v %v into %T", srcVal, srcStatus, dst)
+}
+
+// NullAssignTo assigns SQL NULL to dst by setting it to its type's zero
+// value. dst must be a pointer to a pointer, slice, or map; anything else
+// cannot represent NULL and produces an error.
+func NullAssignTo(dst interface{}) error {
+	dstPtr := reflect.ValueOf(dst)
+
+	// AssignTo dst must always be a pointer
+	if dstPtr.Kind() != reflect.Ptr {
+		return errors.Errorf("cannot assign NULL to %T", dst)
+	}
+
+	dstVal := dstPtr.Elem()
+
+	switch dstVal.Kind() {
+	case reflect.Ptr, reflect.Slice, reflect.Map:
+		// nil-able kinds: NULL maps naturally to the zero (nil) value
+		dstVal.Set(reflect.Zero(dstVal.Type()))
+		return nil
+	}
+
+	return errors.Errorf("cannot assign NULL to %T", dst)
+}
+
+// kindTypes maps each primitive reflect.Kind to its unnamed base type. It is
+// used to convert pointers to named types (e.g. type Foo int16) into pointers
+// to the underlying base type. Populated in init below.
+var kindTypes map[reflect.Kind]reflect.Type
+
+// GetAssignToDstType attempts to convert dst to something AssignTo can assign
+// to. If dst is a pointer to pointer it allocates a value and returns the
+// dereferenced pointer. If dst is a named type such as *Foo where Foo is type
+// Foo int16, it converts dst to *int16.
+//
+// GetAssignToDstType returns the converted dst and a bool representing if any
+// change was made.
+func GetAssignToDstType(dst interface{}) (interface{}, bool) {
+	dstPtr := reflect.ValueOf(dst)
+
+	// AssignTo dst must always be a pointer
+	if dstPtr.Kind() != reflect.Ptr {
+		return nil, false
+	}
+
+	dstVal := dstPtr.Elem()
+
+	// if dst is a pointer to pointer, allocate space try again with the dereferenced pointer
+	if dstVal.Kind() == reflect.Ptr {
+		dstVal.Set(reflect.New(dstVal.Type().Elem()))
+		return dstVal.Interface(), true
+	}
+
+	// if dst is pointer to a base type that has been renamed
+	if baseValType, ok := kindTypes[dstVal.Kind()]; ok {
+		nextDst := dstPtr.Convert(reflect.PtrTo(baseValType))
+		return nextDst.Interface(), dstPtr.Type() != nextDst.Type()
+	}
+
+	// if dst is a pointer to a slice of a renamed base type, convert to a
+	// pointer to a slice of the base type (e.g. *[]Foo -> *[]int16)
+	if dstVal.Kind() == reflect.Slice {
+		if baseElemType, ok := kindTypes[dstVal.Type().Elem().Kind()]; ok {
+			baseSliceType := reflect.PtrTo(reflect.SliceOf(baseElemType))
+			nextDst := dstPtr.Convert(baseSliceType)
+			return nextDst.Interface(), dstPtr.Type() != nextDst.Type()
+		}
+	}
+
+	return nil, false
+}
+
+func init() {
+	// build the Kind -> base type lookup table used by GetAssignToDstType
+	kindTypes = map[reflect.Kind]reflect.Type{
+		reflect.Bool:    reflect.TypeOf(false),
+		reflect.Float32: reflect.TypeOf(float32(0)),
+		reflect.Float64: reflect.TypeOf(float64(0)),
+		reflect.Int:     reflect.TypeOf(int(0)),
+		reflect.Int8:    reflect.TypeOf(int8(0)),
+		reflect.Int16:   reflect.TypeOf(int16(0)),
+		reflect.Int32:   reflect.TypeOf(int32(0)),
+		reflect.Int64:   reflect.TypeOf(int64(0)),
+		reflect.Uint:    reflect.TypeOf(uint(0)),
+		reflect.Uint8:   reflect.TypeOf(uint8(0)),
+		reflect.Uint16:  reflect.TypeOf(uint16(0)),
+		reflect.Uint32:  reflect.TypeOf(uint32(0)),
+		reflect.Uint64:  reflect.TypeOf(uint64(0)),
+		reflect.String:  reflect.TypeOf(""),
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/database_sql.go b/vendor/github.com/jackc/pgx/pgtype/database_sql.go
new file mode 100644
index 0000000..969536d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/database_sql.go
@@ -0,0 +1,42 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+// DatabaseSQLValue converts src into a value acceptable to database/sql.
+// Preference order: src's own driver.Valuer implementation, then its text
+// encoding (returned as string), then its binary encoding (returned as
+// []byte). An error is returned if src supports none of these.
+func DatabaseSQLValue(ci *ConnInfo, src Value) (interface{}, error) {
+	if valuer, ok := src.(driver.Valuer); ok {
+		return valuer.Value()
+	}
+
+	if textEncoder, ok := src.(TextEncoder); ok {
+		buf, err := textEncoder.EncodeText(ci, nil)
+		if err != nil {
+			return nil, err
+		}
+		return string(buf), nil
+	}
+
+	if binaryEncoder, ok := src.(BinaryEncoder); ok {
+		buf, err := binaryEncoder.EncodeBinary(ci, nil)
+		if err != nil {
+			return nil, err
+		}
+		return buf, nil
+	}
+
+	return nil, errors.New("cannot convert to database/sql compatible value")
+}
+
+// EncodeValueText encodes src in its PostgreSQL text format using a nil
+// *ConnInfo. A nil encoded buffer is translated to a nil interface value
+// (SQL NULL); otherwise the bytes are returned as a string.
+func EncodeValueText(src TextEncoder) (interface{}, error) {
+	buf, err := src.EncodeText(nil, make([]byte, 0, 32))
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+	return string(buf), err
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/date.go b/vendor/github.com/jackc/pgx/pgtype/date.go
new file mode 100644
index 0000000..f1c0d8b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/date.go
@@ -0,0 +1,209 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Date represents the PostgreSQL date type. In addition to a concrete
+// time.Time it can carry the special values infinity and -infinity via
+// InfinityModifier.
+type Date struct {
+	Time time.Time
+	Status Status
+	InfinityModifier InfinityModifier
+}
+
+// Day offsets used by the binary wire format to represent the special
+// -infinity and infinity date values (math.MinInt32 / math.MaxInt32).
+const (
+	negativeInfinityDayOffset = -2147483648
+	infinityDayOffset         = 2147483647
+)
+
+// Set assigns src to dst. nil becomes NULL; time.Time (and named types with
+// an underlying time.Time) become a Present date.
+func (dst *Date) Set(src interface{}) error {
+	if src == nil {
+		*dst = Date{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case time.Time:
+		*dst = Date{Time: value, Status: Present}
+	default:
+		if originalSrc, ok := underlyingTimeType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Date", value)
+	}
+
+	return nil
+}
+
+// Get returns the underlying value: the InfinityModifier when set, the
+// time.Time when Present, nil when Null, otherwise the Status itself.
+func (dst *Date) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		if dst.InfinityModifier != None {
+			return dst.InfinityModifier
+		}
+		return dst.Time
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst. Only *time.Time (and, via
+// GetAssignToDstType, pointer-to-pointer or renamed variants) is supported;
+// infinity values cannot be assigned to a time.Time.
+func (src *Date) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+		case *time.Time:
+			if src.InfinityModifier != None {
+				return errors.Errorf("cannot assign %v to %T", src, dst)
+			}
+			*v = src.Time
+			return nil
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL text format: "infinity", "-infinity", or
+// a YYYY-MM-DD date parsed in UTC.
+func (dst *Date) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Date{Status: Null}
+		return nil
+	}
+
+	sbuf := string(src)
+	switch sbuf {
+	case "infinity":
+		*dst = Date{Status: Present, InfinityModifier: Infinity}
+	case "-infinity":
+		*dst = Date{Status: Present, InfinityModifier: -Infinity}
+	default:
+		t, err := time.ParseInLocation("2006-01-02", sbuf, time.UTC)
+		if err != nil {
+			return err
+		}
+
+		*dst = Date{Time: t, Status: Present}
+	}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary format: a big-endian int32 day
+// offset from 2000-01-01, with the min/max int32 values meaning ±infinity.
+func (dst *Date) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Date{Status: Null}
+		return nil
+	}
+
+	if len(src) != 4 {
+		return errors.Errorf("invalid length for date: %v", len(src))
+	}
+
+	dayOffset := int32(binary.BigEndian.Uint32(src))
+
+	switch dayOffset {
+	case infinityDayOffset:
+		*dst = Date{Status: Present, InfinityModifier: Infinity}
+	case negativeInfinityDayOffset:
+		*dst = Date{Status: Present, InfinityModifier: -Infinity}
+	default:
+		// time.Date normalizes out-of-range day values, so 1+dayOffset
+		// relative to 2000-01-01 yields the correct calendar date
+		t := time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.UTC)
+		*dst = Date{Time: t, Status: Present}
+	}
+
+	return nil
+}
+
+// EncodeText appends the text format (YYYY-MM-DD or ±infinity) to buf.
+// A nil buffer is returned for NULL.
+func (src *Date) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	var s string
+
+	switch src.InfinityModifier {
+	case None:
+		s = src.Time.Format("2006-01-02")
+	case Infinity:
+		s = "infinity"
+	case NegativeInfinity:
+		s = "-infinity"
+	}
+
+	return append(buf, s...), nil
+}
+
+// EncodeBinary appends the binary format (int32 days since 2000-01-01, or
+// the ±infinity sentinel offsets) to buf. A nil buffer is returned for NULL.
+func (src *Date) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	var daysSinceDateEpoch int32
+	switch src.InfinityModifier {
+	case None:
+		// truncate to midnight UTC before computing the day offset so the
+		// time-of-day portion of src.Time cannot skew the division
+		tUnix := time.Date(src.Time.Year(), src.Time.Month(), src.Time.Day(), 0, 0, 0, 0, time.UTC).Unix()
+		dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
+
+		secSinceDateEpoch := tUnix - dateEpoch
+		daysSinceDateEpoch = int32(secSinceDateEpoch / 86400)
+	case Infinity:
+		daysSinceDateEpoch = infinityDayOffset
+	case NegativeInfinity:
+		daysSinceDateEpoch = negativeInfinityDayOffset
+	}
+
+	return pgio.AppendInt32(buf, daysSinceDateEpoch), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Date) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Date{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// copy before decoding; the driver may reuse src's backing array
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	case time.Time:
+		*dst = Date{Time: src, Status: Present}
+		return nil
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Date) Value() (driver.Value, error) {
+	switch src.Status {
+	case Present:
+		if src.InfinityModifier != None {
+			return src.InfinityModifier.String(), nil
+		}
+		return src.Time, nil
+	case Null:
+		return nil, nil
+	default:
+		return nil, errUndefined
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/date_array.go b/vendor/github.com/jackc/pgx/pgtype/date_array.go
new file mode 100644
index 0000000..0cb6458
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/date_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// DateArray represents a PostgreSQL date[] value, possibly
+// multi-dimensional.
+type DateArray struct {
+	Elements []Date
+	Dimensions []ArrayDimension
+	Status Status
+}
+
+// Set assigns src to dst. nil and nil []time.Time become NULL; an empty
+// slice becomes a Present array with no dimensions; otherwise each element
+// is converted via Date.Set into a one-dimensional array.
+func (dst *DateArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = DateArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []time.Time:
+		if value == nil {
+			*dst = DateArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = DateArray{Status: Present}
+		} else {
+			elements := make([]Date, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = DateArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to DateArray", value)
+	}
+
+	return nil
+}
+
+// Get returns dst itself when Present, nil when Null, otherwise the Status.
+func (dst *DateArray) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst, which must be a *[]time.Time (or a type
+// GetAssignToDstType can reduce to one). Each element is assigned via
+// Date.AssignTo.
+func (src *DateArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]time.Time:
+			*v = make([]time.Time, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL array text format, delegating each
+// element (the literal NULL excepted) to Date.DecodeText.
+func (dst *DateArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = DateArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Date
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Date, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Date
+			var elemSrc []byte
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = DateArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL array binary format: an ArrayHeader
+// followed by length-prefixed elements (-1 length meaning NULL).
+func (dst *DateArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = DateArray{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = DateArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// total element count is the product of all dimension lengths
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Date, elementCount)
+
+	for i := range elements {
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = DateArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL array text format to buf, writing nested
+// braces according to Dimensions and NULL for nil-encoded elements.
+func (src *DateArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL array binary format to buf. The date
+// element OID is looked up via ci; each element is written with a length
+// prefix that stays -1 (NULL) when the element encodes to nil.
+func (src *DateArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("date"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "date")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		// reserve 4 bytes for the element length, backfilled below
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *DateArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// copy before decoding; the driver may reuse src's backing array
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *DateArray) Value() (driver.Value, error) {
+	buf, err := src.EncodeText(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+
+	return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/daterange.go b/vendor/github.com/jackc/pgx/pgtype/daterange.go
new file mode 100644
index 0000000..47cd7e4
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/daterange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Daterange represents the PostgreSQL daterange type: a range of Date values
+// with independently typed lower and upper bounds.
+type Daterange struct {
+	Lower Date
+	Upper Date
+	LowerType BoundType
+	UpperType BoundType
+	Status Status
+}
+
+// Set is not supported for Daterange; it always returns an error.
+func (dst *Daterange) Set(src interface{}) error {
+	return errors.Errorf("cannot convert %v to Daterange", src)
+}
+
+// Get returns dst itself when Present, nil when Null, otherwise the Status.
+func (dst *Daterange) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo is not supported for Daterange; it always returns an error.
+func (src *Daterange) AssignTo(dst interface{}) error {
+	return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL range text format, delegating bound
+// values to Date.DecodeText. Bounds are decoded only when their type is
+// Inclusive or Exclusive; an Empty lower bound means the empty range.
+func (dst *Daterange) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Daterange{Status: Null}
+		return nil
+	}
+
+	utr, err := ParseUntypedTextRange(string(src))
+	if err != nil {
+		return err
+	}
+
+	*dst = Daterange{Status: Present}
+
+	dst.LowerType = utr.LowerType
+	dst.UpperType = utr.UpperType
+
+	if dst.LowerType == Empty {
+		return nil
+	}
+
+	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+		if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+			return err
+		}
+	}
+
+	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+		if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL range binary format, delegating bound
+// values to Date.DecodeBinary.
+func (dst *Daterange) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Daterange{Status: Null}
+		return nil
+	}
+
+	ubr, err := ParseUntypedBinaryRange(src)
+	if err != nil {
+		return err
+	}
+
+	*dst = Daterange{Status: Present}
+
+	dst.LowerType = ubr.LowerType
+	dst.UpperType = ubr.UpperType
+
+	if dst.LowerType == Empty {
+		return nil
+	}
+
+	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+		if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+			return err
+		}
+	}
+
+	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+		if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// EncodeText appends the range text format to buf: '[' or '(' per bound
+// type, comma-separated bound values (omitted when Unbounded), and "empty"
+// for the empty range.
+func (src Daterange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	switch src.LowerType {
+	case Exclusive, Unbounded:
+		buf = append(buf, '(')
+	case Inclusive:
+		buf = append(buf, '[')
+	case Empty:
+		return append(buf, "empty"...), nil
+	default:
+		return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+	}
+
+	var err error
+
+	if src.LowerType != Unbounded {
+		buf, err = src.Lower.EncodeText(ci, buf)
+		if err != nil {
+			return nil, err
+		} else if buf == nil {
+			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+		}
+	}
+
+	buf = append(buf, ',')
+
+	if src.UpperType != Unbounded {
+		buf, err = src.Upper.EncodeText(ci, buf)
+		if err != nil {
+			return nil, err
+		} else if buf == nil {
+			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+		}
+	}
+
+	switch src.UpperType {
+	case Exclusive, Unbounded:
+		buf = append(buf, ')')
+	case Inclusive:
+		buf = append(buf, ']')
+	default:
+		return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the range binary format to buf: a flags byte built
+// from the bound-type masks, then length-prefixed bound values for any
+// non-Unbounded bounds.
+func (src Daterange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	var rangeType byte
+	switch src.LowerType {
+	case Inclusive:
+		rangeType |= lowerInclusiveMask
+	case Unbounded:
+		rangeType |= lowerUnboundedMask
+	case Exclusive:
+	case Empty:
+		// empty range is the flags byte alone
+		return append(buf, emptyMask), nil
+	default:
+		return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+	}
+
+	switch src.UpperType {
+	case Inclusive:
+		rangeType |= upperInclusiveMask
+	case Unbounded:
+		rangeType |= upperUnboundedMask
+	case Exclusive:
+	default:
+		return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+	}
+
+	buf = append(buf, rangeType)
+
+	var err error
+
+	if src.LowerType != Unbounded {
+		// reserve 4 bytes for the bound length, backfilled below
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		buf, err = src.Lower.EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if buf == nil {
+			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+		}
+
+		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+	}
+
+	if src.UpperType != Unbounded {
+		sp := len(buf)
+		buf = pgio.AppendInt32(buf, -1)
+
+		buf, err = src.Upper.EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if buf == nil {
+			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+		}
+
+		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Daterange) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Daterange{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// copy before decoding; the driver may reuse src's backing array
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Daterange) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/decimal.go b/vendor/github.com/jackc/pgx/pgtype/decimal.go
new file mode 100644
index 0000000..79653cf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/decimal.go
@@ -0,0 +1,31 @@
+package pgtype
+
+// Decimal is an alias of Numeric; every method simply delegates to the
+// corresponding Numeric method via a type conversion.
+type Decimal Numeric
+
+// Set delegates to Numeric.Set.
+func (dst *Decimal) Set(src interface{}) error {
+	return (*Numeric)(dst).Set(src)
+}
+
+// Get delegates to Numeric.Get.
+func (dst *Decimal) Get() interface{} {
+	return (*Numeric)(dst).Get()
+}
+
+// AssignTo delegates to Numeric.AssignTo.
+func (src *Decimal) AssignTo(dst interface{}) error {
+	return (*Numeric)(src).AssignTo(dst)
+}
+
+// DecodeText delegates to Numeric.DecodeText.
+func (dst *Decimal) DecodeText(ci *ConnInfo, src []byte) error {
+	return (*Numeric)(dst).DecodeText(ci, src)
+}
+
+// DecodeBinary delegates to Numeric.DecodeBinary.
+func (dst *Decimal) DecodeBinary(ci *ConnInfo, src []byte) error {
+	return (*Numeric)(dst).DecodeBinary(ci, src)
+}
+
+// EncodeText delegates to Numeric.EncodeText.
+func (src *Decimal) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	return (*Numeric)(src).EncodeText(ci, buf)
+}
+
+// EncodeBinary delegates to Numeric.EncodeBinary.
+func (src *Decimal) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	return (*Numeric)(src).EncodeBinary(ci, buf)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/enum_array.go b/vendor/github.com/jackc/pgx/pgtype/enum_array.go
new file mode 100644
index 0000000..3a94801
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/enum_array.go
@@ -0,0 +1,212 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
+// EnumArray represents a PostgreSQL array of enum values, with each element
+// held as a GenericText.
+type EnumArray struct {
+	Elements []GenericText
+	Dimensions []ArrayDimension
+	Status Status
+}
+
+// Set assigns src to dst. nil and nil []string become NULL; an empty slice
+// becomes a Present array with no dimensions; otherwise each element is
+// converted via GenericText.Set into a one-dimensional array.
+func (dst *EnumArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = EnumArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []string:
+		if value == nil {
+			*dst = EnumArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = EnumArray{Status: Present}
+		} else {
+			elements := make([]GenericText, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = EnumArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to EnumArray", value)
+	}
+
+	return nil
+}
+
+// Get returns dst itself when Present, nil when Null, otherwise the Status.
+func (dst *EnumArray) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst, which must be a *[]string (or a type
+// GetAssignToDstType can reduce to one).
+func (src *EnumArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]string:
+			*v = make([]string, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL array text format, delegating each
+// element (the literal NULL excepted) to GenericText.DecodeText.
+func (dst *EnumArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = EnumArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []GenericText
+
+	if len(uta.Elements) > 0 {
+		elements = make([]GenericText, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem GenericText
+			var elemSrc []byte
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = EnumArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// EncodeText appends the PostgreSQL array text format to buf, writing nested
+// braces according to Dimensions and NULL for nil-encoded elements.
+func (src *EnumArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *EnumArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// copy before decoding; the driver may reuse src's backing array
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *EnumArray) Value() (driver.Value, error) {
+	buf, err := src.EncodeText(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+
+	return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float4.go b/vendor/github.com/jackc/pgx/pgtype/float4.go
new file mode 100644
index 0000000..2207594
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float4.go
@@ -0,0 +1,197 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Float4 represents the PostgreSQL float4 (real) type.
+type Float4 struct {
+	Float float32
+	Status Status
+}
+
+// Set assigns src to dst, converting numeric and string types to float32.
+// Integer values wider than float32's mantissa are rejected unless they are
+// exactly representable (round-trip equality check).
+func (dst *Float4) Set(src interface{}) error {
+	if src == nil {
+		*dst = Float4{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case float32:
+		*dst = Float4{Float: value, Status: Present}
+	case float64:
+		*dst = Float4{Float: float32(value), Status: Present}
+	case int8:
+		*dst = Float4{Float: float32(value), Status: Present}
+	case uint8:
+		*dst = Float4{Float: float32(value), Status: Present}
+	case int16:
+		*dst = Float4{Float: float32(value), Status: Present}
+	case uint16:
+		*dst = Float4{Float: float32(value), Status: Present}
+	case int32:
+		f32 := float32(value)
+		if int32(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case uint32:
+		f32 := float32(value)
+		if uint32(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case int64:
+		f32 := float32(value)
+		if int64(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case uint64:
+		f32 := float32(value)
+		if uint64(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case int:
+		f32 := float32(value)
+		if int(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case uint:
+		f32 := float32(value)
+		if uint(f32) == value {
+			*dst = Float4{Float: f32, Status: Present}
+		} else {
+			return errors.Errorf("%v cannot be exactly represented as float32", value)
+		}
+	case string:
+		num, err := strconv.ParseFloat(value, 32)
+		if err != nil {
+			return err
+		}
+		*dst = Float4{Float: float32(num), Status: Present}
+	default:
+		if originalSrc, ok := underlyingNumberType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		// BUG FIX: error message previously said "Float8" (copy-paste from
+		// the float8 implementation); this type is Float4.
+		return errors.Errorf("cannot convert %v to Float4", value)
+	}
+
+	return nil
+}
+
+// Get returns the float32 when Present, nil when Null, otherwise the Status.
+func (dst *Float4) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst.Float
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst via the shared float64 assignment helper.
+func (src *Float4) AssignTo(dst interface{}) error {
+	return float64AssignTo(float64(src.Float), src.Status, dst)
+}
+
+// DecodeText decodes the text format by parsing src as a 32-bit float.
+func (dst *Float4) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Float4{Status: Null}
+		return nil
+	}
+
+	n, err := strconv.ParseFloat(string(src), 32)
+	if err != nil {
+		return err
+	}
+
+	*dst = Float4{Float: float32(n), Status: Present}
+	return nil
+}
+
+// DecodeBinary decodes the binary format: a 4-byte big-endian IEEE-754
+// float32 bit pattern.
+func (dst *Float4) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Float4{Status: Null}
+		return nil
+	}
+
+	if len(src) != 4 {
+		return errors.Errorf("invalid length for float4: %v", len(src))
+	}
+
+	n := int32(binary.BigEndian.Uint32(src))
+
+	*dst = Float4{Float: math.Float32frombits(uint32(n)), Status: Present}
+	return nil
+}
+
+// EncodeText appends the shortest exact decimal representation to buf.
+func (src *Float4) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	buf = append(buf, strconv.FormatFloat(float64(src.Float), 'f', -1, 32)...)
+	return buf, nil
+}
+
+// EncodeBinary appends the big-endian IEEE-754 bit pattern to buf.
+func (src *Float4) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	buf = pgio.AppendUint32(buf, math.Float32bits(src.Float))
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float4) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Float4{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case float64:
+		*dst = Float4{Float: float32(src), Status: Present}
+		return nil
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// copy before decoding; the driver may reuse src's backing array
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface. The float32 is
+// widened to float64, as required by driver.Value.
+func (src *Float4) Value() (driver.Value, error) {
+	switch src.Status {
+	case Present:
+		return float64(src.Float), nil
+	case Null:
+		return nil, nil
+	default:
+		return nil, errUndefined
+	}
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float4_array.go b/vendor/github.com/jackc/pgx/pgtype/float4_array.go
new file mode 100644
index 0000000..02c28ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float4_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Float4Array represents the PostgreSQL float4[] type: the flattened
// element values, the array's dimensions, and its NULL/undefined state.
type Float4Array struct {
	Elements   []Float4
	Dimensions []ArrayDimension
	Status     Status
}
+
+func (dst *Float4Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Float4Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []float32:
+ if value == nil {
+ *dst = Float4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Float4Array{Status: Present}
+ } else {
+ elements := make([]Float4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Float4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Float4Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Float4Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies src into dst, which must be a *[]float32 or a type
// reducible to one via GetAssignToDstType. A Null array assigns the
// destination's zero/nil value through NullAssignTo.
func (src *Float4Array) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]float32:
			*v = make([]float32, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			// Unwrap named destination types and retry with the base type.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText decodes the PostgreSQL text representation of a float4[]
// (e.g. "{1.5,NULL,2}") into dst. A nil src decodes as SQL NULL.
func (dst *Float4Array) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Float4Array{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Float4

	if len(uta.Elements) > 0 {
		elements = make([]Float4, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Float4
			var elemSrc []byte
			// The untyped parser yields the literal "NULL" for NULL
			// elements; leaving elemSrc nil makes the element decode as Null.
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = Float4Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}
+
// DecodeBinary decodes the PostgreSQL binary array format into dst: an
// ArrayHeader (dimensions, flags, element OID) followed by each element as
// a 4-byte big-endian length (-1 for NULL) plus that many bytes of data.
func (dst *Float4Array) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Float4Array{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	// A zero-dimension array is a valid, empty array.
	if len(arrayHeader.Dimensions) == 0 {
		*dst = Float4Array{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Float4, elementCount)

	for i := range elements {
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		// elemLen of -1 marks a NULL element; elemSrc stays nil so the
		// element decodes as Null, and rp is not advanced.
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = Float4Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText appends the PostgreSQL text representation of the array
// (a dimensions prefix when needed, then brace-wrapped, comma-separated
// elements with NULL for null elements) to buf.
func (src *Float4Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension boundary this index starts.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		// A nil element buffer means the element is SQL NULL.
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension boundary this index ends.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}
+
// EncodeBinary appends the PostgreSQL binary array format to buf: an
// ArrayHeader carrying the float4 element OID, then each element as a
// 4-byte length (-1 for NULL) plus its binary encoding.
func (src *Float4Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	// The element OID must be resolved from the connection's type map.
	if dt, ok := ci.DataTypeForName("float4"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "float4")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Write a -1 length placeholder, then backfill the real length once
		// the element is encoded; NULL elements keep the -1.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float4Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float4Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float8.go b/vendor/github.com/jackc/pgx/pgtype/float8.go
new file mode 100644
index 0000000..dd34f54
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float8.go
@@ -0,0 +1,187 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Float8 represents the PostgreSQL float8 (double precision) type.
type Float8 struct {
	Float  float64
	Status Status
}
+
// Set converts src to a Float8. All fixed-width numeric types and numeric
// strings are accepted; 64-bit integers (and int/uint) are rejected unless
// they can be represented exactly in a float64. Named numeric types are
// unwrapped via underlyingNumberType.
func (dst *Float8) Set(src interface{}) error {
	if src == nil {
		*dst = Float8{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case float32:
		*dst = Float8{Float: float64(value), Status: Present}
	case float64:
		*dst = Float8{Float: value, Status: Present}
	case int8:
		*dst = Float8{Float: float64(value), Status: Present}
	case uint8:
		*dst = Float8{Float: float64(value), Status: Present}
	case int16:
		*dst = Float8{Float: float64(value), Status: Present}
	case uint16:
		*dst = Float8{Float: float64(value), Status: Present}
	case int32:
		*dst = Float8{Float: float64(value), Status: Present}
	case uint32:
		*dst = Float8{Float: float64(value), Status: Present}
	case int64:
		// Accept only values that survive the int64 -> float64 -> int64
		// round trip (float64 carries 53 bits of mantissa).
		f64 := float64(value)
		if int64(f64) == value {
			*dst = Float8{Float: f64, Status: Present}
		} else {
			return errors.Errorf("%v cannot be exactly represented as float64", value)
		}
	case uint64:
		f64 := float64(value)
		if uint64(f64) == value {
			*dst = Float8{Float: f64, Status: Present}
		} else {
			return errors.Errorf("%v cannot be exactly represented as float64", value)
		}
	case int:
		f64 := float64(value)
		if int(f64) == value {
			*dst = Float8{Float: f64, Status: Present}
		} else {
			return errors.Errorf("%v cannot be exactly represented as float64", value)
		}
	case uint:
		f64 := float64(value)
		if uint(f64) == value {
			*dst = Float8{Float: f64, Status: Present}
		} else {
			return errors.Errorf("%v cannot be exactly represented as float64", value)
		}
	case string:
		num, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return err
		}
		*dst = Float8{Float: float64(num), Status: Present}
	default:
		if originalSrc, ok := underlyingNumberType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Float8", value)
	}

	return nil
}
+
+func (dst *Float8) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Float
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo writes the float's value into dst via the shared float64
// assignment helper, honoring src.Status.
func (src *Float8) AssignTo(dst interface{}) error {
	return float64AssignTo(src.Float, src.Status, dst)
}
+
+func (dst *Float8) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseFloat(string(src), 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Float8{Float: n, Status: Present}
+ return nil
+}
+
+func (dst *Float8) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for float4: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ *dst = Float8{Float: math.Float64frombits(uint64(n)), Status: Present}
+ return nil
+}
+
+func (src *Float8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, strconv.FormatFloat(float64(src.Float), 'f', -1, 64)...)
+ return buf, nil
+}
+
+func (src *Float8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.Float))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float8) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Float8{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ *dst = Float8{Float: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float8) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.Float, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/float8_array.go b/vendor/github.com/jackc/pgx/pgtype/float8_array.go
new file mode 100644
index 0000000..b92a820
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/float8_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Float8Array represents the PostgreSQL float8[] type: the flattened
// element values, the array's dimensions, and its NULL/undefined state.
type Float8Array struct {
	Elements   []Float8
	Dimensions []ArrayDimension
	Status     Status
}
+
// Set converts src to a Float8Array. Accepted inputs are nil, []float64,
// or a named type reducible to []float64 via underlyingSliceType.
func (dst *Float8Array) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = Float8Array{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []float64:
		if value == nil {
			*dst = Float8Array{Status: Null}
		} else if len(value) == 0 {
			// A non-nil empty slice is a present, empty array.
			*dst = Float8Array{Status: Present}
		} else {
			elements := make([]Float8, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// A flat slice is a one-dimensional array with the PostgreSQL
			// default lower bound of 1.
			*dst = Float8Array{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Float8Array", value)
	}

	return nil
}
+
+func (dst *Float8Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies src into dst, which must be a *[]float64 or a type
// reducible to one via GetAssignToDstType. A Null array assigns the
// destination's zero/nil value through NullAssignTo.
func (src *Float8Array) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]float64:
			*v = make([]float64, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			// Unwrap named destination types and retry with the base type.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
+func (dst *Float8Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Float8Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Float8
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Float8, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Float8
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Float8Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
// DecodeBinary decodes the PostgreSQL binary array format into dst: an
// ArrayHeader (dimensions, flags, element OID) followed by each element as
// a 4-byte big-endian length (-1 for NULL) plus that many bytes of data.
func (dst *Float8Array) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Float8Array{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	// A zero-dimension array is a valid, empty array.
	if len(arrayHeader.Dimensions) == 0 {
		*dst = Float8Array{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Float8, elementCount)

	for i := range elements {
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		// elemLen of -1 marks a NULL element; elemSrc stays nil so the
		// element decodes as Null, and rp is not advanced.
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = Float8Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}
+
// EncodeText appends the PostgreSQL text representation of the array
// (a dimensions prefix when needed, then brace-wrapped, comma-separated
// elements with NULL for null elements) to buf.
func (src *Float8Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		// Open a brace for every dimension boundary this index starts.
		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		// A nil element buffer means the element is SQL NULL.
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		// Close a brace for every dimension boundary this index ends.
		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}
+
// EncodeBinary appends the PostgreSQL binary array format to buf: an
// ArrayHeader carrying the float8 element OID, then each element as a
// 4-byte length (-1 for NULL) plus its binary encoding.
func (src *Float8Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	// The element OID must be resolved from the connection's type map.
	if dt, ok := ci.DataTypeForName("float8"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "float8")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Write a -1 length placeholder, then backfill the real length once
		// the element is encoded; NULL elements keep the -1.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Float8Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Float8Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/generic_binary.go b/vendor/github.com/jackc/pgx/pgtype/generic_binary.go
new file mode 100644
index 0000000..2596eca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/generic_binary.go
@@ -0,0 +1,39 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// GenericBinary is a placeholder for binary format values that no other type exists
// to handle. All behavior is delegated to the underlying Bytea.
type GenericBinary Bytea

// Set delegates to Bytea.Set.
func (dst *GenericBinary) Set(src interface{}) error {
	return (*Bytea)(dst).Set(src)
}

// Get delegates to Bytea.Get.
func (dst *GenericBinary) Get() interface{} {
	return (*Bytea)(dst).Get()
}

// AssignTo delegates to Bytea.AssignTo.
func (src *GenericBinary) AssignTo(dst interface{}) error {
	return (*Bytea)(src).AssignTo(dst)
}

// DecodeBinary delegates to Bytea.DecodeBinary.
func (dst *GenericBinary) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Bytea)(dst).DecodeBinary(ci, src)
}

// EncodeBinary delegates to Bytea.EncodeBinary.
func (src *GenericBinary) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Bytea)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *GenericBinary) Scan(src interface{}) error {
	return (*Bytea)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *GenericBinary) Value() (driver.Value, error) {
	return (*Bytea)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/generic_text.go b/vendor/github.com/jackc/pgx/pgtype/generic_text.go
new file mode 100644
index 0000000..0e3db9d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/generic_text.go
@@ -0,0 +1,39 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// GenericText is a placeholder for text format values that no other type exists
// to handle. All behavior is delegated to the underlying Text.
type GenericText Text

// Set delegates to Text.Set.
func (dst *GenericText) Set(src interface{}) error {
	return (*Text)(dst).Set(src)
}

// Get delegates to Text.Get.
func (dst *GenericText) Get() interface{} {
	return (*Text)(dst).Get()
}

// AssignTo delegates to Text.AssignTo.
func (src *GenericText) AssignTo(dst interface{}) error {
	return (*Text)(src).AssignTo(dst)
}

// DecodeText delegates to Text.DecodeText.
func (dst *GenericText) DecodeText(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeText(ci, src)
}

// EncodeText delegates to Text.EncodeText.
func (src *GenericText) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeText(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *GenericText) Scan(src interface{}) error {
	return (*Text)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *GenericText) Value() (driver.Value, error) {
	return (*Text)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/hstore.go b/vendor/github.com/jackc/pgx/pgtype/hstore.go
new file mode 100644
index 0000000..347446a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/hstore.go
@@ -0,0 +1,434 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/binary"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+)
+
// Hstore represents an hstore column that can be null or have null values
// associated with its keys.
type Hstore struct {
	Map    map[string]Text // Text values so individual keys can hold NULL
	Status Status
}
+
+func (dst *Hstore) Set(src interface{}) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case map[string]string:
+ m := make(map[string]Text, len(value))
+ for k, v := range value {
+ m[k] = Text{String: v, Status: Present}
+ }
+ *dst = Hstore{Map: m, Status: Present}
+ default:
+ return errors.Errorf("cannot convert %v to Hstore", src)
+ }
+
+ return nil
+}
+
+func (dst *Hstore) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Map
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies src into dst, which must be a *map[string]string or a
// type reducible to one. A NULL value under any key cannot be represented
// in map[string]string and is reported as an error.
func (src *Hstore) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *map[string]string:
			*v = make(map[string]string, len(src.Map))
			for k, val := range src.Map {
				if val.Status != Present {
					return errors.Errorf("cannot decode %v into %T", src, dst)
				}
				(*v)[k] = val.String
			}
			return nil
		default:
			// Unwrap named destination types and retry with the base type.
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
+func (dst *Hstore) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ keys, values, err := parseHstore(string(src))
+ if err != nil {
+ return err
+ }
+
+ m := make(map[string]Text, len(keys))
+ for i := range keys {
+ m[keys[i]] = values[i]
+ }
+
+ *dst = Hstore{Map: m, Status: Present}
+ return nil
+}
+
+func (dst *Hstore) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ rp := 0
+
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ pairCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ m := make(map[string]Text, pairCount)
+
+ for i := 0; i < pairCount; i++ {
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ keyLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ if len(src[rp:]) < keyLen {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ key := string(src[rp : rp+keyLen])
+ rp += keyLen
+
+ if len(src[rp:]) < 4 {
+ return errors.Errorf("hstore incomplete %v", src)
+ }
+ valueLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+
+ var valueBuf []byte
+ if valueLen >= 0 {
+ valueBuf = src[rp : rp+valueLen]
+ }
+ rp += valueLen
+
+ var value Text
+ err := value.DecodeBinary(ci, valueBuf)
+ if err != nil {
+ return err
+ }
+ m[key] = value
+ }
+
+ *dst = Hstore{Map: m, Status: Present}
+
+ return nil
+}
+
// EncodeText appends the text representation of the hstore to buf as
// comma-separated "key"=>"value" pairs, writing an unquoted NULL for NULL
// values. Keys and values are quoted only when required. Pair order follows
// Go map iteration order and is therefore not deterministic.
func (src *Hstore) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	firstPair := true

	for k, v := range src.Map {
		if firstPair {
			firstPair = false
		} else {
			buf = append(buf, ',')
		}

		buf = append(buf, quoteHstoreElementIfNeeded(k)...)
		buf = append(buf, "=>"...)

		elemBuf, err := v.EncodeText(ci, nil)
		if err != nil {
			return nil, err
		}

		// A nil element buffer means the value is SQL NULL.
		if elemBuf == nil {
			buf = append(buf, "NULL"...)
		} else {
			buf = append(buf, quoteHstoreElementIfNeeded(string(elemBuf))...)
		}
	}

	return buf, nil
}
+
+func (src *Hstore) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(src.Map)))
+
+ var err error
+ for k, v := range src.Map {
+ buf = pgio.AppendInt32(buf, int32(len(k)))
+ buf = append(buf, k...)
+
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := v.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, err
+}
+
var quoteHstoreReplacer = strings.NewReplacer(`\`, `\\`, `"`, `\"`)

// quoteHstoreElement wraps src in double quotes, escaping backslashes and
// double quotes. Fixed to use quoteHstoreReplacer, which was declared but
// never used; it performs the same replacements the array replacer did, so
// hstore quoting no longer depends on the array helpers.
func quoteHstoreElement(src string) string {
	return `"` + quoteHstoreReplacer.Replace(src) + `"`
}

// quoteHstoreElementIfNeeded quotes src only when the hstore text format
// requires it: empty strings, the word NULL (any case), or strings
// containing spaces, braces, commas, quotes, backslashes, or "=>".
// Fixed to call quoteHstoreElement (previously dead code) instead of the
// array-element quoter.
func quoteHstoreElementIfNeeded(src string) string {
	if src == "" || (len(src) == 4 && strings.ToLower(src) == "null") || strings.ContainsAny(src, ` {},"\=>`) {
		return quoteHstoreElement(src)
	}
	return src
}
+
// States of the hstore text-parser state machine.
const (
	hsPre  = iota // before the first key
	hsKey         // inside a quoted key
	hsSep         // expecting the "=>" separator
	hsVal         // inside a quoted value
	hsNul         // reading an unquoted NULL value
	hsNext        // after a pair, expecting ", " or end of input
)

// hstoreParser is a rune cursor over the raw hstore text.
type hstoreParser struct {
	str string
	pos int // byte offset of the next rune in str
}
+
+func newHSP(in string) *hstoreParser {
+ return &hstoreParser{
+ pos: 0,
+ str: in,
+ }
+}
+
// Consume returns the next rune and advances past it. When the input is
// exhausted it returns end == true with a zero rune (named returns).
func (p *hstoreParser) Consume() (r rune, end bool) {
	if p.pos >= len(p.str) {
		end = true
		return
	}
	r, w := utf8.DecodeRuneInString(p.str[p.pos:])
	p.pos += w
	return
}
+
// Peek returns the next rune without advancing. end == true means no input
// remains.
func (p *hstoreParser) Peek() (r rune, end bool) {
	if p.pos >= len(p.str) {
		end = true
		return
	}
	r, _ = utf8.DecodeRuneInString(p.str[p.pos:])
	return
}
+
// parseHstore parses the string representation of an hstore column (the same
// you would get from an ordinary SELECT) into two slices of keys and values. it
// is used internally in the default parsing of hstores.
//
// It is a rune-at-a-time state machine over the states
// hsPre -> hsKey -> hsSep -> (hsVal | hsNul) -> hsNext, looping back to
// hsKey after each ", " pair separator. Quoted keys and values honor the
// \" and \\ escapes.
func parseHstore(s string) (k []string, v []Text, err error) {
	if s == "" {
		return
	}

	buf := bytes.Buffer{}
	keys := []string{}
	values := []Text{}
	p := newHSP(s)

	r, end := p.Consume()
	state := hsPre

	for !end {
		switch state {
		case hsPre:
			// Input must open with a quoted key.
			if r == '"' {
				state = hsKey
			} else {
				err = errors.New("String does not begin with \"")
			}
		case hsKey:
			switch r {
			case '"': //End of the key
				if buf.Len() == 0 {
					err = errors.New("Empty Key is invalid")
				} else {
					keys = append(keys, buf.String())
					buf = bytes.Buffer{}
					state = hsSep
				}
			case '\\': //Potential escaped character
				n, end := p.Consume()
				switch {
				case end:
					err = errors.New("Found EOS in key, expecting character or \"")
				case n == '"', n == '\\':
					buf.WriteRune(n)
				default:
					// Unrecognized escape: keep both runes verbatim.
					buf.WriteRune(r)
					buf.WriteRune(n)
				}
			default: //Any other character
				buf.WriteRune(r)
			}
		case hsSep:
			// Expect "=>", then '"' (quoted value) or 'N' (start of NULL).
			if r == '=' {
				r, end = p.Consume()
				switch {
				case end:
					err = errors.New("Found EOS after '=', expecting '>'")
				case r == '>':
					r, end = p.Consume()
					switch {
					case end:
						err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
					case r == '"':
						state = hsVal
					case r == 'N':
						state = hsNul
					default:
						err = errors.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
					}
				default:
					err = errors.Errorf("Invalid character after '=', expecting '>'")
				}
			} else {
				err = errors.Errorf("Invalid character '%c' after value, expecting '='", r)
			}
		case hsVal:
			switch r {
			case '"': //End of the value
				values = append(values, Text{String: buf.String(), Status: Present})
				buf = bytes.Buffer{}
				state = hsNext
			case '\\': //Potential escaped character
				n, end := p.Consume()
				switch {
				case end:
					err = errors.New("Found EOS in key, expecting character or \"")
				case n == '"', n == '\\':
					buf.WriteRune(n)
				default:
					buf.WriteRune(r)
					buf.WriteRune(n)
				}
			default: //Any other character
				buf.WriteRune(r)
			}
		case hsNul:
			// The 'N' was consumed in hsSep; require the remaining "ULL".
			nulBuf := make([]rune, 3)
			nulBuf[0] = r
			for i := 1; i < 3; i++ {
				r, end = p.Consume()
				if end {
					err = errors.New("Found EOS in NULL value")
					return
				}
				nulBuf[i] = r
			}
			if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
				values = append(values, Text{Status: Null})
				state = hsNext
			} else {
				err = errors.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
			}
		case hsNext:
			// Expect ", " and then the next quoted key.
			// NOTE(review): the rune after the space is consumed here and
			// assumed to be the opening '"' of the next key without being
			// validated, so a malformed opener is silently swallowed. Also
			// note the "expcting" typo carried in the error message below.
			if r == ',' {
				r, end = p.Consume()
				switch {
				case end:
					err = errors.New("Found EOS after ',', expcting space")
				case (unicode.IsSpace(r)):
					r, end = p.Consume()
					state = hsKey
				default:
					err = errors.Errorf("Invalid character '%c' after ', ', expecting \"", r)
				}
			} else {
				err = errors.Errorf("Invalid character '%c' after value, expecting ','", r)
			}
		}

		if err != nil {
			return
		}
		r, end = p.Consume()
	}
	// Any terminal state other than hsNext means the input ended mid-pair.
	if state != hsNext {
		err = errors.New("Improperly formatted hstore")
		return
	}
	k = keys
	v = values
	return
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Hstore) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Hstore{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
// Value implements the database/sql/driver Valuer interface, rendering the
// hstore through the shared text-encoding helper.
func (src *Hstore) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/hstore_array.go b/vendor/github.com/jackc/pgx/pgtype/hstore_array.go
new file mode 100644
index 0000000..80530c2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/hstore_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// HstoreArray represents a PostgreSQL hstore[] array.
+type HstoreArray struct {
+	Elements   []Hstore
+	Dimensions []ArrayDimension
+	Status     Status
+}
+
+// Set converts src into dst. Supported source types are
+// []map[string]string and any type whose underlying type is such a slice.
+func (dst *HstoreArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []map[string]string:
+		if value == nil {
+			*dst = HstoreArray{Status: Null}
+		} else if len(value) == 0 {
+			// Non-nil empty slice becomes a present, dimensionless array.
+			*dst = HstoreArray{Status: Present}
+		} else {
+			elements := make([]Hstore, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = HstoreArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Fall back to the underlying slice type (handles named slice types).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to HstoreArray", value)
+	}
+
+	return nil
+}
+
+// Get returns the array itself when Present, nil when Null, and the raw
+// Status for any other state (e.g. Undefined).
+func (dst *HstoreArray) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst. dst must be *[]map[string]string or a
+// destination type reachable from it via GetAssignToDstType.
+func (src *HstoreArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]map[string]string:
+			*v = make([]map[string]string, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL text format of an hstore[] value.
+// A nil src represents SQL NULL.
+func (dst *HstoreArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Hstore
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Hstore, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Hstore
+			var elemSrc []byte
+			// The literal NULL denotes a NULL element; elemSrc stays nil.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = HstoreArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary array format.
+// A nil src represents SQL NULL.
+func (dst *HstoreArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = HstoreArray{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = HstoreArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Hstore, elementCount)
+
+	for i := range elements {
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		// A length of -1 marks a NULL element; elemSrc stays nil.
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = HstoreArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text format of src to buf. It returns
+// nil for SQL NULL and errUndefined for an undefined value.
+func (src *HstoreArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary array format of src to buf.
+// It returns nil for SQL NULL and errUndefined for an undefined value.
+func (src *HstoreArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	// hstore is an extension type without a fixed OID, so the element OID
+	// must be resolved from the connection's type map.
+	if dt, ok := ci.DataTypeForName("hstore"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "hstore")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		sp := len(buf)
+		// Write a -1 length placeholder; patched below unless the element
+		// encodes as NULL (nil elemBuf), in which case -1 stands.
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *HstoreArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy src; the driver may reuse the buffer after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *HstoreArray) Value() (driver.Value, error) {
+	buf, err := src.EncodeText(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+
+	return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/inet.go b/vendor/github.com/jackc/pgx/pgtype/inet.go
new file mode 100644
index 0000000..01fc0e5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/inet.go
@@ -0,0 +1,215 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+
+ "github.com/pkg/errors"
+)
+
+// Network address family is dependent on server socket.h value for AF_INET.
+// In practice, all platforms appear to have the same value. See
+// src/include/utils/inet.h for more information.
+const (
+	defaultAFInet  = 2
+	defaultAFInet6 = 3
+)
+
+// Inet represents both inet and cidr PostgreSQL types.
+type Inet struct {
+	IPNet  *net.IPNet
+	Status Status
+}
+
+// Set converts src into dst. Supported source types are net.IPNet,
+// *net.IPNet, net.IP, CIDR-notation strings, and pointer-derived types.
+func (dst *Inet) Set(src interface{}) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case net.IPNet:
+		*dst = Inet{IPNet: &value, Status: Present}
+	case *net.IPNet:
+		*dst = Inet{IPNet: value, Status: Present}
+	case net.IP:
+		// A bare IP gets a full-length mask (/32 or /128).
+		bitCount := len(value) * 8
+		mask := net.CIDRMask(bitCount, bitCount)
+		*dst = Inet{IPNet: &net.IPNet{Mask: mask, IP: value}, Status: Present}
+	case string:
+		// NOTE(review): ParseCIDR keeps only the network portion and
+		// rejects a bare IP without a mask, while DecodeText accepts
+		// one — confirm this asymmetry is intended.
+		_, ipnet, err := net.ParseCIDR(value)
+		if err != nil {
+			return err
+		}
+		*dst = Inet{IPNet: ipnet, Status: Present}
+	default:
+		if originalSrc, ok := underlyingPtrType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Inet", value)
+	}
+
+	return nil
+}
+
+// Get returns the *net.IPNet when Present, nil when Null, and the raw
+// Status for any other state.
+func (dst *Inet) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst.IPNet
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst (*net.IPNet or *net.IP). A *net.IP
+// destination requires a full-length (host) mask.
+func (src *Inet) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+		case *net.IPNet:
+			// Deep copy so the caller cannot mutate src's backing arrays.
+			*v = net.IPNet{
+				IP:   make(net.IP, len(src.IPNet.IP)),
+				Mask: make(net.IPMask, len(src.IPNet.Mask)),
+			}
+			copy(v.IP, src.IPNet.IP)
+			copy(v.Mask, src.IPNet.Mask)
+			return nil
+		case *net.IP:
+			// Only a host address (/32 or /128) can become a plain IP.
+			if oneCount, bitCount := src.IPNet.Mask.Size(); oneCount != bitCount {
+				return errors.Errorf("cannot assign %v to %T", src, dst)
+			}
+			*v = make(net.IP, len(src.IPNet.IP))
+			copy(*v, src.IPNet.IP)
+			return nil
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the text format: a bare IP or CIDR notation.
+// A nil src represents SQL NULL.
+func (dst *Inet) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	var ipnet *net.IPNet
+	var err error
+
+	if ip := net.ParseIP(string(src)); ip != nil {
+		// Normalize an IPv4-mapped address to its 4-byte form.
+		ipv4 := ip.To4()
+		if ipv4 != nil {
+			ip = ipv4
+		}
+		bitCount := len(ip) * 8
+		mask := net.CIDRMask(bitCount, bitCount)
+		ipnet = &net.IPNet{Mask: mask, IP: ip}
+	} else {
+		_, ipnet, err = net.ParseCIDR(string(src))
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = Inet{IPNet: ipnet, Status: Present}
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary format: family byte, netmask
+// bit count, is_cidr flag, address length, then the address bytes.
+func (dst *Inet) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	// 4 header bytes plus 4 (IPv4) or 16 (IPv6) address bytes.
+	if len(src) != 8 && len(src) != 20 {
+		return errors.Errorf("Received an invalid size for a inet: %d", len(src))
+	}
+
+	// ignore family
+	bits := src[1]
+	// ignore is_cidr
+	addressLength := src[3]
+
+	var ipnet net.IPNet
+	ipnet.IP = make(net.IP, int(addressLength))
+	copy(ipnet.IP, src[4:])
+	ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
+
+	*dst = Inet{IPNet: &ipnet, Status: Present}
+
+	return nil
+}
+
+// EncodeText appends the text representation (net.IPNet.String) to buf.
+func (src *Inet) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, src.IPNet.String()...), nil
+}
+
+// EncodeBinary encodes src into w.
+func (src *Inet) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	var family byte
+	switch len(src.IPNet.IP) {
+	case net.IPv4len:
+		family = defaultAFInet
+	case net.IPv6len:
+		family = defaultAFInet6
+	default:
+		return nil, errors.Errorf("Unexpected IP length: %v", len(src.IPNet.IP))
+	}
+
+	buf = append(buf, family)
+
+	ones, _ := src.IPNet.Mask.Size()
+	buf = append(buf, byte(ones))
+
+	// is_cidr is ignored on server
+	buf = append(buf, 0)
+
+	buf = append(buf, byte(len(src.IPNet.IP)))
+
+	return append(buf, src.IPNet.IP...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Inet) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Inet{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy src; the driver may reuse the buffer after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Inet) Value() (driver.Value, error) {
+	return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/inet_array.go b/vendor/github.com/jackc/pgx/pgtype/inet_array.go
new file mode 100644
index 0000000..f3e4efb
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/inet_array.go
@@ -0,0 +1,329 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "net"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// InetArray represents a PostgreSQL inet[] (or cidr[]) array.
+type InetArray struct {
+	Elements   []Inet
+	Dimensions []ArrayDimension
+	Status     Status
+}
+
+// Set converts src into dst. Supported source types are []*net.IPNet,
+// []net.IP, and types whose underlying type is one of those slices.
+func (dst *InetArray) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []*net.IPNet:
+		if value == nil {
+			*dst = InetArray{Status: Null}
+		} else if len(value) == 0 {
+			// Non-nil empty slice becomes a present, dimensionless array.
+			*dst = InetArray{Status: Present}
+		} else {
+			elements := make([]Inet, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = InetArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	case []net.IP:
+		if value == nil {
+			*dst = InetArray{Status: Null}
+		} else if len(value) == 0 {
+			*dst = InetArray{Status: Present}
+		} else {
+			elements := make([]Inet, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = InetArray{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Fall back to the underlying slice type (handles named slice types).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to InetArray", value)
+	}
+
+	return nil
+}
+
+// Get returns the array itself when Present, nil when Null, and the raw
+// Status for any other state.
+func (dst *InetArray) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst (*[]*net.IPNet or *[]net.IP), falling back
+// to GetAssignToDstType for derived destination types.
+func (src *InetArray) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]*net.IPNet:
+			*v = make([]*net.IPNet, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		case *[]net.IP:
+			*v = make([]net.IP, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL text format of an inet[] value.
+// A nil src represents SQL NULL.
+func (dst *InetArray) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Inet
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Inet, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Inet
+			var elemSrc []byte
+			// The literal NULL denotes a NULL element; elemSrc stays nil.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = InetArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary array format.
+// A nil src represents SQL NULL.
+func (dst *InetArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = InetArray{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = InetArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Inet, elementCount)
+
+	for i := range elements {
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		// A length of -1 marks a NULL element; elemSrc stays nil.
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = InetArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text format of src to buf. It returns
+// nil for SQL NULL and errUndefined for an undefined value.
+func (src *InetArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary array format of src to buf.
+// It returns nil for SQL NULL and errUndefined for an undefined value.
+func (src *InetArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("inet"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "inet")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		sp := len(buf)
+		// Write a -1 length placeholder; patched below unless the element
+		// encodes as NULL (nil elemBuf), in which case -1 stands.
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *InetArray) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy src; the driver may reuse the buffer after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *InetArray) Value() (driver.Value, error) {
+	buf, err := src.EncodeText(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+
+	return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int2.go b/vendor/github.com/jackc/pgx/pgtype/int2.go
new file mode 100644
index 0000000..6156ea7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int2.go
@@ -0,0 +1,209 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Int2 represents the PostgreSQL smallint (int2) type.
+type Int2 struct {
+	Int    int16
+	Status Status
+}
+
+// Set converts src into dst. All Go integer types and base-10 numeric
+// strings are accepted; values outside the int16 range are rejected.
+func (dst *Int2) Set(src interface{}) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+	case int8:
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint8:
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int16:
+		*dst = Int2{Int: value, Status: Present}
+	case uint16:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int32:
+		// Fixed: the underflow branch previously reported "greater than
+		// maximum" for values below math.MinInt16.
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint32:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int64:
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint64:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case int:
+		if value < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", value)
+		}
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case uint:
+		if value > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", value)
+		}
+		*dst = Int2{Int: int16(value), Status: Present}
+	case string:
+		// ParseInt with bitSize 16 enforces the int16 range for us.
+		num, err := strconv.ParseInt(value, 10, 16)
+		if err != nil {
+			return err
+		}
+		*dst = Int2{Int: int16(num), Status: Present}
+	default:
+		if originalSrc, ok := underlyingNumberType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Int2", value)
+	}
+
+	return nil
+}
+
+// Get returns the int16 when Present, nil when Null, and the raw Status
+// for any other state.
+func (dst *Int2) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst.Int
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst via the shared int64 assignment helper.
+func (src *Int2) AssignTo(dst interface{}) error {
+	return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+// DecodeText parses the base-10 text format. A nil src is SQL NULL.
+func (dst *Int2) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	n, err := strconv.ParseInt(string(src), 10, 16)
+	if err != nil {
+		return err
+	}
+
+	*dst = Int2{Int: int16(n), Status: Present}
+	return nil
+}
+
+// DecodeBinary parses the 2-byte big-endian binary format. A nil src is
+// SQL NULL.
+func (dst *Int2) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	if len(src) != 2 {
+		return errors.Errorf("invalid length for int2: %v", len(src))
+	}
+
+	n := int16(binary.BigEndian.Uint16(src))
+	*dst = Int2{Int: n, Status: Present}
+	return nil
+}
+
+// EncodeText appends the base-10 text format of src to buf.
+func (src *Int2) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return append(buf, strconv.FormatInt(int64(src.Int), 10)...), nil
+}
+
+// EncodeBinary appends the 2-byte big-endian binary format of src to buf.
+func (src *Int2) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return pgio.AppendInt16(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int2) Scan(src interface{}) error {
+	if src == nil {
+		*dst = Int2{Status: Null}
+		return nil
+	}
+
+	switch src := src.(type) {
+	case int64:
+		// Fixed: the underflow branch previously reported "greater than
+		// maximum" for values below math.MinInt16.
+		if src < math.MinInt16 {
+			return errors.Errorf("%d is less than minimum value for Int2", src)
+		}
+		if src > math.MaxInt16 {
+			return errors.Errorf("%d is greater than maximum value for Int2", src)
+		}
+		*dst = Int2{Int: int16(src), Status: Present}
+		return nil
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy src; the driver may reuse the buffer after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int2) Value() (driver.Value, error) {
+	switch src.Status {
+	case Present:
+		return int64(src.Int), nil
+	case Null:
+		return nil, nil
+	default:
+		return nil, errUndefined
+	}
+}
+
+// MarshalJSON implements the encoding/json Marshaler interface.
+func (src *Int2) MarshalJSON() ([]byte, error) {
+	switch src.Status {
+	case Present:
+		return []byte(strconv.FormatInt(int64(src.Int), 10)), nil
+	case Null:
+		return []byte("null"), nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	return nil, errBadStatus
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int2_array.go b/vendor/github.com/jackc/pgx/pgtype/int2_array.go
new file mode 100644
index 0000000..f50d927
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int2_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// Int2Array represents a PostgreSQL smallint[] (int2[]) array.
+type Int2Array struct {
+	Elements   []Int2
+	Dimensions []ArrayDimension
+	Status     Status
+}
+
+// Set converts src into dst. Supported source types are []int16, []uint16
+// (elements above MaxInt16 are rejected by Int2.Set), and types whose
+// underlying type is one of those slices.
+func (dst *Int2Array) Set(src interface{}) error {
+	// untyped nil and typed nil interfaces are different
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	switch value := src.(type) {
+
+	case []int16:
+		if value == nil {
+			*dst = Int2Array{Status: Null}
+		} else if len(value) == 0 {
+			// Non-nil empty slice becomes a present, dimensionless array.
+			*dst = Int2Array{Status: Present}
+		} else {
+			elements := make([]Int2, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = Int2Array{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	case []uint16:
+		if value == nil {
+			*dst = Int2Array{Status: Null}
+		} else if len(value) == 0 {
+			*dst = Int2Array{Status: Present}
+		} else {
+			elements := make([]Int2, len(value))
+			for i := range value {
+				if err := elements[i].Set(value[i]); err != nil {
+					return err
+				}
+			}
+			*dst = Int2Array{
+				Elements:   elements,
+				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+				Status:     Present,
+			}
+		}
+
+	default:
+		// Fall back to the underlying slice type (handles named slice types).
+		if originalSrc, ok := underlyingSliceType(src); ok {
+			return dst.Set(originalSrc)
+		}
+		return errors.Errorf("cannot convert %v to Int2Array", value)
+	}
+
+	return nil
+}
+
+// Get returns the array itself when Present, nil when Null, and the raw
+// Status for any other state.
+func (dst *Int2Array) Get() interface{} {
+	switch dst.Status {
+	case Present:
+		return dst
+	case Null:
+		return nil
+	default:
+		return dst.Status
+	}
+}
+
+// AssignTo copies src into dst (*[]int16 or *[]uint16), falling back to
+// GetAssignToDstType for derived destination types.
+func (src *Int2Array) AssignTo(dst interface{}) error {
+	switch src.Status {
+	case Present:
+		switch v := dst.(type) {
+
+		case *[]int16:
+			*v = make([]int16, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		case *[]uint16:
+			*v = make([]uint16, len(src.Elements))
+			for i := range src.Elements {
+				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+					return err
+				}
+			}
+			return nil
+
+		default:
+			if nextDst, retry := GetAssignToDstType(dst); retry {
+				return src.AssignTo(nextDst)
+			}
+		}
+	case Null:
+		return NullAssignTo(dst)
+	}
+
+	return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+// DecodeText decodes the PostgreSQL text format of an int2[] value.
+// A nil src represents SQL NULL.
+func (dst *Int2Array) DecodeText(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	uta, err := ParseUntypedTextArray(string(src))
+	if err != nil {
+		return err
+	}
+
+	var elements []Int2
+
+	if len(uta.Elements) > 0 {
+		elements = make([]Int2, len(uta.Elements))
+
+		for i, s := range uta.Elements {
+			var elem Int2
+			var elemSrc []byte
+			// The literal NULL denotes a NULL element; elemSrc stays nil.
+			if s != "NULL" {
+				elemSrc = []byte(s)
+			}
+			err = elem.DecodeText(ci, elemSrc)
+			if err != nil {
+				return err
+			}
+
+			elements[i] = elem
+		}
+	}
+
+	*dst = Int2Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+	return nil
+}
+
+// DecodeBinary decodes the PostgreSQL binary array format.
+// A nil src represents SQL NULL.
+func (dst *Int2Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+	if src == nil {
+		*dst = Int2Array{Status: Null}
+		return nil
+	}
+
+	var arrayHeader ArrayHeader
+	rp, err := arrayHeader.DecodeBinary(ci, src)
+	if err != nil {
+		return err
+	}
+
+	if len(arrayHeader.Dimensions) == 0 {
+		*dst = Int2Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+		return nil
+	}
+
+	// Total element count is the product of all dimension lengths.
+	elementCount := arrayHeader.Dimensions[0].Length
+	for _, d := range arrayHeader.Dimensions[1:] {
+		elementCount *= d.Length
+	}
+
+	elements := make([]Int2, elementCount)
+
+	for i := range elements {
+		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+		rp += 4
+		var elemSrc []byte
+		// A length of -1 marks a NULL element; elemSrc stays nil.
+		if elemLen >= 0 {
+			elemSrc = src[rp : rp+elemLen]
+			rp += elemLen
+		}
+		err = elements[i].DecodeBinary(ci, elemSrc)
+		if err != nil {
+			return err
+		}
+	}
+
+	*dst = Int2Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+	return nil
+}
+
+// EncodeText appends the PostgreSQL text format of src to buf. It returns
+// nil for SQL NULL and errUndefined for an undefined value.
+func (src *Int2Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	if len(src.Dimensions) == 0 {
+		return append(buf, '{', '}'), nil
+	}
+
+	buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts is the multiples of elements that each array lies on. For
+	// example, a single dimension array of length 4 would have a dimElemCounts of
+	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
+	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+	// or '}'.
+	dimElemCounts := make([]int, len(src.Dimensions))
+	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+	for i := len(src.Dimensions) - 2; i > -1; i-- {
+		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+	}
+
+	inElemBuf := make([]byte, 0, 32)
+	for i, elem := range src.Elements {
+		if i > 0 {
+			buf = append(buf, ',')
+		}
+
+		for _, dec := range dimElemCounts {
+			if i%dec == 0 {
+				buf = append(buf, '{')
+			}
+		}
+
+		elemBuf, err := elem.EncodeText(ci, inElemBuf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf == nil {
+			buf = append(buf, `NULL`...)
+		} else {
+			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+		}
+
+		for _, dec := range dimElemCounts {
+			if (i+1)%dec == 0 {
+				buf = append(buf, '}')
+			}
+		}
+	}
+
+	return buf, nil
+}
+
+// EncodeBinary appends the PostgreSQL binary array format of src to buf.
+// It returns nil for SQL NULL and errUndefined for an undefined value.
+func (src *Int2Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+	switch src.Status {
+	case Null:
+		return nil, nil
+	case Undefined:
+		return nil, errUndefined
+	}
+
+	arrayHeader := ArrayHeader{
+		Dimensions: src.Dimensions,
+	}
+
+	if dt, ok := ci.DataTypeForName("int2"); ok {
+		arrayHeader.ElementOID = int32(dt.OID)
+	} else {
+		return nil, errors.Errorf("unable to find oid for type name %v", "int2")
+	}
+
+	for i := range src.Elements {
+		if src.Elements[i].Status == Null {
+			arrayHeader.ContainsNull = true
+			break
+		}
+	}
+
+	buf = arrayHeader.EncodeBinary(ci, buf)
+
+	for i := range src.Elements {
+		sp := len(buf)
+		// Write a -1 length placeholder; patched below unless the element
+		// encodes as NULL (nil elemBuf), in which case -1 stands.
+		buf = pgio.AppendInt32(buf, -1)
+
+		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+		if err != nil {
+			return nil, err
+		}
+		if elemBuf != nil {
+			buf = elemBuf
+			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+		}
+	}
+
+	return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int2Array) Scan(src interface{}) error {
+	if src == nil {
+		return dst.DecodeText(nil, nil)
+	}
+
+	switch src := src.(type) {
+	case string:
+		return dst.DecodeText(nil, []byte(src))
+	case []byte:
+		// Copy src; the driver may reuse the buffer after Scan returns.
+		srcCopy := make([]byte, len(src))
+		copy(srcCopy, src)
+		return dst.DecodeText(nil, srcCopy)
+	}
+
+	return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int2Array) Value() (driver.Value, error) {
+	buf, err := src.EncodeText(nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	if buf == nil {
+		return nil, nil
+	}
+
+	return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4.go b/vendor/github.com/jackc/pgx/pgtype/int4.go
new file mode 100644
index 0000000..261c511
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4.go
@@ -0,0 +1,213 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4 struct {
+ Int int32
+ Status Status
+}
+
+func (dst *Int4) Set(src interface{}) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint8:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int16:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint16:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int32:
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint32:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int64:
+ if value < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case int:
+ if value < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case uint:
+ if value > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", value)
+ }
+ *dst = Int4{Int: int32(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return err
+ }
+ *dst = Int4{Int: int32(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int4", value)
+ }
+
+ return nil
+}
+
+func (dst *Int4) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *Int4) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4{Int: int32(n), Status: Present}
+ return nil
+}
+
+func (dst *Int4) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length for int4: %v", len(src))
+ }
+
+ n := int32(binary.BigEndian.Uint32(src))
+ *dst = Int4{Int: n, Status: Present}
+ return nil
+}
+
+func (src *Int4) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatInt(int64(src.Int), 10)...), nil
+}
+
+func (src *Int4) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendInt32(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int4{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case int64:
+ if src < math.MinInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", src)
+ }
+ if src > math.MaxInt32 {
+ return errors.Errorf("%d is greater than maximum value for Int4", src)
+ }
+ *dst = Int4{Int: int32(src), Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int4) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Int), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Int4) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return []byte(strconv.FormatInt(int64(src.Int), 10)), nil
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Int4) UnmarshalJSON(b []byte) error {
+ var n int32
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4{Int: n, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4_array.go b/vendor/github.com/jackc/pgx/pgtype/int4_array.go
new file mode 100644
index 0000000..6c9418b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4Array struct {
+ Elements []Int4
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Int4Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []int32:
+ if value == nil {
+ *dst = Int4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int4Array{Status: Present}
+ } else {
+ elements := make([]Int4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []uint32:
+ if value == nil {
+ *dst = Int4Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int4Array{Status: Present}
+ } else {
+ elements := make([]Int4, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int4Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int4Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Int4Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]int32:
+ *v = make([]int32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]uint32:
+ *v = make([]uint32, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Int4Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Int4
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Int4, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Int4
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Int4Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Int4Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Int4Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Int4, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Int4Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Int4Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Int4Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("int4"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "int4")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int4Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int4range.go b/vendor/github.com/jackc/pgx/pgtype/int4range.go
new file mode 100644
index 0000000..95ad152
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int4range.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int4range struct {
+ Lower Int4
+ Upper Int4
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Int4range) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Int4range", src)
+}
+
+func (dst *Int4range) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int4range) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Int4range) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4range{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Int4range) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int4range{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Int4range) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Int4range) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int4range) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int4range{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Int4range) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8.go b/vendor/github.com/jackc/pgx/pgtype/int8.go
new file mode 100644
index 0000000..00a8cd0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8.go
@@ -0,0 +1,199 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int8 struct {
+ Int int64
+ Status Status
+}
+
+func (dst *Int8) Set(src interface{}) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint8:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int16:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint16:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int32:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint32:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int64:
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case int:
+ if int64(value) < math.MinInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ if int64(value) > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case uint:
+ if uint64(value) > math.MaxInt64 {
+ return errors.Errorf("%d is greater than maximum value for Int8", value)
+ }
+ *dst = Int8{Int: int64(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return err
+ }
+ *dst = Int8{Int: num, Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int8", value)
+ }
+
+ return nil
+}
+
+func (dst *Int8) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int8) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *Int8) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseInt(string(src), 10, 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8{Int: n, Status: Present}
+ return nil
+}
+
+func (dst *Int8) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for int8: %v", len(src))
+ }
+
+ n := int64(binary.BigEndian.Uint64(src))
+
+ *dst = Int8{Int: n, Status: Present}
+ return nil
+}
+
+func (src *Int8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatInt(src.Int, 10)...), nil
+}
+
+func (src *Int8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendInt64(buf, src.Int), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Int8{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case int64:
+ *dst = Int8{Int: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int8) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Int), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Int8) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return []byte(strconv.FormatInt(src.Int, 10)), nil
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Int8) UnmarshalJSON(b []byte) error {
+ var n int64
+ err := json.Unmarshal(b, &n)
+ if err != nil {
+ return err
+ }
+
+ *dst = Int8{Int: n, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8_array.go b/vendor/github.com/jackc/pgx/pgtype/int8_array.go
new file mode 100644
index 0000000..bb6ce00
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8_array.go
@@ -0,0 +1,328 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Int8Array struct {
+ Elements []Int8
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *Int8Array) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []int64:
+ if value == nil {
+ *dst = Int8Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int8Array{Status: Present}
+ } else {
+ elements := make([]Int8, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int8Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []uint64:
+ if value == nil {
+ *dst = Int8Array{Status: Null}
+ } else if len(value) == 0 {
+ *dst = Int8Array{Status: Present}
+ } else {
+ elements := make([]Int8, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = Int8Array{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Int8Array", value)
+ }
+
+ return nil
+}
+
+func (dst *Int8Array) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Int8Array) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]int64:
+ *v = make([]int64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]uint64:
+ *v = make([]uint64, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Int8Array) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Int8
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Int8, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Int8
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = Int8Array{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *Int8Array) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Int8Array{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = Int8Array{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Int8, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = Int8Array{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *Int8Array) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *Int8Array) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("int8"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "int8")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Int8Array) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Int8Array) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/int8range.go b/vendor/github.com/jackc/pgx/pgtype/int8range.go
new file mode 100644
index 0000000..61d860d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/int8range.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Int8range represents the PostgreSQL int8range type: a range of bigint
// values whose two ends are independently inclusive, exclusive, or unbounded.
type Int8range struct {
	Lower Int8
	Upper Int8
	LowerType BoundType
	UpperType BoundType
	Status Status
}

// Set always fails: conversion from arbitrary Go values into Int8range is
// not supported.
func (dst *Int8range) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to Int8range", src)
}

// Get returns dst itself when present, nil for SQL NULL, and the raw Status
// for any other state (e.g. Undefined).
func (dst *Int8range) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo always fails: assigning a range into native Go values is not
// supported.
func (src *Int8range) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}
+
// DecodeText decodes the PostgreSQL text representation of an int8range
// (e.g. "[1,10)" or "empty") into dst. A nil src means SQL NULL.
func (dst *Int8range) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Int8range{Status: Null}
		return nil
	}

	utr, err := ParseUntypedTextRange(string(src))
	if err != nil {
		return err
	}

	*dst = Int8range{Status: Present}

	dst.LowerType = utr.LowerType
	dst.UpperType = utr.UpperType

	// An empty range carries no bound values to parse.
	if dst.LowerType == Empty {
		return nil
	}

	// Only bounded ends carry a value; Unbounded ends leave the zero Int8.
	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
		if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
			return err
		}
	}

	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
		if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
			return err
		}
	}

	return nil
}
+
// DecodeBinary decodes the PostgreSQL binary wire representation of an
// int8range into dst. A nil src means SQL NULL.
func (dst *Int8range) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Int8range{Status: Null}
		return nil
	}

	ubr, err := ParseUntypedBinaryRange(src)
	if err != nil {
		return err
	}

	*dst = Int8range{Status: Present}

	dst.LowerType = ubr.LowerType
	dst.UpperType = ubr.UpperType

	// An empty range carries no bound values to decode.
	if dst.LowerType == Empty {
		return nil
	}

	// Only bounded ends carry a value; Unbounded ends leave the zero Int8.
	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
		if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
			return err
		}
	}

	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
		if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
			return err
		}
	}

	return nil
}
+
// EncodeText appends the PostgreSQL text representation of the range to buf,
// e.g. "[1,10)" or "empty". It returns (nil, nil) for SQL NULL.
func (src Int8range) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	// Opening delimiter: '(' covers both exclusive and unbounded lower ends.
	switch src.LowerType {
	case Exclusive, Unbounded:
		buf = append(buf, '(')
	case Inclusive:
		buf = append(buf, '[')
	case Empty:
		return append(buf, "empty"...), nil
	default:
		return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
	}

	var err error

	// A bounded end must have a non-NULL value; nil from EncodeText signals NULL.
	if src.LowerType != Unbounded {
		buf, err = src.Lower.EncodeText(ci, buf)
		if err != nil {
			return nil, err
		} else if buf == nil {
			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
		}
	}

	buf = append(buf, ',')

	if src.UpperType != Unbounded {
		buf, err = src.Upper.EncodeText(ci, buf)
		if err != nil {
			return nil, err
		} else if buf == nil {
			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
		}
	}

	switch src.UpperType {
	case Exclusive, Unbounded:
		buf = append(buf, ')')
	case Inclusive:
		buf = append(buf, ']')
	default:
		return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
	}

	return buf, nil
}
+
// EncodeBinary appends the PostgreSQL binary wire representation of the range
// to buf: a flags byte followed by length-prefixed bound values for each
// bounded end. It returns (nil, nil) for SQL NULL.
func (src Int8range) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	// Build the flags byte. Exclusive contributes no flag bit; Empty is a
	// complete encoding by itself.
	var rangeType byte
	switch src.LowerType {
	case Inclusive:
		rangeType |= lowerInclusiveMask
	case Unbounded:
		rangeType |= lowerUnboundedMask
	case Exclusive:
	case Empty:
		return append(buf, emptyMask), nil
	default:
		return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
	}

	switch src.UpperType {
	case Inclusive:
		rangeType |= upperInclusiveMask
	case Unbounded:
		rangeType |= upperUnboundedMask
	case Exclusive:
	default:
		return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
	}

	buf = append(buf, rangeType)

	var err error

	if src.LowerType != Unbounded {
		// Reserve 4 bytes for the bound's length and back-patch it after
		// the value is encoded.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		buf, err = src.Lower.EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if buf == nil {
			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
		}

		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
	}

	if src.UpperType != Unbounded {
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		buf, err = src.Upper.EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if buf == nil {
			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
		}

		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
	}

	return buf, nil
}
+
// Scan implements the database/sql Scanner interface. It accepts the text
// representation as string or []byte; nil maps to SQL NULL.
func (dst *Int8range) Scan(src interface{}) error {
	if src == nil {
		*dst = Int8range{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface using the text
// representation.
func (src Int8range) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/interval.go b/vendor/github.com/jackc/pgx/pgtype/interval.go
new file mode 100644
index 0000000..799ce53
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/interval.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Conversion factors for the microsecond-denominated time-of-day portion of
// an interval.
const (
	microsecondsPerSecond = 1000000
	microsecondsPerMinute = 60 * microsecondsPerSecond
	microsecondsPerHour = 60 * microsecondsPerMinute
)

// Interval mirrors PostgreSQL's interval type, which keeps months, days, and
// microseconds as separate components (their relative lengths are not fixed).
type Interval struct {
	Microseconds int64
	Days int32
	Months int32
	Status Status
}
+
// Set converts src into an Interval. Supported inputs are nil (SQL NULL),
// time.Duration, and pointer types that unwrap to one of those.
func (dst *Interval) Set(src interface{}) error {
	if src == nil {
		*dst = Interval{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case time.Duration:
		// time.Duration counts nanoseconds; divide by 1000 for microseconds.
		*dst = Interval{Microseconds: int64(value) / 1000, Status: Present}
	default:
		if originalSrc, ok := underlyingPtrType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Interval", value)
	}

	return nil
}

// Get returns dst itself when present, nil for SQL NULL, and the raw Status
// for any other state (e.g. Undefined).
func (dst *Interval) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}
+
+func (src *Interval) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *time.Duration:
+ if src.Days > 0 || src.Months > 0 {
+ return errors.Errorf("interval with months or days cannot be decoded into %T", dst)
+ }
+ *v = time.Duration(src.Microseconds) * time.Microsecond
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Interval) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Interval{Status: Null}
+ return nil
+ }
+
+ var microseconds int64
+ var days int32
+ var months int32
+
+ parts := strings.Split(string(src), " ")
+
+ for i := 0; i < len(parts)-1; i += 2 {
+ scalar, err := strconv.ParseInt(parts[i], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval format")
+ }
+
+ switch parts[i+1] {
+ case "year", "years":
+ months += int32(scalar * 12)
+ case "mon", "mons":
+ months += int32(scalar)
+ case "day", "days":
+ days = int32(scalar)
+ }
+ }
+
+ if len(parts)%2 == 1 {
+ timeParts := strings.SplitN(parts[len(parts)-1], ":", 3)
+ if len(timeParts) != 3 {
+ return errors.Errorf("bad interval format")
+ }
+
+ var negative bool
+ if timeParts[0][0] == '-' {
+ negative = true
+ timeParts[0] = timeParts[0][1:]
+ }
+
+ hours, err := strconv.ParseInt(timeParts[0], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval hour format: %s", timeParts[0])
+ }
+
+ minutes, err := strconv.ParseInt(timeParts[1], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval minute format: %s", timeParts[1])
+ }
+
+ secondParts := strings.SplitN(timeParts[2], ".", 2)
+
+ seconds, err := strconv.ParseInt(secondParts[0], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval second format: %s", secondParts[0])
+ }
+
+ var uSeconds int64
+ if len(secondParts) == 2 {
+ uSeconds, err = strconv.ParseInt(secondParts[1], 10, 64)
+ if err != nil {
+ return errors.Errorf("bad interval decimal format: %s", secondParts[1])
+ }
+
+ for i := 0; i < 6-len(secondParts[1]); i++ {
+ uSeconds *= 10
+ }
+ }
+
+ microseconds = hours * microsecondsPerHour
+ microseconds += minutes * microsecondsPerMinute
+ microseconds += seconds * microsecondsPerSecond
+ microseconds += uSeconds
+
+ if negative {
+ microseconds = -microseconds
+ }
+ }
+
+ *dst = Interval{Months: months, Days: days, Microseconds: microseconds, Status: Present}
+ return nil
+}
+
// DecodeBinary decodes the 16-byte binary wire representation of an interval:
// int64 microseconds, int32 days, int32 months, all big-endian.
// A nil src means SQL NULL.
func (dst *Interval) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Interval{Status: Null}
		return nil
	}

	if len(src) != 16 {
		return errors.Errorf("Received an invalid size for a interval: %d", len(src))
	}

	microseconds := int64(binary.BigEndian.Uint64(src))
	days := int32(binary.BigEndian.Uint32(src[8:]))
	months := int32(binary.BigEndian.Uint32(src[12:]))

	*dst = Interval{Microseconds: microseconds, Days: days, Months: months, Status: Present}
	return nil
}
+
// EncodeText appends a text representation of the interval to buf in the form
// "[N mon ][N day ][-]HH:MM:SS.ffffff". It returns (nil, nil) for SQL NULL.
func (src *Interval) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if src.Months != 0 {
		buf = append(buf, strconv.FormatInt(int64(src.Months), 10)...)
		buf = append(buf, " mon "...)
	}

	if src.Days != 0 {
		buf = append(buf, strconv.FormatInt(int64(src.Days), 10)...)
		buf = append(buf, " day "...)
	}

	// Emit the sign once, then format the absolute time-of-day portion.
	absMicroseconds := src.Microseconds
	if absMicroseconds < 0 {
		absMicroseconds = -absMicroseconds
		buf = append(buf, '-')
	}

	hours := absMicroseconds / microsecondsPerHour
	minutes := (absMicroseconds % microsecondsPerHour) / microsecondsPerMinute
	seconds := (absMicroseconds % microsecondsPerMinute) / microsecondsPerSecond
	microseconds := absMicroseconds % microsecondsPerSecond

	timeStr := fmt.Sprintf("%02d:%02d:%02d.%06d", hours, minutes, seconds, microseconds)
	return append(buf, timeStr...), nil
}
+
// EncodeBinary appends the 16-byte binary wire representation of the interval
// to buf: int64 microseconds, int32 days, int32 months, all big-endian.
// It returns (nil, nil) for SQL NULL.
func (src *Interval) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = pgio.AppendInt64(buf, src.Microseconds)
	buf = pgio.AppendInt32(buf, src.Days)
	return pgio.AppendInt32(buf, src.Months), nil
}

// Scan implements the database/sql Scanner interface. It accepts the text
// representation as string or []byte; nil maps to SQL NULL.
func (dst *Interval) Scan(src interface{}) error {
	if src == nil {
		*dst = Interval{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface using the text
// representation.
func (src *Interval) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/json.go b/vendor/github.com/jackc/pgx/pgtype/json.go
new file mode 100644
index 0000000..ef8231b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/json.go
@@ -0,0 +1,161 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
// JSON represents the PostgreSQL json type as its raw byte content.
type JSON struct {
	Bytes []byte
	Status Status
}

// Set converts src into a JSON value. Strings and []byte are taken as
// already-encoded JSON; any other value is marshaled with encoding/json.
// A nil src or nil pointer/slice maps to SQL NULL.
func (dst *JSON) Set(src interface{}) error {
	if src == nil {
		*dst = JSON{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case string:
		*dst = JSON{Bytes: []byte(value), Status: Present}
	case *string:
		if value == nil {
			*dst = JSON{Status: Null}
		} else {
			*dst = JSON{Bytes: []byte(*value), Status: Present}
		}
	case []byte:
		if value == nil {
			*dst = JSON{Status: Null}
		} else {
			// Note: the slice is stored without copying.
			*dst = JSON{Bytes: value, Status: Present}
		}
	// Encode* methods are defined on *JSON. If JSON is passed directly then the
	// struct itself would be encoded instead of Bytes. This is clearly a footgun
	// so detect and return an error. See https://github.com/jackc/pgx/issues/350.
	case JSON:
		return errors.New("use pointer to pgtype.JSON instead of value")
	// Same as above but for JSONB (because they share implementation)
	case JSONB:
		return errors.New("use pointer to pgtype.JSONB instead of value")

	default:
		buf, err := json.Marshal(value)
		if err != nil {
			return err
		}
		*dst = JSON{Bytes: buf, Status: Present}
	}

	return nil
}
+
// Get returns the decoded JSON document when present (or dst itself if the
// bytes fail to unmarshal), nil for SQL NULL, and the raw Status otherwise.
func (dst *JSON) Get() interface{} {
	switch dst.Status {
	case Present:
		var i interface{}
		err := json.Unmarshal(dst.Bytes, &i)
		if err != nil {
			// Fall back to the wrapper itself when Bytes is not valid JSON.
			return dst
		}
		return i
	case Null:
		return nil
	default:
		return dst.Status
	}
}
+
+func (src *JSON) AssignTo(dst interface{}) error {
+ switch v := dst.(type) {
+ case *string:
+ if src.Status != Present {
+ v = nil
+ } else {
+ *v = string(src.Bytes)
+ }
+ case **string:
+ *v = new(string)
+ return src.AssignTo(*v)
+ case *[]byte:
+ if src.Status != Present {
+ *v = nil
+ } else {
+ buf := make([]byte, len(src.Bytes))
+ copy(buf, src.Bytes)
+ *v = buf
+ }
+ default:
+ data := src.Bytes
+ if data == nil || src.Status != Present {
+ data = []byte("null")
+ }
+
+ return json.Unmarshal(data, dst)
+ }
+
+ return nil
+}
+
// DecodeText stores the raw JSON text in dst. A nil src means SQL NULL.
// Note: src is retained without copying.
func (dst *JSON) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = JSON{Status: Null}
		return nil
	}

	*dst = JSON{Bytes: src, Status: Present}
	return nil
}

// DecodeBinary is identical to DecodeText: the json type's binary wire
// format is the same as its text format.
func (dst *JSON) DecodeBinary(ci *ConnInfo, src []byte) error {
	return dst.DecodeText(ci, src)
}

// EncodeText appends the raw JSON bytes to buf. It returns (nil, nil) for
// SQL NULL.
func (src *JSON) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	return append(buf, src.Bytes...), nil
}

// EncodeBinary is identical to EncodeText (see DecodeBinary).
func (src *JSON) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return src.EncodeText(ci, buf)
}
+
// Scan implements the database/sql Scanner interface. It accepts JSON text
// as string or []byte; nil maps to SQL NULL.
func (dst *JSON) Scan(src interface{}) error {
	if src == nil {
		*dst = JSON{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface, returning the
// JSON text as a string (or nil for SQL NULL).
func (src *JSON) Value() (driver.Value, error) {
	switch src.Status {
	case Present:
		return string(src.Bytes), nil
	case Null:
		return nil, nil
	default:
		return nil, errUndefined
	}
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/jsonb.go b/vendor/github.com/jackc/pgx/pgtype/jsonb.go
new file mode 100644
index 0000000..c315c58
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/jsonb.go
@@ -0,0 +1,70 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/pkg/errors"
+)
+
// JSONB represents the PostgreSQL jsonb type. It shares JSON's in-memory
// representation; only the binary wire format differs (a 1-byte version
// header precedes the JSON text).
type JSONB JSON

// Set delegates to JSON.Set.
func (dst *JSONB) Set(src interface{}) error {
	return (*JSON)(dst).Set(src)
}

// Get delegates to JSON.Get.
func (dst *JSONB) Get() interface{} {
	return (*JSON)(dst).Get()
}

// AssignTo delegates to JSON.AssignTo.
func (src *JSONB) AssignTo(dst interface{}) error {
	return (*JSON)(src).AssignTo(dst)
}

// DecodeText delegates to JSON.DecodeText (the text formats are identical).
func (dst *JSONB) DecodeText(ci *ConnInfo, src []byte) error {
	return (*JSON)(dst).DecodeText(ci, src)
}

// DecodeBinary decodes the jsonb binary wire format: a single version byte
// (currently 1) followed by the JSON text. A nil src means SQL NULL.
// Note: the remainder of src is retained without copying.
func (dst *JSONB) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = JSONB{Status: Null}
		return nil
	}

	if len(src) == 0 {
		return errors.Errorf("jsonb too short")
	}

	if src[0] != 1 {
		return errors.Errorf("unknown jsonb version number %d", src[0])
	}

	*dst = JSONB{Bytes: src[1:], Status: Present}
	return nil

}
+
// EncodeText delegates to JSON.EncodeText (the text formats are identical).
func (src *JSONB) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*JSON)(src).EncodeText(ci, buf)
}

// EncodeBinary appends the jsonb binary wire format to buf: version byte 1
// followed by the JSON text. It returns (nil, nil) for SQL NULL.
func (src *JSONB) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = append(buf, 1)
	return append(buf, src.Bytes...), nil
}

// Scan implements the database/sql Scanner interface by delegating to
// JSON.Scan.
func (dst *JSONB) Scan(src interface{}) error {
	return (*JSON)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface by delegating to
// JSON.Value.
func (src *JSONB) Value() (driver.Value, error) {
	return (*JSON)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/line.go b/vendor/github.com/jackc/pgx/pgtype/line.go
new file mode 100644
index 0000000..f6eadf0
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/line.go
@@ -0,0 +1,143 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Line represents the PostgreSQL line type, whose text form is "{A,B,C}"
// (per EncodeText/DecodeText below).
type Line struct {
	A, B, C float64
	Status Status
}

// Set always fails: conversion from arbitrary Go values into Line is not
// supported.
func (dst *Line) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to Line", src)
}

// Get returns dst itself when present, nil for SQL NULL, and the raw Status
// for any other state (e.g. Undefined).
func (dst *Line) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo always fails: assigning a Line into native Go values is not
// supported.
func (src *Line) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}
+
// DecodeText parses the text representation "{A,B,C}" into dst. A nil src
// means SQL NULL.
func (dst *Line) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Line{Status: Null}
		return nil
	}

	// 7 is the length of the shortest possible input, e.g. "{0,0,0}".
	if len(src) < 7 {
		return errors.Errorf("invalid length for Line: %v", len(src))
	}

	// Strip the surrounding braces, then split the three coefficients.
	parts := strings.SplitN(string(src[1:len(src)-1]), ",", 3)
	if len(parts) < 3 {
		return errors.Errorf("invalid format for line")
	}

	a, err := strconv.ParseFloat(parts[0], 64)
	if err != nil {
		return err
	}

	b, err := strconv.ParseFloat(parts[1], 64)
	if err != nil {
		return err
	}

	c, err := strconv.ParseFloat(parts[2], 64)
	if err != nil {
		return err
	}

	*dst = Line{A: a, B: b, C: c, Status: Present}
	return nil
}
+
// DecodeBinary decodes the 24-byte binary representation of a line: three
// big-endian float64 coefficients A, B, C. A nil src means SQL NULL.
func (dst *Line) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Line{Status: Null}
		return nil
	}

	if len(src) != 24 {
		return errors.Errorf("invalid length for Line: %v", len(src))
	}

	a := binary.BigEndian.Uint64(src)
	b := binary.BigEndian.Uint64(src[8:])
	c := binary.BigEndian.Uint64(src[16:])

	*dst = Line{
		A: math.Float64frombits(a),
		B: math.Float64frombits(b),
		C: math.Float64frombits(c),
		Status: Present,
	}
	return nil
}
+
// EncodeText appends the text representation "{A,B,C}" to buf. It returns
// (nil, nil) for SQL NULL.
// NOTE(review): %f fixes six decimal places, so precision beyond that is
// lost on the text path — confirm whether round-tripping matters to callers.
func (src *Line) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	return append(buf, fmt.Sprintf(`{%f,%f,%f}`, src.A, src.B, src.C)...), nil
}

// EncodeBinary appends the 24-byte binary representation (three big-endian
// float64 values) to buf. It returns (nil, nil) for SQL NULL.
func (src *Line) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = pgio.AppendUint64(buf, math.Float64bits(src.A))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.B))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.C))
	return buf, nil
}
+
// Scan implements the database/sql Scanner interface. It accepts the text
// representation as string or []byte; nil maps to SQL NULL.
func (dst *Line) Scan(src interface{}) error {
	if src == nil {
		*dst = Line{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface using the text
// representation.
func (src *Line) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/lseg.go b/vendor/github.com/jackc/pgx/pgtype/lseg.go
new file mode 100644
index 0000000..a9d740c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/lseg.go
@@ -0,0 +1,161 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Lseg represents the PostgreSQL lseg (line segment) type as its two
// endpoints.
type Lseg struct {
	P [2]Vec2
	Status Status
}

// Set always fails: conversion from arbitrary Go values into Lseg is not
// supported.
func (dst *Lseg) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to Lseg", src)
}

// Get returns dst itself when present, nil for SQL NULL, and the raw Status
// for any other state (e.g. Undefined).
func (dst *Lseg) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo always fails: assigning an Lseg into native Go values is not
// supported.
func (src *Lseg) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}
+
// DecodeText parses the text representation of a line segment, of the shape
// "[(x1,y1),(x2,y2)]", into dst. A nil src means SQL NULL.
//
// NOTE(review): the IndexByte results below are not checked for -1; malformed
// input that passes the length check but lacks the expected delimiters would
// panic on the slice expressions. Confirm inputs always come from the server.
func (dst *Lseg) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Lseg{Status: Null}
		return nil
	}

	// 11 appears to be the minimum well-formed input length — verify.
	if len(src) < 11 {
		return errors.Errorf("invalid length for Lseg: %v", len(src))
	}

	// Skip the leading delimiter pair, then walk the string cursor-style,
	// cutting each coordinate at the next ',' or ')'.
	str := string(src[2:])

	var end int
	end = strings.IndexByte(str, ',')

	x1, err := strconv.ParseFloat(str[:end], 64)
	if err != nil {
		return err
	}

	str = str[end+1:]
	end = strings.IndexByte(str, ')')

	y1, err := strconv.ParseFloat(str[:end], 64)
	if err != nil {
		return err
	}

	str = str[end+3:]
	end = strings.IndexByte(str, ',')

	x2, err := strconv.ParseFloat(str[:end], 64)
	if err != nil {
		return err
	}

	// Drop the trailing delimiters after the final coordinate.
	str = str[end+1 : len(str)-2]

	y2, err := strconv.ParseFloat(str, 64)
	if err != nil {
		return err
	}

	*dst = Lseg{P: [2]Vec2{{x1, y1}, {x2, y2}}, Status: Present}
	return nil
}
+
// DecodeBinary decodes the 32-byte binary representation of a line segment:
// four big-endian float64 values x1, y1, x2, y2. A nil src means SQL NULL.
func (dst *Lseg) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Lseg{Status: Null}
		return nil
	}

	if len(src) != 32 {
		return errors.Errorf("invalid length for Lseg: %v", len(src))
	}

	x1 := binary.BigEndian.Uint64(src)
	y1 := binary.BigEndian.Uint64(src[8:])
	x2 := binary.BigEndian.Uint64(src[16:])
	y2 := binary.BigEndian.Uint64(src[24:])

	*dst = Lseg{
		P: [2]Vec2{
			{math.Float64frombits(x1), math.Float64frombits(y1)},
			{math.Float64frombits(x2), math.Float64frombits(y2)},
		},
		Status: Present,
	}
	return nil
}
+
// EncodeText appends the text representation "(x1,y1),(x2,y2)" to buf.
// It returns (nil, nil) for SQL NULL.
func (src *Lseg) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = append(buf, fmt.Sprintf(`(%f,%f),(%f,%f)`,
		src.P[0].X, src.P[0].Y, src.P[1].X, src.P[1].Y)...)
	return buf, nil
}

// EncodeBinary appends the 32-byte binary representation (four big-endian
// float64 values) to buf. It returns (nil, nil) for SQL NULL.
func (src *Lseg) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].X))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[0].Y))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].X))
	buf = pgio.AppendUint64(buf, math.Float64bits(src.P[1].Y))
	return buf, nil
}

// Scan implements the database/sql Scanner interface. It accepts the text
// representation as string or []byte; nil maps to SQL NULL.
func (dst *Lseg) Scan(src interface{}) error {
	if src == nil {
		*dst = Lseg{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface using the text
// representation.
func (src *Lseg) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/macaddr.go b/vendor/github.com/jackc/pgx/pgtype/macaddr.go
new file mode 100644
index 0000000..4c6e221
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/macaddr.go
@@ -0,0 +1,154 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "net"
+
+ "github.com/pkg/errors"
+)
+
// Macaddr represents the PostgreSQL macaddr type as a net.HardwareAddr.
type Macaddr struct {
	Addr net.HardwareAddr
	Status Status
}

// Set converts src into a Macaddr. Supported inputs are nil (SQL NULL),
// net.HardwareAddr (copied), a string parseable by net.ParseMAC, and pointer
// types that unwrap to one of those.
func (dst *Macaddr) Set(src interface{}) error {
	if src == nil {
		*dst = Macaddr{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case net.HardwareAddr:
		// Copy so later mutation of the caller's slice cannot affect dst.
		addr := make(net.HardwareAddr, len(value))
		copy(addr, value)
		*dst = Macaddr{Addr: addr, Status: Present}
	case string:
		addr, err := net.ParseMAC(value)
		if err != nil {
			return err
		}
		*dst = Macaddr{Addr: addr, Status: Present}
	default:
		if originalSrc, ok := underlyingPtrType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Macaddr", value)
	}

	return nil
}

// Get returns the address when present, nil for SQL NULL, and the raw Status
// for any other state (e.g. Undefined).
func (dst *Macaddr) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst.Addr
	case Null:
		return nil
	default:
		return dst.Status
	}
}
+
// AssignTo assigns src to dst. Supported destinations are *net.HardwareAddr
// (receives a copy), *string (the formatted address), and pointer types
// unwrapped via GetAssignToDstType.
func (src *Macaddr) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *net.HardwareAddr:
			// Copy so the caller cannot mutate src's backing array.
			*v = make(net.HardwareAddr, len(src.Addr))
			copy(*v, src.Addr)
			return nil
		case *string:
			*v = src.Addr.String()
			return nil
		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText parses a textual MAC address via net.ParseMAC. A nil src means
// SQL NULL.
func (dst *Macaddr) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Macaddr{Status: Null}
		return nil
	}

	addr, err := net.ParseMAC(string(src))
	if err != nil {
		return err
	}

	*dst = Macaddr{Addr: addr, Status: Present}
	return nil
}

// DecodeBinary decodes the 6-byte binary representation of a MAC address.
// A nil src means SQL NULL.
func (dst *Macaddr) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Macaddr{Status: Null}
		return nil
	}

	if len(src) != 6 {
		return errors.Errorf("Received an invalid size for a macaddr: %d", len(src))
	}

	// Copy so dst does not alias the read buffer.
	addr := make(net.HardwareAddr, 6)
	copy(addr, src)

	*dst = Macaddr{Addr: addr, Status: Present}

	return nil
}
+
// EncodeText appends the formatted MAC address (net.HardwareAddr.String) to
// buf. It returns (nil, nil) for SQL NULL.
func (src *Macaddr) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	return append(buf, src.Addr.String()...), nil
}

// EncodeBinary appends the raw address bytes to buf. It returns (nil, nil)
// for SQL NULL.
func (src *Macaddr) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	return append(buf, src.Addr...), nil
}

// Scan implements the database/sql Scanner interface. It accepts the text
// representation as string or []byte; nil maps to SQL NULL.
func (dst *Macaddr) Scan(src interface{}) error {
	if src == nil {
		*dst = Macaddr{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy before decoding: database/sql may reuse the buffer after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface using the text
// representation.
func (src *Macaddr) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/name.go b/vendor/github.com/jackc/pgx/pgtype/name.go
new file mode 100644
index 0000000..af064a8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/name.go
@@ -0,0 +1,58 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
// Name is a type used for PostgreSQL's special 63-byte
// name data type, used for identifiers like table names.
// The pg_class.relname column is a good example of where the
// name data type is used.
//
// Note that the underlying Go data type of pgx.Name is string,
// so there is no way to enforce the 63-byte length. Inputting
// a longer name into PostgreSQL will result in silent truncation
// to 63 bytes.
//
// Also, if you have custom-compiled PostgreSQL and set
// NAMEDATALEN to a different value, obviously that number of
// bytes applies, rather than the default 63.
//
// All methods simply delegate to the underlying Text implementation.
type Name Text

// Set delegates to Text.Set.
func (dst *Name) Set(src interface{}) error {
	return (*Text)(dst).Set(src)
}

// Get delegates to Text.Get.
func (dst *Name) Get() interface{} {
	return (*Text)(dst).Get()
}

// AssignTo delegates to Text.AssignTo.
func (src *Name) AssignTo(dst interface{}) error {
	return (*Text)(src).AssignTo(dst)
}

// DecodeText delegates to Text.DecodeText.
func (dst *Name) DecodeText(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeText(ci, src)
}

// DecodeBinary delegates to Text.DecodeBinary.
func (dst *Name) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*Text)(dst).DecodeBinary(ci, src)
}

// EncodeText delegates to Text.EncodeText.
func (src *Name) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeText(ci, buf)
}

// EncodeBinary delegates to Text.EncodeBinary.
func (src *Name) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*Text)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *Name) Scan(src interface{}) error {
	return (*Text)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *Name) Value() (driver.Value, error) {
	return (*Text)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numeric.go b/vendor/github.com/jackc/pgx/pgtype/numeric.go
new file mode 100644
index 0000000..fb63df7
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numeric.go
@@ -0,0 +1,600 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// PostgreSQL internal numeric storage uses 16-bit "digits" with base of 10,000
// (NBASE in src/backend/utils/adt/numeric.c of the PostgreSQL sources).
const nbase = 10000
+
// Small big.Int constants shared by the encode/decode paths so they are
// allocated exactly once instead of per call.
var big0 *big.Int = big.NewInt(0)
var big1 *big.Int = big.NewInt(1)
var big10 *big.Int = big.NewInt(10)
var big100 *big.Int = big.NewInt(100)
var big1000 *big.Int = big.NewInt(1000)

// Signed integer bounds used by AssignTo for overflow checks.
var bigMaxInt8 *big.Int = big.NewInt(math.MaxInt8)
var bigMinInt8 *big.Int = big.NewInt(math.MinInt8)
var bigMaxInt16 *big.Int = big.NewInt(math.MaxInt16)
var bigMinInt16 *big.Int = big.NewInt(math.MinInt16)
var bigMaxInt32 *big.Int = big.NewInt(math.MaxInt32)
var bigMinInt32 *big.Int = big.NewInt(math.MinInt32)
var bigMaxInt64 *big.Int = big.NewInt(math.MaxInt64)
var bigMinInt64 *big.Int = big.NewInt(math.MinInt64)
var bigMaxInt *big.Int = big.NewInt(int64(maxInt))
var bigMinInt *big.Int = big.NewInt(int64(minInt))

// Unsigned integer bounds used by AssignTo for overflow checks.
// MaxUint64 does not fit in an int64 literal, hence SetUint64.
var bigMaxUint8 *big.Int = big.NewInt(math.MaxUint8)
var bigMaxUint16 *big.Int = big.NewInt(math.MaxUint16)
var bigMaxUint32 *big.Int = big.NewInt(math.MaxUint32)
var bigMaxUint64 *big.Int = (&big.Int{}).SetUint64(uint64(math.MaxUint64))
var bigMaxUint *big.Int = (&big.Int{}).SetUint64(uint64(maxUint))

// Powers of the 10,000 base, used when folding groups of wire digits into
// the accumulator (see DecodeBinary / nbaseDigitsToInt64).
var bigNBase *big.Int = big.NewInt(nbase)
var bigNBaseX2 *big.Int = big.NewInt(nbase * nbase)
var bigNBaseX3 *big.Int = big.NewInt(nbase * nbase * nbase)
var bigNBaseX4 *big.Int = big.NewInt(nbase * nbase * nbase * nbase)
+
// Numeric represents the PostgreSQL numeric type. When Status is Present
// the value is Int * 10^Exp, giving arbitrary precision without floats.
type Numeric struct {
	Int    *big.Int // significand
	Exp    int32    // base-10 exponent
	Status Status
}
+
+func (dst *Numeric) Set(src interface{}) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case float32:
+ num, exp, err := parseNumericString(strconv.FormatFloat(float64(value), 'f', -1, 64))
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ case float64:
+ num, exp, err := parseNumericString(strconv.FormatFloat(value, 'f', -1, 64))
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ case int8:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint8:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int16:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint16:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int32:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint32:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case int64:
+ *dst = Numeric{Int: big.NewInt(value), Status: Present}
+ case uint64:
+ *dst = Numeric{Int: (&big.Int{}).SetUint64(value), Status: Present}
+ case int:
+ *dst = Numeric{Int: big.NewInt(int64(value)), Status: Present}
+ case uint:
+ *dst = Numeric{Int: (&big.Int{}).SetUint64(uint64(value)), Status: Present}
+ case string:
+ num, exp, err := parseNumericString(value)
+ if err != nil {
+ return err
+ }
+ *dst = Numeric{Int: num, Exp: exp, Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Numeric", value)
+ }
+
+ return nil
+}
+
+func (dst *Numeric) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Numeric) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *float32:
+ f, err := src.toFloat64()
+ if err != nil {
+ return err
+ }
+ return float64AssignTo(f, src.Status, dst)
+ case *float64:
+ f, err := src.toFloat64()
+ if err != nil {
+ return err
+ }
+ return float64AssignTo(f, src.Status, dst)
+ case *int:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int(normalizedInt.Int64())
+ case *int8:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt8) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt8) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int8(normalizedInt.Int64())
+ case *int16:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt16) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt16) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int16(normalizedInt.Int64())
+ case *int32:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt32) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt32) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = int32(normalizedInt.Int64())
+ case *int64:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(bigMaxInt64) > 0 {
+ return errors.Errorf("%v is greater than maximum value for %T", normalizedInt, *v)
+ }
+ if normalizedInt.Cmp(bigMinInt64) < 0 {
+ return errors.Errorf("%v is less than minimum value for %T", normalizedInt, *v)
+ }
+ *v = normalizedInt.Int64()
+ case *uint:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint(normalizedInt.Uint64())
+ case *uint8:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint8) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint8(normalizedInt.Uint64())
+ case *uint16:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint16) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint16(normalizedInt.Uint64())
+ case *uint32:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint32) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = uint32(normalizedInt.Uint64())
+ case *uint64:
+ normalizedInt, err := src.toBigInt()
+ if err != nil {
+ return err
+ }
+ if normalizedInt.Cmp(big0) < 0 {
+ return errors.Errorf("%d is less than zero for %T", normalizedInt, *v)
+ } else if normalizedInt.Cmp(bigMaxUint64) > 0 {
+ return errors.Errorf("%d is greater than maximum value for %T", normalizedInt, *v)
+ }
+ *v = normalizedInt.Uint64()
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return nil
+}
+
// toBigInt returns the value scaled to an integer (Int * 10^Exp). It
// errors when the value has a non-zero fractional part.
//
// NOTE(review): when Exp == 0 the internal Int is returned without
// copying, so callers must treat the result as read-only.
func (dst *Numeric) toBigInt() (*big.Int, error) {
	if dst.Exp == 0 {
		return dst.Int, nil
	}

	num := &big.Int{}
	num.Set(dst.Int)
	if dst.Exp > 0 {
		// Positive exponent: multiply out by 10^Exp; always integral.
		mul := &big.Int{}
		mul.Exp(big10, big.NewInt(int64(dst.Exp)), nil)
		num.Mul(num, mul)
		return num, nil
	}

	// Negative exponent: divide by 10^(-Exp) and require an exact result.
	div := &big.Int{}
	div.Exp(big10, big.NewInt(int64(-dst.Exp)), nil)
	remainder := &big.Int{}
	num.DivMod(num, div, remainder)
	if remainder.Cmp(big0) != 0 {
		return nil, errors.Errorf("cannot convert %v to integer", dst)
	}
	return num, nil
}
+
+func (src *Numeric) toFloat64() (float64, error) {
+ f, err := strconv.ParseFloat(src.Int.String(), 64)
+ if err != nil {
+ return 0, err
+ }
+ if src.Exp > 0 {
+ for i := 0; i < int(src.Exp); i++ {
+ f *= 10
+ }
+ } else if src.Exp < 0 {
+ for i := 0; i > int(src.Exp); i-- {
+ f /= 10
+ }
+ }
+ return f, nil
+}
+
// DecodeText sets dst from the PostgreSQL text representation of numeric
// (a plain decimal string such as "3.14"). A nil src means SQL NULL.
func (dst *Numeric) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Numeric{Status: Null}
		return nil
	}

	num, exp, err := parseNumericString(string(src))
	if err != nil {
		return err
	}

	*dst = Numeric{Int: num, Exp: exp, Status: Present}
	return nil
}
+
+func parseNumericString(str string) (n *big.Int, exp int32, err error) {
+ parts := strings.SplitN(str, ".", 2)
+ digits := strings.Join(parts, "")
+
+ if len(parts) > 1 {
+ exp = int32(-len(parts[1]))
+ } else {
+ for len(digits) > 1 && digits[len(digits)-1] == '0' {
+ digits = digits[:len(digits)-1]
+ exp++
+ }
+ }
+
+ accum := &big.Int{}
+ if _, ok := accum.SetString(digits, 10); !ok {
+ return nil, 0, errors.Errorf("%s is not a number", str)
+ }
+
+ return accum, exp, nil
+}
+
// DecodeBinary sets dst from the PostgreSQL binary representation of
// numeric. The wire format is four big-endian int16 header fields —
// ndigits, weight, sign, dscale — followed by ndigits base-10,000
// digits of 16 bits each. A nil src means SQL NULL.
func (dst *Numeric) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Numeric{Status: Null}
		return nil
	}

	if len(src) < 8 {
		return errors.Errorf("numeric incomplete %v", src)
	}

	rp := 0
	ndigits := int16(binary.BigEndian.Uint16(src[rp:]))
	rp += 2

	// Zero is transmitted with no digits at all.
	if ndigits == 0 {
		*dst = Numeric{Int: big.NewInt(0), Status: Present}
		return nil
	}

	weight := int16(binary.BigEndian.Uint16(src[rp:]))
	rp += 2
	sign := int16(binary.BigEndian.Uint16(src[rp:]))
	rp += 2
	dscale := int16(binary.BigEndian.Uint16(src[rp:]))
	rp += 2

	if len(src[rp:]) < int(ndigits)*2 {
		return errors.Errorf("numeric incomplete %v", src)
	}

	accum := &big.Int{}

	// Fold the base-10,000 digits into accum in groups of up to four,
	// so each group needs only one big.Int multiply-and-add.
	for i := 0; i < int(ndigits+3)/4; i++ {
		int64accum, bytesRead, digitsRead := nbaseDigitsToInt64(src[rp:])
		rp += bytesRead

		if i > 0 {
			// Shift the accumulator left by the number of nbase
			// digits just read before adding them.
			var mul *big.Int
			switch digitsRead {
			case 1:
				mul = bigNBase
			case 2:
				mul = bigNBaseX2
			case 3:
				mul = bigNBaseX3
			case 4:
				mul = bigNBaseX4
			default:
				return errors.Errorf("invalid digitsRead: %d (this can't happen)", digitsRead)
			}
			accum.Mul(accum, mul)
		}

		accum.Add(accum, big.NewInt(int64accum))
	}

	// weight is the nbase exponent of the first digit; convert to a
	// base-10 exponent for the digits we accumulated.
	exp := (int32(weight) - int32(ndigits) + 1) * 4

	// Reconcile with dscale (the declared count of decimal fraction
	// digits), padding or trimming decimal digits as needed.
	if dscale > 0 {
		fracNBaseDigits := ndigits - weight - 1
		fracDecimalDigits := fracNBaseDigits * 4

		if dscale > fracDecimalDigits {
			multCount := int(dscale - fracDecimalDigits)
			for i := 0; i < multCount; i++ {
				accum.Mul(accum, big10)
				exp--
			}
		} else if dscale < fracDecimalDigits {
			divCount := int(fracDecimalDigits - dscale)
			for i := 0; i < divCount; i++ {
				accum.Div(accum, big10)
				exp++
			}
		}
	}

	// Normalize integral values by stripping trailing decimal zeros
	// into the exponent. NOTE(review): this loop assumes accum != 0 —
	// PostgreSQL transmits zero as ndigits == 0, handled above.
	reduced := &big.Int{}
	remainder := &big.Int{}
	if exp >= 0 {
		for {
			reduced.DivMod(accum, big10, remainder)
			if remainder.Cmp(big0) != 0 {
				break
			}
			accum.Set(reduced)
			exp++
		}
	}

	// Non-zero sign marks a negative value on the wire.
	if sign != 0 {
		accum.Neg(accum)
	}

	*dst = Numeric{Int: accum, Exp: exp, Status: Present}

	return nil

}
+
// nbaseDigitsToInt64 reads up to four base-10,000 digits (16 bits each,
// big-endian) from src and folds them into one int64 accumulator. It
// reports the number of bytes consumed and digits read.
func nbaseDigitsToInt64(src []byte) (accum int64, bytesRead, digitsRead int) {
	// Local copy of the package-wide base so the arithmetic below is
	// self-contained; four nbase digits always fit in an int64.
	const nbase = 10000

	digitsRead = len(src) / 2
	if digitsRead > 4 {
		digitsRead = 4
	}

	for i := 0; i < digitsRead; i++ {
		accum = accum*nbase + int64(binary.BigEndian.Uint16(src[2*i:]))
	}

	return accum, 2 * digitsRead, digitsRead
}
+
+func (src *Numeric) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, src.Int.String()...)
+ buf = append(buf, 'e')
+ buf = append(buf, strconv.FormatInt(int64(src.Exp), 10)...)
+ return buf, nil
+}
+
// EncodeBinary appends the PostgreSQL binary representation of numeric:
// int16 header fields ndigits, weight, sign, dscale, then the value as
// big-endian base-10,000 digits (see DecodeBinary for the inverse).
func (src *Numeric) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	// 0x4000 is the wire flag for a negative numeric.
	var sign int16
	if src.Int.Cmp(big0) < 0 {
		sign = 16384
	}

	absInt := &big.Int{}
	wholePart := &big.Int{}
	fracPart := &big.Int{}
	remainder := &big.Int{}
	absInt.Abs(src.Int)

	// Normalize absInt and exp to where exp is always a multiple of 4. This makes
	// converting to 16-bit base 10,000 digits easier.
	var exp int32
	switch src.Exp % 4 {
	case 1, -3:
		exp = src.Exp - 1
		absInt.Mul(absInt, big10)
	case 2, -2:
		exp = src.Exp - 2
		absInt.Mul(absInt, big100)
	case 3, -1:
		exp = src.Exp - 3
		absInt.Mul(absInt, big1000)
	default:
		exp = src.Exp
	}

	if exp < 0 {
		// Split into whole and fractional parts. Adding divisor to
		// fracPart plants a sentinel 1 above the fraction's highest
		// digit so leading zero nbase digits survive the extraction
		// loop below (which stops when only the sentinel remains).
		divisor := &big.Int{}
		divisor.Exp(big10, big.NewInt(int64(-exp)), nil)
		wholePart.DivMod(absInt, divisor, fracPart)
		fracPart.Add(fracPart, divisor)
	} else {
		wholePart = absInt
	}

	// Extract base-10,000 digits, least significant first.
	var wholeDigits, fracDigits []int16

	for wholePart.Cmp(big0) != 0 {
		wholePart.DivMod(wholePart, bigNBase, remainder)
		wholeDigits = append(wholeDigits, int16(remainder.Int64()))
	}

	if fracPart.Cmp(big0) != 0 {
		for fracPart.Cmp(big1) != 0 {
			fracPart.DivMod(fracPart, bigNBase, remainder)
			fracDigits = append(fracDigits, int16(remainder.Int64()))
		}
	}

	buf = pgio.AppendInt16(buf, int16(len(wholeDigits)+len(fracDigits)))

	// weight is the nbase exponent of the most significant digit.
	var weight int16
	if len(wholeDigits) > 0 {
		weight = int16(len(wholeDigits) - 1)
		if exp > 0 {
			weight += int16(exp / 4)
		}
	} else {
		weight = int16(exp/4) - 1 + int16(len(fracDigits))
	}
	buf = pgio.AppendInt16(buf, weight)

	buf = pgio.AppendInt16(buf, sign)

	// dscale is the declared count of decimal fraction digits.
	var dscale int16
	if src.Exp < 0 {
		dscale = int16(-src.Exp)
	}
	buf = pgio.AppendInt16(buf, dscale)

	// Emit digits most significant first.
	for i := len(wholeDigits) - 1; i >= 0; i-- {
		buf = pgio.AppendInt16(buf, wholeDigits[i])
	}

	for i := len(fracDigits) - 1; i >= 0; i-- {
		buf = pgio.AppendInt16(buf, fracDigits[i])
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Numeric) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Numeric{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case float64:
+ // TODO
+ // *dst = Numeric{Float: src, Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Numeric) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return string(buf), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numeric_array.go b/vendor/github.com/jackc/pgx/pgtype/numeric_array.go
new file mode 100644
index 0000000..d991234
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numeric_array.go
@@ -0,0 +1,328 @@
package pgtype

import (
	"database/sql/driver"
	"encoding/binary"

	"github.com/jackc/pgx/pgio"
	"github.com/pkg/errors"
)

// NumericArray represents a PostgreSQL numeric[] array, including its
// dimension metadata. This file follows the generated template shared by
// the other pgtype *_array.go files; keep edits in sync with them.
type NumericArray struct {
	Elements   []Numeric
	Dimensions []ArrayDimension
	Status     Status
}

// Set converts src ([]float32, []float64, or a type whose underlying
// type is such a slice) into a one-dimensional NumericArray.
func (dst *NumericArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = NumericArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []float32:
		if value == nil {
			*dst = NumericArray{Status: Null}
		} else if len(value) == 0 {
			// Present but empty: no elements and no dimensions.
			*dst = NumericArray{Status: Present}
		} else {
			elements := make([]Numeric, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			*dst = NumericArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	case []float64:
		if value == nil {
			*dst = NumericArray{Status: Null}
		} else if len(value) == 0 {
			*dst = NumericArray{Status: Present}
		} else {
			elements := make([]Numeric, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			*dst = NumericArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to NumericArray", value)
	}

	return nil
}

// Get returns the array itself when Present, nil when Null, and the raw
// Status otherwise.
func (dst *NumericArray) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo copies the elements into *[]float32 or *[]float64 (each
// element converted via Numeric.AssignTo); unsupported or Undefined
// destinations fall through to the error return at the bottom.
func (src *NumericArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]float32:
			*v = make([]float32, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		case *[]float64:
			*v = make([]float64, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}

// DecodeText parses the text array literal (e.g. "{1,2.5,NULL}") and
// decodes each element via Numeric.DecodeText. A nil src is SQL NULL.
func (dst *NumericArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = NumericArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Numeric

	if len(uta.Elements) > 0 {
		elements = make([]Numeric, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Numeric
			// A nil elemSrc tells DecodeText the element is NULL.
			var elemSrc []byte
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = NumericArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}

// DecodeBinary parses the binary array wire format: a header with the
// dimensions, then one length-prefixed element per slot (-1 length means
// a NULL element). NOTE(review): element lengths are trusted; assumes a
// well-formed server message.
func (dst *NumericArray) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = NumericArray{Status: Null}
		return nil
	}

	var arrayHeader ArrayHeader
	rp, err := arrayHeader.DecodeBinary(ci, src)
	if err != nil {
		return err
	}

	if len(arrayHeader.Dimensions) == 0 {
		*dst = NumericArray{Dimensions: arrayHeader.Dimensions, Status: Present}
		return nil
	}

	// Total element count is the product of all dimension lengths.
	elementCount := arrayHeader.Dimensions[0].Length
	for _, d := range arrayHeader.Dimensions[1:] {
		elementCount *= d.Length
	}

	elements := make([]Numeric, elementCount)

	for i := range elements {
		elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4
		var elemSrc []byte
		if elemLen >= 0 {
			elemSrc = src[rp : rp+elemLen]
			rp += elemLen
		}
		err = elements[i].DecodeBinary(ci, elemSrc)
		if err != nil {
			return err
		}
	}

	*dst = NumericArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
	return nil
}

// EncodeText renders the array as a text literal with explicit dimension
// bounds, quoting elements as needed and writing NULL for null elements.
func (src *NumericArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}

// EncodeBinary writes the binary array wire format: a header carrying
// the dimensions and the numeric element OID (looked up via ci), then
// one length-prefixed element per slot.
func (src *NumericArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("numeric"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "numeric")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Write a -1 length placeholder (the NULL marker), then backfill
		// the real length if the element encoded to something.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}

// Scan implements the database/sql Scanner interface.
func (dst *NumericArray) Scan(src interface{}) error {
	if src == nil {
		return dst.DecodeText(nil, nil)
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy: the driver may reuse src's backing array after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *NumericArray) Value() (driver.Value, error) {
	buf, err := src.EncodeText(nil, nil)
	if err != nil {
		return nil, err
	}
	if buf == nil {
		return nil, nil
	}

	return string(buf), nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/numrange.go b/vendor/github.com/jackc/pgx/pgtype/numrange.go
new file mode 100644
index 0000000..aaed62c
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/numrange.go
@@ -0,0 +1,250 @@
package pgtype

import (
	"database/sql/driver"

	"github.com/jackc/pgx/pgio"
	"github.com/pkg/errors"
)

// Numrange represents the PostgreSQL numrange type: a range of numeric
// values with independently inclusive, exclusive, or unbounded ends, or
// the special empty range.
type Numrange struct {
	Lower     Numeric
	Upper     Numeric
	LowerType BoundType
	UpperType BoundType
	Status    Status
}

// Set always fails: there is no supported conversion from an arbitrary
// Go value to a Numrange.
func (dst *Numrange) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to Numrange", src)
}

// Get returns the range itself when Present, nil when Null, and the raw
// Status otherwise.
func (dst *Numrange) Get() interface{} {
	switch dst.Status {
	case Present:
		return dst
	case Null:
		return nil
	default:
		return dst.Status
	}
}

// AssignTo always fails: there is no supported conversion from a
// Numrange to a plain Go value.
func (src *Numrange) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}

// DecodeText parses the text range literal (e.g. "[1,10)" or "empty")
// and decodes a bound's numeric value only when that bound is Inclusive
// or Exclusive. A nil src means SQL NULL.
func (dst *Numrange) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Numrange{Status: Null}
		return nil
	}

	utr, err := ParseUntypedTextRange(string(src))
	if err != nil {
		return err
	}

	*dst = Numrange{Status: Present}

	dst.LowerType = utr.LowerType
	dst.UpperType = utr.UpperType

	// An empty range carries no bound values at all.
	if dst.LowerType == Empty {
		return nil
	}

	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
		if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
			return err
		}
	}

	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
		if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
			return err
		}
	}

	return nil
}

// DecodeBinary parses the binary range wire format (flag byte plus
// length-prefixed bounds) and decodes only the bounds that are present.
// A nil src means SQL NULL.
func (dst *Numrange) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Numrange{Status: Null}
		return nil
	}

	ubr, err := ParseUntypedBinaryRange(src)
	if err != nil {
		return err
	}

	*dst = Numrange{Status: Present}

	dst.LowerType = ubr.LowerType
	dst.UpperType = ubr.UpperType

	if dst.LowerType == Empty {
		return nil
	}

	if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
		if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
			return err
		}
	}

	if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
		if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
			return err
		}
	}

	return nil
}

// EncodeText renders the range as a text literal: '[' / '(' bracket
// choice per bound type, "empty" for the empty range, and no value
// written for an unbounded end.
func (src Numrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	switch src.LowerType {
	case Exclusive, Unbounded:
		buf = append(buf, '(')
	case Inclusive:
		buf = append(buf, '[')
	case Empty:
		return append(buf, "empty"...), nil
	default:
		return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
	}

	var err error

	if src.LowerType != Unbounded {
		buf, err = src.Lower.EncodeText(ci, buf)
		if err != nil {
			return nil, err
		} else if buf == nil {
			// A nil buf would mean the bound encoded as NULL.
			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
		}
	}

	buf = append(buf, ',')

	if src.UpperType != Unbounded {
		buf, err = src.Upper.EncodeText(ci, buf)
		if err != nil {
			return nil, err
		} else if buf == nil {
			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
		}
	}

	switch src.UpperType {
	case Exclusive, Unbounded:
		buf = append(buf, ')')
	case Inclusive:
		buf = append(buf, ']')
	default:
		return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
	}

	return buf, nil
}

// EncodeBinary writes the binary range wire format: one flag byte built
// from the bound-type masks, then each present bound as a
// length-prefixed numeric (length backfilled after encoding).
func (src Numrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	var rangeType byte
	switch src.LowerType {
	case Inclusive:
		rangeType |= lowerInclusiveMask
	case Unbounded:
		rangeType |= lowerUnboundedMask
	case Exclusive:
	case Empty:
		// An empty range is the flag byte alone.
		return append(buf, emptyMask), nil
	default:
		return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
	}

	switch src.UpperType {
	case Inclusive:
		rangeType |= upperInclusiveMask
	case Unbounded:
		rangeType |= upperUnboundedMask
	case Exclusive:
	default:
		return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
	}

	buf = append(buf, rangeType)

	var err error

	if src.LowerType != Unbounded {
		// Write a -1 length placeholder, then backfill the real length.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		buf, err = src.Lower.EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if buf == nil {
			return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
		}

		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
	}

	if src.UpperType != Unbounded {
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		buf, err = src.Upper.EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if buf == nil {
			return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
		}

		pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
	}

	return buf, nil
}

// Scan implements the database/sql Scanner interface.
func (dst *Numrange) Scan(src interface{}) error {
	if src == nil {
		*dst = Numrange{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		// Copy: the driver may reuse src's backing array after Scan.
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	}

	return errors.Errorf("cannot scan %T", src)
}

// Value implements the database/sql/driver Valuer interface.
func (src Numrange) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/oid.go b/vendor/github.com/jackc/pgx/pgtype/oid.go
new file mode 100644
index 0000000..59370d6
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/oid.go
@@ -0,0 +1,81 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// OID (Object Identifier Type) is, according to
// https://www.postgresql.org/docs/current/static/datatype-oid.html, used
// internally by PostgreSQL as a primary key for various system tables. It is
// currently implemented as an unsigned four-byte integer. Its definition can be
// found in src/include/postgres_ext.h in the PostgreSQL sources. Because it is
// so frequently required to be in a NOT NULL condition OID cannot be NULL. To
// allow for NULL OIDs use OIDValue.
type OID uint32
+
// DecodeText parses the decimal text representation of an oid. OID has
// no NULL representation, so a nil src is an error.
func (dst *OID) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		return errors.Errorf("cannot decode nil into OID")
	}

	// bitSize 32 enforces the unsigned four-byte range.
	n, err := strconv.ParseUint(string(src), 10, 32)
	if err != nil {
		return err
	}

	*dst = OID(n)
	return nil
}
+
// DecodeBinary reads the big-endian four-byte binary representation of
// an oid. OID has no NULL representation, so a nil src is an error.
func (dst *OID) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		return errors.Errorf("cannot decode nil into OID")
	}

	if len(src) != 4 {
		return errors.Errorf("invalid length: %v", len(src))
	}

	n := binary.BigEndian.Uint32(src)
	*dst = OID(n)
	return nil
}
+
+func (src OID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return append(buf, strconv.FormatUint(uint64(src), 10)...), nil
+}
+
// EncodeBinary appends src as a big-endian four-byte integer.
func (src OID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return pgio.AppendUint32(buf, uint32(src)), nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *OID) Scan(src interface{}) error {
+ if src == nil {
+ return errors.Errorf("cannot scan NULL into %T", src)
+ }
+
+ switch src := src.(type) {
+ case int64:
+ *dst = OID(src)
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
// Value implements the database/sql/driver Valuer interface. The value
// is widened to int64 because driver.Value has no unsigned kind.
func (src OID) Value() (driver.Value, error) {
	return int64(src), nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/oid_value.go b/vendor/github.com/jackc/pgx/pgtype/oid_value.go
new file mode 100644
index 0000000..7eae4bf
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/oid_value.go
@@ -0,0 +1,55 @@
package pgtype

import (
	"database/sql/driver"
)

// OIDValue (Object Identifier Type) is, according to
// https://www.postgresql.org/docs/current/static/datatype-OIDValue.html, used
// internally by PostgreSQL as a primary key for various system tables. It is
// currently implemented as an unsigned four-byte integer. Its definition can be
// found in src/include/postgres_ext.h in the PostgreSQL sources.
//
// Unlike OID, OIDValue can represent NULL; every method simply delegates
// to the shared pguint32 implementation.
type OIDValue pguint32

// Set converts from src to dst. Note that as OIDValue is not a general
// number type Set does not do automatic type conversion as other number
// types do.
func (dst *OIDValue) Set(src interface{}) error {
	return (*pguint32)(dst).Set(src)
}

// Get delegates to pguint32.Get.
func (dst *OIDValue) Get() interface{} {
	return (*pguint32)(dst).Get()
}

// AssignTo assigns from src to dst. Note that as OIDValue is not a general number
// type AssignTo does not do automatic type conversion as other number types do.
func (src *OIDValue) AssignTo(dst interface{}) error {
	return (*pguint32)(src).AssignTo(dst)
}

// DecodeText delegates to pguint32.DecodeText.
func (dst *OIDValue) DecodeText(ci *ConnInfo, src []byte) error {
	return (*pguint32)(dst).DecodeText(ci, src)
}

// DecodeBinary delegates to pguint32.DecodeBinary.
func (dst *OIDValue) DecodeBinary(ci *ConnInfo, src []byte) error {
	return (*pguint32)(dst).DecodeBinary(ci, src)
}

// EncodeText delegates to pguint32.EncodeText.
func (src *OIDValue) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*pguint32)(src).EncodeText(ci, buf)
}

// EncodeBinary delegates to pguint32.EncodeBinary.
func (src *OIDValue) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return (*pguint32)(src).EncodeBinary(ci, buf)
}

// Scan implements the database/sql Scanner interface.
func (dst *OIDValue) Scan(src interface{}) error {
	return (*pguint32)(dst).Scan(src)
}

// Value implements the database/sql/driver Valuer interface.
func (src *OIDValue) Value() (driver.Value, error) {
	return (*pguint32)(src).Value()
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/path.go b/vendor/github.com/jackc/pgx/pgtype/path.go
new file mode 100644
index 0000000..aa0cee8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/path.go
@@ -0,0 +1,193 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Path represents the PostgreSQL path geometric type: a series of connected
// points that is either open ('[...]' text form) or closed ('(...)' form).
type Path struct {
	P      []Vec2 // the points making up the path
	Closed bool   // true for a closed path, false for an open one
	Status Status
}
+
+func (dst *Path) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Path", src)
+}
+
+func (dst *Path) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Path) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Path) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ if len(src) < 7 {
+ return errors.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == '('
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ *dst = Path{P: points, Closed: closed, Status: Present}
+ return nil
+}
+
+func (dst *Path) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for Path: %v", len(src))
+ }
+
+ closed := src[0] == 1
+ pointCount := int(binary.BigEndian.Uint32(src[1:]))
+
+ rp := 5
+
+ if 5+pointCount*16 != len(src) {
+ return errors.Errorf("invalid length for Path with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ *dst = Path{
+ P: points,
+ Closed: closed,
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Path) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var startByte, endByte byte
+ if src.Closed {
+ startByte = '('
+ endByte = ')'
+ } else {
+ startByte = '['
+ endByte = ']'
+ }
+ buf = append(buf, startByte)
+
+ for i, p := range src.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%f,%f)`, p.X, p.Y)...)
+ }
+
+ return append(buf, endByte), nil
+}
+
+func (src *Path) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var closeByte byte
+ if src.Closed {
+ closeByte = 1
+ }
+ buf = append(buf, closeByte)
+
+ buf = pgio.AppendInt32(buf, int32(len(src.P)))
+
+ for _, p := range src.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Path) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Path{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Path) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/pgtype.go b/vendor/github.com/jackc/pgx/pgtype/pgtype.go
new file mode 100644
index 0000000..2643314
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/pgtype.go
@@ -0,0 +1,280 @@
+package pgtype
+
+import (
+ "reflect"
+
+ "github.com/pkg/errors"
+)
+
+// PostgreSQL oids for common types
+const (
+ BoolOID = 16
+ ByteaOID = 17
+ CharOID = 18
+ NameOID = 19
+ Int8OID = 20
+ Int2OID = 21
+ Int4OID = 23
+ TextOID = 25
+ OIDOID = 26
+ TIDOID = 27
+ XIDOID = 28
+ CIDOID = 29
+ JSONOID = 114
+ CIDROID = 650
+ CIDRArrayOID = 651
+ Float4OID = 700
+ Float8OID = 701
+ UnknownOID = 705
+ InetOID = 869
+ BoolArrayOID = 1000
+ Int2ArrayOID = 1005
+ Int4ArrayOID = 1007
+ TextArrayOID = 1009
+ ByteaArrayOID = 1001
+ BPCharArrayOID = 1014
+ VarcharArrayOID = 1015
+ Int8ArrayOID = 1016
+ Float4ArrayOID = 1021
+ Float8ArrayOID = 1022
+ ACLItemOID = 1033
+ ACLItemArrayOID = 1034
+ InetArrayOID = 1041
+ BPCharOID = 1042
+ VarcharOID = 1043
+ DateOID = 1082
+ TimestampOID = 1114
+ TimestampArrayOID = 1115
+ DateArrayOID = 1182
+ TimestamptzOID = 1184
+ TimestamptzArrayOID = 1185
+ NumericOID = 1700
+ RecordOID = 2249
+ UUIDOID = 2950
+ UUIDArrayOID = 2951
+ JSONBOID = 3802
+)
+
+type Status byte
+
+const (
+ Undefined Status = iota
+ Null
+ Present
+)
+
+type InfinityModifier int8
+
+const (
+ Infinity InfinityModifier = 1
+ None InfinityModifier = 0
+ NegativeInfinity InfinityModifier = -Infinity
+)
+
+func (im InfinityModifier) String() string {
+ switch im {
+ case None:
+ return "none"
+ case Infinity:
+ return "infinity"
+ case NegativeInfinity:
+ return "-infinity"
+ default:
+ return "invalid"
+ }
+}
+
+type Value interface {
+ // Set converts and assigns src to itself.
+ Set(src interface{}) error
+
+ // Get returns the simplest representation of Value. If the Value is Null or
+ // Undefined that is the return value. If no simpler representation is
+ // possible, then Get() returns Value.
+ Get() interface{}
+
+ // AssignTo converts and assigns the Value to dst. It MUST make a deep copy of
+ // any reference types.
+ AssignTo(dst interface{}) error
+}
+
+type BinaryDecoder interface {
+ // DecodeBinary decodes src into BinaryDecoder. If src is nil then the
+ // original SQL value is NULL. BinaryDecoder takes ownership of src. The
+ // caller MUST not use it again.
+ DecodeBinary(ci *ConnInfo, src []byte) error
+}
+
+type TextDecoder interface {
+ // DecodeText decodes src into TextDecoder. If src is nil then the original
+ // SQL value is NULL. TextDecoder takes ownership of src. The caller MUST not
+ // use it again.
+ DecodeText(ci *ConnInfo, src []byte) error
+}
+
+// BinaryEncoder is implemented by types that can encode themselves into the
+// PostgreSQL binary wire format.
+type BinaryEncoder interface {
+ // EncodeBinary should append the binary format of self to buf. If self is the
+ // SQL value NULL then append nothing and return (nil, nil). The caller of
+ // EncodeBinary is responsible for writing the correct NULL value or the
+ // length of the data written.
+ EncodeBinary(ci *ConnInfo, buf []byte) (newBuf []byte, err error)
+}
+
+// TextEncoder is implemented by types that can encode themselves into the
+// PostgreSQL text wire format.
+type TextEncoder interface {
+ // EncodeText should append the text format of self to buf. If self is the
+ // SQL value NULL then append nothing and return (nil, nil). The caller of
+ // EncodeText is responsible for writing the correct NULL value or the
+ // length of the data written.
+ EncodeText(ci *ConnInfo, buf []byte) (newBuf []byte, err error)
+}
+
+var errUndefined = errors.New("cannot encode status undefined")
+var errBadStatus = errors.New("invalid status")
+
+type DataType struct {
+ Value Value
+ Name string
+ OID OID
+}
+
+type ConnInfo struct {
+ oidToDataType map[OID]*DataType
+ nameToDataType map[string]*DataType
+ reflectTypeToDataType map[reflect.Type]*DataType
+}
+
+func NewConnInfo() *ConnInfo {
+ return &ConnInfo{
+ oidToDataType: make(map[OID]*DataType, 256),
+ nameToDataType: make(map[string]*DataType, 256),
+ reflectTypeToDataType: make(map[reflect.Type]*DataType, 256),
+ }
+}
+
+func (ci *ConnInfo) InitializeDataTypes(nameOIDs map[string]OID) {
+ for name, oid := range nameOIDs {
+ var value Value
+ if t, ok := nameValues[name]; ok {
+ value = reflect.New(reflect.ValueOf(t).Elem().Type()).Interface().(Value)
+ } else {
+ value = &GenericText{}
+ }
+ ci.RegisterDataType(DataType{Value: value, Name: name, OID: oid})
+ }
+}
+
// RegisterDataType makes a DataType known to the ConnInfo, indexing it by
// OID, by type name, and by the concrete Go type of its Value.
func (ci *ConnInfo) RegisterDataType(t DataType) {
	// t is a per-call copy, so taking its address is safe: each registration
	// stores a pointer to its own DataType.
	ci.oidToDataType[t.OID] = &t
	ci.nameToDataType[t.Name] = &t
	ci.reflectTypeToDataType[reflect.ValueOf(t.Value).Type()] = &t
}
+
+func (ci *ConnInfo) DataTypeForOID(oid OID) (*DataType, bool) {
+ dt, ok := ci.oidToDataType[oid]
+ return dt, ok
+}
+
+func (ci *ConnInfo) DataTypeForName(name string) (*DataType, bool) {
+ dt, ok := ci.nameToDataType[name]
+ return dt, ok
+}
+
+func (ci *ConnInfo) DataTypeForValue(v Value) (*DataType, bool) {
+ dt, ok := ci.reflectTypeToDataType[reflect.ValueOf(v).Type()]
+ return dt, ok
+}
+
+// DeepCopy makes a deep copy of the ConnInfo.
+func (ci *ConnInfo) DeepCopy() *ConnInfo {
+ ci2 := &ConnInfo{
+ oidToDataType: make(map[OID]*DataType, len(ci.oidToDataType)),
+ nameToDataType: make(map[string]*DataType, len(ci.nameToDataType)),
+ reflectTypeToDataType: make(map[reflect.Type]*DataType, len(ci.reflectTypeToDataType)),
+ }
+
+ for _, dt := range ci.oidToDataType {
+ ci2.RegisterDataType(DataType{
+ Value: reflect.New(reflect.ValueOf(dt.Value).Elem().Type()).Interface().(Value),
+ Name: dt.Name,
+ OID: dt.OID,
+ })
+ }
+
+ return ci2
+}
+
+var nameValues map[string]Value
+
+func init() {
+ nameValues = map[string]Value{
+ "_aclitem": &ACLItemArray{},
+ "_bool": &BoolArray{},
+ "_bpchar": &BPCharArray{},
+ "_bytea": &ByteaArray{},
+ "_cidr": &CIDRArray{},
+ "_date": &DateArray{},
+ "_float4": &Float4Array{},
+ "_float8": &Float8Array{},
+ "_inet": &InetArray{},
+ "_int2": &Int2Array{},
+ "_int4": &Int4Array{},
+ "_int8": &Int8Array{},
+ "_numeric": &NumericArray{},
+ "_text": &TextArray{},
+ "_timestamp": &TimestampArray{},
+ "_timestamptz": &TimestamptzArray{},
+ "_uuid": &UUIDArray{},
+ "_varchar": &VarcharArray{},
+ "aclitem": &ACLItem{},
+ "bit": &Bit{},
+ "bool": &Bool{},
+ "box": &Box{},
+ "bpchar": &BPChar{},
+ "bytea": &Bytea{},
+ "char": &QChar{},
+ "cid": &CID{},
+ "cidr": &CIDR{},
+ "circle": &Circle{},
+ "date": &Date{},
+ "daterange": &Daterange{},
+ "decimal": &Decimal{},
+ "float4": &Float4{},
+ "float8": &Float8{},
+ "hstore": &Hstore{},
+ "inet": &Inet{},
+ "int2": &Int2{},
+ "int4": &Int4{},
+ "int4range": &Int4range{},
+ "int8": &Int8{},
+ "int8range": &Int8range{},
+ "interval": &Interval{},
+ "json": &JSON{},
+ "jsonb": &JSONB{},
+ "line": &Line{},
+ "lseg": &Lseg{},
+ "macaddr": &Macaddr{},
+ "name": &Name{},
+ "numeric": &Numeric{},
+ "numrange": &Numrange{},
+ "oid": &OIDValue{},
+ "path": &Path{},
+ "point": &Point{},
+ "polygon": &Polygon{},
+ "record": &Record{},
+ "text": &Text{},
+ "tid": &TID{},
+ "timestamp": &Timestamp{},
+ "timestamptz": &Timestamptz{},
+ "tsrange": &Tsrange{},
+ "tstzrange": &Tstzrange{},
+ "unknown": &Unknown{},
+ "uuid": &UUID{},
+ "varbit": &Varbit{},
+ "varchar": &Varchar{},
+ "xid": &XID{},
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/pguint32.go b/vendor/github.com/jackc/pgx/pgtype/pguint32.go
new file mode 100644
index 0000000..e441a69
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/pguint32.go
@@ -0,0 +1,162 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "math"
+ "strconv"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+// pguint32 is the core type that is used to implement PostgreSQL types such as
+// CID and XID.
+type pguint32 struct {
+ Uint uint32
+ Status Status
+}
+
+// Set converts from src to dst. Note that as pguint32 is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *pguint32) Set(src interface{}) error {
+ switch value := src.(type) {
+ case int64:
+ if value < 0 {
+ return errors.Errorf("%d is less than minimum value for pguint32", value)
+ }
+ if value > math.MaxUint32 {
+ return errors.Errorf("%d is greater than maximum value for pguint32", value)
+ }
+ *dst = pguint32{Uint: uint32(value), Status: Present}
+ case uint32:
+ *dst = pguint32{Uint: value, Status: Present}
+ default:
+ return errors.Errorf("cannot convert %v to pguint32", value)
+ }
+
+ return nil
+}
+
+func (dst *pguint32) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Uint
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+// AssignTo assigns from src to dst. Note that as pguint32 is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *pguint32) AssignTo(dst interface{}) error {
+ switch v := dst.(type) {
+ case *uint32:
+ if src.Status == Present {
+ *v = src.Uint
+ } else {
+ return errors.Errorf("cannot assign %v into %T", src, dst)
+ }
+ case **uint32:
+ if src.Status == Present {
+ n := src.Uint
+ *v = &n
+ } else {
+ *v = nil
+ }
+ }
+
+ return nil
+}
+
+func (dst *pguint32) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ n, err := strconv.ParseUint(string(src), 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *dst = pguint32{Uint: uint32(n), Status: Present}
+ return nil
+}
+
+func (dst *pguint32) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ if len(src) != 4 {
+ return errors.Errorf("invalid length: %v", len(src))
+ }
+
+ n := binary.BigEndian.Uint32(src)
+ *dst = pguint32{Uint: n, Status: Present}
+ return nil
+}
+
+func (src *pguint32) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, strconv.FormatUint(uint64(src.Uint), 10)...), nil
+}
+
+func (src *pguint32) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return pgio.AppendUint32(buf, src.Uint), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *pguint32) Scan(src interface{}) error {
+ if src == nil {
+ *dst = pguint32{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case uint32:
+ *dst = pguint32{Uint: src, Status: Present}
+ return nil
+ case int64:
+ *dst = pguint32{Uint: uint32(src), Status: Present}
+ return nil
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *pguint32) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return int64(src.Uint), nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/point.go b/vendor/github.com/jackc/pgx/pgtype/point.go
new file mode 100644
index 0000000..3132a93
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/point.go
@@ -0,0 +1,139 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// Vec2 is a two-dimensional vector; it is the building block for the
// geometric pgtypes (point, path, polygon).
type Vec2 struct {
	X float64
	Y float64
}

// Point represents the PostgreSQL point geometric type.
type Point struct {
	P      Vec2
	Status Status
}
+
+func (dst *Point) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Point", src)
+}
+
+func (dst *Point) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Point) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Point) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for point: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
+ if len(parts) < 2 {
+ return errors.Errorf("invalid format for point")
+ }
+
+ x, err := strconv.ParseFloat(parts[0], 64)
+ if err != nil {
+ return err
+ }
+
+ y, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return err
+ }
+
+ *dst = Point{P: Vec2{x, y}, Status: Present}
+ return nil
+}
+
+func (dst *Point) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ if len(src) != 16 {
+ return errors.Errorf("invalid length for point: %v", len(src))
+ }
+
+ x := binary.BigEndian.Uint64(src)
+ y := binary.BigEndian.Uint64(src[8:])
+
+ *dst = Point{
+ P: Vec2{math.Float64frombits(x), math.Float64frombits(y)},
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Point) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, fmt.Sprintf(`(%f,%f)`, src.P.X, src.P.Y)...), nil
+}
+
+func (src *Point) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(src.P.Y))
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Point) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Point{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Point) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/polygon.go b/vendor/github.com/jackc/pgx/pgtype/polygon.go
new file mode 100644
index 0000000..3f3d9f5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/polygon.go
@@ -0,0 +1,174 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Polygon struct {
+ P []Vec2
+ Status Status
+}
+
+func (dst *Polygon) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Polygon", src)
+}
+
+func (dst *Polygon) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Polygon) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Polygon) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ if len(src) < 7 {
+ return errors.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ points := make([]Vec2, 0)
+
+ str := string(src[2:])
+
+ for {
+ end := strings.IndexByte(str, ',')
+ x, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ str = str[end+1:]
+ end = strings.IndexByte(str, ')')
+
+ y, err := strconv.ParseFloat(str[:end], 64)
+ if err != nil {
+ return err
+ }
+
+ points = append(points, Vec2{x, y})
+
+ if end+3 < len(str) {
+ str = str[end+3:]
+ } else {
+ break
+ }
+ }
+
+ *dst = Polygon{P: points, Status: Present}
+ return nil
+}
+
+func (dst *Polygon) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for Polygon: %v", len(src))
+ }
+
+ pointCount := int(binary.BigEndian.Uint32(src))
+ rp := 4
+
+ if 4+pointCount*16 != len(src) {
+ return errors.Errorf("invalid length for Polygon with %d points: %v", pointCount, len(src))
+ }
+
+ points := make([]Vec2, pointCount)
+ for i := 0; i < len(points); i++ {
+ x := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ y := binary.BigEndian.Uint64(src[rp:])
+ rp += 8
+ points[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}
+ }
+
+ *dst = Polygon{
+ P: points,
+ Status: Present,
+ }
+ return nil
+}
+
+func (src *Polygon) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, '(')
+
+ for i, p := range src.P {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = append(buf, fmt.Sprintf(`(%f,%f)`, p.X, p.Y)...)
+ }
+
+ return append(buf, ')'), nil
+}
+
+func (src *Polygon) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt32(buf, int32(len(src.P)))
+
+ for _, p := range src.P {
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.X))
+ buf = pgio.AppendUint64(buf, math.Float64bits(p.Y))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Polygon) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Polygon{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Polygon) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/qchar.go b/vendor/github.com/jackc/pgx/pgtype/qchar.go
new file mode 100644
index 0000000..064dab1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/qchar.go
@@ -0,0 +1,146 @@
+package pgtype
+
+import (
+ "math"
+ "strconv"
+
+ "github.com/pkg/errors"
+)
+
+// QChar is for PostgreSQL's special 8-bit-only "char" type more akin to the C
+// language's char type, or Go's byte type. (Note that the name in PostgreSQL
+// itself is "char", in double-quotes, and not char.) It gets used a lot in
+// PostgreSQL's system tables to hold a single ASCII character value (eg
+// pg_class.relkind). It is named Qchar for quoted char to disambiguate from SQL
+// standard type char.
+//
+// Not all possible values of QChar are representable in the text format.
+// Therefore, QChar does not implement TextEncoder and TextDecoder. In
+// addition, database/sql Scanner and database/sql/driver Value are not
+// implemented.
+type QChar struct {
+ Int int8
+ Status Status
+}
+
+func (dst *QChar) Set(src interface{}) error {
+ if src == nil {
+ *dst = QChar{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case int8:
+ *dst = QChar{Int: value, Status: Present}
+ case uint8:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int16:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint16:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int32:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint32:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int64:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint64:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case int:
+ if value < math.MinInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case uint:
+ if value > math.MaxInt8 {
+ return errors.Errorf("%d is greater than maximum value for QChar", value)
+ }
+ *dst = QChar{Int: int8(value), Status: Present}
+ case string:
+ num, err := strconv.ParseInt(value, 10, 8)
+ if err != nil {
+ return err
+ }
+ *dst = QChar{Int: int8(num), Status: Present}
+ default:
+ if originalSrc, ok := underlyingNumberType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to QChar", value)
+ }
+
+ return nil
+}
+
+func (dst *QChar) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Int
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *QChar) AssignTo(dst interface{}) error {
+ return int64AssignTo(int64(src.Int), src.Status, dst)
+}
+
+func (dst *QChar) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = QChar{Status: Null}
+ return nil
+ }
+
+ if len(src) != 1 {
+ return errors.Errorf(`invalid length for "char": %v`, len(src))
+ }
+
+ *dst = QChar{Int: int8(src[0]), Status: Present}
+ return nil
+}
+
+func (src *QChar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, byte(src.Int)), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/range.go b/vendor/github.com/jackc/pgx/pgtype/range.go
new file mode 100644
index 0000000..54fc6ca
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/range.go
@@ -0,0 +1,278 @@
+package pgtype
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "github.com/pkg/errors"
+)
+
+type BoundType byte
+
+const (
+ Inclusive = BoundType('i')
+ Exclusive = BoundType('e')
+ Unbounded = BoundType('U')
+ Empty = BoundType('E')
+)
+
+func (bt BoundType) String() string {
+ return string(bt)
+}
+
+type UntypedTextRange struct {
+ Lower string
+ Upper string
+ LowerType BoundType
+ UpperType BoundType
+}
+
// ParseUntypedTextRange parses the PostgreSQL text range format, e.g.
// "[1,10)" or "empty", into its raw string components without interpreting
// the bound values.
func ParseUntypedTextRange(src string) (*UntypedTextRange, error) {
	utr := &UntypedTextRange{}
	// "empty" is a special literal with no bounds at all.
	if src == "empty" {
		utr.LowerType = Empty
		utr.UpperType = Empty
		return utr, nil
	}

	buf := bytes.NewBufferString(src)

	skipWhitespace(buf)

	// The first rune must open the lower bound: '(' exclusive, '[' inclusive.
	r, _, err := buf.ReadRune()
	if err != nil {
		return nil, errors.Errorf("invalid lower bound: %v", err)
	}
	switch r {
	case '(':
		utr.LowerType = Exclusive
	case '[':
		utr.LowerType = Inclusive
	default:
		return nil, errors.Errorf("missing lower bound, instead got: %v", string(r))
	}

	// Peek at the next rune: a ',' directly after the opener means the lower
	// bound is absent (unbounded).
	r, _, err = buf.ReadRune()
	if err != nil {
		return nil, errors.Errorf("invalid lower value: %v", err)
	}
	buf.UnreadRune()

	if r == ',' {
		utr.LowerType = Unbounded
	} else {
		utr.Lower, err = rangeParseValue(buf)
		if err != nil {
			return nil, errors.Errorf("invalid lower value: %v", err)
		}
	}

	// The two bounds must be separated by a comma.
	r, _, err = buf.ReadRune()
	if err != nil {
		return nil, errors.Errorf("missing range separator: %v", err)
	}
	if r != ',' {
		return nil, errors.Errorf("missing range separator: %v", r)
	}

	// A closing delimiter directly after the comma means the upper bound is
	// absent (unbounded).
	r, _, err = buf.ReadRune()
	if err != nil {
		return nil, errors.Errorf("invalid upper value: %v", err)
	}

	if r == ')' || r == ']' {
		utr.UpperType = Unbounded
	} else {
		buf.UnreadRune()
		utr.Upper, err = rangeParseValue(buf)
		if err != nil {
			return nil, errors.Errorf("invalid upper value: %v", err)
		}

		// The closing rune fixes the upper bound type: ')' exclusive,
		// ']' inclusive.
		r, _, err = buf.ReadRune()
		if err != nil {
			return nil, errors.Errorf("missing upper bound: %v", err)
		}
		switch r {
		case ')':
			utr.UpperType = Exclusive
		case ']':
			utr.UpperType = Inclusive
		default:
			return nil, errors.Errorf("missing upper bound, instead got: %v", string(r))
		}
	}

	skipWhitespace(buf)

	// Anything left over after the closing delimiter is an error.
	if buf.Len() > 0 {
		return nil, errors.Errorf("unexpected trailing data: %v", buf.String())
	}

	return utr, nil
}
+
+func rangeParseValue(buf *bytes.Buffer) (string, error) {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r == '"' {
+ return rangeParseQuotedValue(buf)
+ }
+ buf.UnreadRune()
+
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case ',', '[', ']', '(', ')':
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+
+ s.WriteRune(r)
+ }
+}
+
+func rangeParseQuotedValue(buf *bytes.Buffer) (string, error) {
+ s := &bytes.Buffer{}
+
+ for {
+ r, _, err := buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+
+ switch r {
+ case '\\':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ case '"':
+ r, _, err = buf.ReadRune()
+ if err != nil {
+ return "", err
+ }
+ if r != '"' {
+ buf.UnreadRune()
+ return s.String(), nil
+ }
+ }
+ s.WriteRune(r)
+ }
+}
+
// UntypedBinaryRange is the raw, uninterpreted result of parsing the binary
// range wire format: the bound value bytes plus their bound types.
type UntypedBinaryRange struct {
	Lower     []byte
	Upper     []byte
	LowerType BoundType
	UpperType BoundType
}

// The first byte of the binary range format is a combination of the flag
// bits below. Observed combinations:
//
// 0 = () = 00000
// 1 = empty = 00001
// 2 = [) = 00010
// 4 = (] = 00100
// 6 = [] = 00110
// 8 = ) = 01000
// 12 = ] = 01100
// 16 = ( = 10000
// 18 = [ = 10010
// 24 = = 11000

const emptyMask = 1           // range is empty; no bound values follow
const lowerInclusiveMask = 2  // lower bound is inclusive '['
const upperInclusiveMask = 4  // upper bound is inclusive ']'
const lowerUnboundedMask = 8  // no lower bound value present
const upperUnboundedMask = 16 // no upper bound value present
+
+func ParseUntypedBinaryRange(src []byte) (*UntypedBinaryRange, error) {
+ ubr := &UntypedBinaryRange{}
+
+ if len(src) == 0 {
+ return nil, errors.Errorf("range too short: %v", len(src))
+ }
+
+ rangeType := src[0]
+ rp := 1
+
+ if rangeType&emptyMask > 0 {
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing empty range: %v", len(src[rp:]))
+ }
+ ubr.LowerType = Empty
+ ubr.UpperType = Empty
+ return ubr, nil
+ }
+
+ if rangeType&lowerInclusiveMask > 0 {
+ ubr.LowerType = Inclusive
+ } else if rangeType&lowerUnboundedMask > 0 {
+ ubr.LowerType = Unbounded
+ } else {
+ ubr.LowerType = Exclusive
+ }
+
+ if rangeType&upperInclusiveMask > 0 {
+ ubr.UpperType = Inclusive
+ } else if rangeType&upperUnboundedMask > 0 {
+ ubr.UpperType = Unbounded
+ } else {
+ ubr.UpperType = Exclusive
+ }
+
+ if ubr.LowerType == Unbounded && ubr.UpperType == Unbounded {
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing unbounded range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if len(src[rp:]) < 4 {
+ return nil, errors.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+
+ val := src[rp : rp+valueLen]
+ rp += valueLen
+
+ if ubr.LowerType != Unbounded {
+ ubr.Lower = val
+ } else {
+ ubr.Upper = val
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+ return ubr, nil
+ }
+
+ if ubr.UpperType != Unbounded {
+ if len(src[rp:]) < 4 {
+ return nil, errors.Errorf("too few bytes for size: %v", src[rp:])
+ }
+ valueLen := int(binary.BigEndian.Uint32(src[rp:]))
+ rp += 4
+ ubr.Upper = src[rp : rp+valueLen]
+ rp += valueLen
+ }
+
+ if len(src[rp:]) > 0 {
+ return nil, errors.Errorf("unexpected trailing bytes parsing range: %v", len(src[rp:]))
+ }
+
+ return ubr, nil
+
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/record.go b/vendor/github.com/jackc/pgx/pgtype/record.go
new file mode 100644
index 0000000..aeca1c5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/record.go
@@ -0,0 +1,129 @@
+package pgtype
+
+import (
+ "encoding/binary"
+ "reflect"
+
+ "github.com/pkg/errors"
+)
+
// Record is the generic PostgreSQL record type such as is created with the
// "row" function. Record only implements BinaryEncoder and Value. The text
// format output format from PostgreSQL does not include type information and is
// therefore impossible to decode. No encoders are implemented because
// PostgreSQL does not support input of generic records.
type Record struct {
	Fields []Value // decoded field values, in column order
	Status Status  // Present, Null, or Undefined
}
+
+func (dst *Record) Set(src interface{}) error {
+ if src == nil {
+ *dst = Record{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case []Value:
+ *dst = Record{Fields: value, Status: Present}
+ default:
+ return errors.Errorf("cannot convert %v to Record", src)
+ }
+
+ return nil
+}
+
+func (dst *Record) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Fields
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Record) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *[]Value:
+ *v = make([]Value, len(src.Fields))
+ copy(*v, src.Fields)
+ return nil
+ case *[]interface{}:
+ *v = make([]interface{}, len(src.Fields))
+ for i := range *v {
+ (*v)[i] = src.Fields[i].Get()
+ }
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
// DecodeBinary decodes the PostgreSQL binary record format: a 4-byte field
// count, then for each field a 4-byte OID, a 4-byte length (-1 for NULL),
// and that many bytes of value data. Each field is decoded with the
// BinaryDecoder registered for its OID in ci.
func (dst *Record) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Record{Status: Null}
		return nil
	}

	rp := 0

	if len(src[rp:]) < 4 {
		return errors.Errorf("Record incomplete %v", src)
	}
	fieldCount := int(int32(binary.BigEndian.Uint32(src[rp:])))
	rp += 4

	fields := make([]Value, fieldCount)

	for i := 0; i < fieldCount; i++ {
		// 4 bytes OID + 4 bytes length must both be present.
		if len(src[rp:]) < 8 {
			return errors.Errorf("Record incomplete %v", src)
		}
		fieldOID := OID(binary.BigEndian.Uint32(src[rp:]))
		rp += 4

		fieldLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
		rp += 4

		var binaryDecoder BinaryDecoder
		if dt, ok := ci.DataTypeForOID(fieldOID); ok {
			binaryDecoder, _ = dt.Value.(BinaryDecoder)
		}
		if binaryDecoder == nil {
			return errors.Errorf("unknown oid while decoding record: %v", fieldOID)
		}

		// fieldLen < 0 means SQL NULL: fieldBytes stays nil.
		var fieldBytes []byte
		if fieldLen >= 0 {
			if len(src[rp:]) < fieldLen {
				return errors.Errorf("Record incomplete %v", src)
			}
			fieldBytes = src[rp : rp+fieldLen]
			rp += fieldLen
		}

		// Duplicate struct to scan into; the registered decoder is shared, so
		// decoding into it directly would alias every field of the record.
		binaryDecoder = reflect.New(reflect.ValueOf(binaryDecoder).Elem().Type()).Interface().(BinaryDecoder)

		if err := binaryDecoder.DecodeBinary(ci, fieldBytes); err != nil {
			return err
		}

		fields[i] = binaryDecoder.(Value)
	}

	*dst = Record{Fields: fields, Status: Present}

	return nil
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/text.go b/vendor/github.com/jackc/pgx/pgtype/text.go
new file mode 100644
index 0000000..bceeffd
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/text.go
@@ -0,0 +1,163 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+
+ "github.com/pkg/errors"
+)
+
// Text represents the PostgreSQL text type (and is reused for other
// string-like types).
type Text struct {
	String string
	Status Status // Present, Null, or Undefined
}
+
+func (dst *Text) Set(src interface{}) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case string:
+ *dst = Text{String: value, Status: Present}
+ case *string:
+ if value == nil {
+ *dst = Text{Status: Null}
+ } else {
+ *dst = Text{String: *value, Status: Present}
+ }
+ case []byte:
+ if value == nil {
+ *dst = Text{Status: Null}
+ } else {
+ *dst = Text{String: string(value), Status: Present}
+ }
+ default:
+ if originalSrc, ok := underlyingStringType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Text", value)
+ }
+
+ return nil
+}
+
+func (dst *Text) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.String
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Text) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *string:
+ *v = src.String
+ return nil
+ case *[]byte:
+ *v = make([]byte, len(src.String))
+ copy(*v, src.String)
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Text) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ *dst = Text{String: string(src), Status: Present}
+ return nil
+}
+
// DecodeBinary decodes the binary format, which for text is byte-identical
// to the text format, so it delegates to DecodeText.
func (dst *Text) DecodeBinary(ci *ConnInfo, src []byte) error {
	return dst.DecodeText(ci, src)
}
+
+func (src *Text) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.String...), nil
+}
+
// EncodeBinary encodes src in binary format, which for text is identical to
// the text format.
func (src *Text) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	return src.EncodeText(ci, buf)
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Text) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Text{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Text) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ return src.String, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
+
+func (src *Text) MarshalJSON() ([]byte, error) {
+ switch src.Status {
+ case Present:
+ return json.Marshal(src.String)
+ case Null:
+ return []byte("null"), nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return nil, errBadStatus
+}
+
+func (dst *Text) UnmarshalJSON(b []byte) error {
+ var s string
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ *dst = Text{String: s, Status: Present}
+
+ return nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/text_array.go b/vendor/github.com/jackc/pgx/pgtype/text_array.go
new file mode 100644
index 0000000..e40f4b8
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/text_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// TextArray represents a PostgreSQL array of text values.
type TextArray struct {
	Elements   []Text           // flattened elements, row-major
	Dimensions []ArrayDimension // one entry per array dimension
	Status     Status
}
+
// Set converts src into a TextArray. Accepted inputs are nil (Null),
// []string, and any type whose underlying type is a compatible slice.
// A non-nil empty slice becomes Present with no elements or dimensions.
func (dst *TextArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = TextArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []string:
		if value == nil {
			*dst = TextArray{Status: Null}
		} else if len(value) == 0 {
			*dst = TextArray{Status: Present}
		} else {
			elements := make([]Text, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// Always a one-dimensional array with PostgreSQL's default
			// lower bound of 1.
			*dst = TextArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to TextArray", value)
	}

	return nil
}
+
+func (dst *TextArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies the array into dst. Only *[]string is supported directly;
// other pointer types are retried via GetAssignToDstType.
func (src *TextArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]string:
			*v = make([]string, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText parses the PostgreSQL text-format array representation
// (e.g. `{a,b,NULL}`) in src.
func (dst *TextArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TextArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Text

	if len(uta.Elements) > 0 {
		elements = make([]Text, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Text
			var elemSrc []byte
			// NOTE(review): an element equal to "NULL" is treated as SQL NULL
			// here; presumably ParseUntypedTextArray has already stripped
			// quotes, which would make an actual quoted string "NULL"
			// indistinguishable from a null element — confirm against the
			// parser.
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = TextArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}
+
+func (dst *TextArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TextArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = TextArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Text, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = TextArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
// EncodeText appends the PostgreSQL text-format array representation of src
// to buf, including explicit dimension bounds when present.
func (src *TextArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			// NOTE(review): a null element is rendered as quoted `"NULL"`
			// here, while TimestampArray.EncodeText emits unquoted `NULL`.
			// In PostgreSQL array syntax a quoted "NULL" is the string, not
			// SQL NULL — confirm this matches DecodeText's expectations.
			buf = append(buf, `"NULL"`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}
+
// EncodeBinary appends the PostgreSQL binary array format of src to buf.
// The element OID is looked up by name in ci.
func (src *TextArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("text"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "text")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Write a -1 length placeholder (the NULL marker); if the element
		// encodes to bytes, overwrite it with the actual length.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TextArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *TextArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tid.go b/vendor/github.com/jackc/pgx/pgtype/tid.go
new file mode 100644
index 0000000..21852a1
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tid.go
@@ -0,0 +1,144 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// TID is PostgreSQL's Tuple Identifier type.
//
// When one does
//
//  select ctid, * from some_table;
//
// it is the data type of the ctid hidden system column.
//
// It is currently implemented as a pair of integers: a uint32 block number
// and a uint16 offset number.
// Its conversion functions can be found in src/backend/utils/adt/tid.c
// in the PostgreSQL sources.
type TID struct {
	BlockNumber  uint32
	OffsetNumber uint16
	Status       Status
}
+
// Set always fails: no Go value can be converted into a TID.
func (dst *TID) Set(src interface{}) error {
	return errors.Errorf("cannot convert %v to TID", src)
}
+
+func (dst *TID) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo always fails: TID does not support assignment to Go values.
func (src *TID) AssignTo(dst interface{}) error {
	return errors.Errorf("cannot assign %v to %T", src, dst)
}
+
+func (dst *TID) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TID{Status: Null}
+ return nil
+ }
+
+ if len(src) < 5 {
+ return errors.Errorf("invalid length for tid: %v", len(src))
+ }
+
+ parts := strings.SplitN(string(src[1:len(src)-1]), ",", 2)
+ if len(parts) < 2 {
+ return errors.Errorf("invalid format for tid")
+ }
+
+ blockNumber, err := strconv.ParseUint(parts[0], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ offsetNumber, err := strconv.ParseUint(parts[1], 10, 16)
+ if err != nil {
+ return err
+ }
+
+ *dst = TID{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber), Status: Present}
+ return nil
+}
+
// DecodeBinary parses the binary tid format: exactly 6 bytes, a 4-byte
// big-endian block number followed by a 2-byte big-endian offset number.
func (dst *TID) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TID{Status: Null}
		return nil
	}

	if len(src) != 6 {
		return errors.Errorf("invalid length for tid: %v", len(src))
	}

	*dst = TID{
		BlockNumber:  binary.BigEndian.Uint32(src),
		OffsetNumber: binary.BigEndian.Uint16(src[4:]),
		Status:       Present,
	}
	return nil
}
+
+func (src *TID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = append(buf, fmt.Sprintf(`(%d,%d)`, src.BlockNumber, src.OffsetNumber)...)
+ return buf, nil
+}
+
+func (src *TID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendUint32(buf, src.BlockNumber)
+ buf = pgio.AppendUint16(buf, src.OffsetNumber)
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TID) Scan(src interface{}) error {
+ if src == nil {
+ *dst = TID{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
// Value implements the database/sql/driver Valuer interface. Encoding is
// delegated to the generic text-encoding helper.
func (src *TID) Value() (driver.Value, error) {
	return EncodeValueText(src)
}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamp.go b/vendor/github.com/jackc/pgx/pgtype/timestamp.go
new file mode 100644
index 0000000..d906f46
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamp.go
@@ -0,0 +1,225 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const pgTimestampFormat = "2006-01-02 15:04:05.999999999"
+
// Timestamp represents the PostgreSQL timestamp type. The PostgreSQL
// timestamp does not have a time zone. This presents a problem when
// translating to and from time.Time which requires a time zone. It is highly
// recommended to use timestamptz whenever possible. Timestamp methods either
// convert to UTC or return an error on non-UTC times.
type Timestamp struct {
	Time             time.Time // Time must always be in UTC.
	Status           Status
	InfinityModifier InfinityModifier // None, Infinity, or NegativeInfinity
}
+
// Set converts src into a Timestamp and stores in dst. If src is a
// time.Time in a non-UTC time zone, the time zone is discarded.
func (dst *Timestamp) Set(src interface{}) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	switch value := src.(type) {
	case time.Time:
		// Rebuild the wall-clock fields in time.UTC: the zone is dropped,
		// not converted, so 12:00 in any zone becomes 12:00 UTC.
		*dst = Timestamp{Time: time.Date(value.Year(), value.Month(), value.Day(), value.Hour(), value.Minute(), value.Second(), value.Nanosecond(), time.UTC), Status: Present}
	default:
		if originalSrc, ok := underlyingTimeType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to Timestamp", value)
	}

	return nil
}
+
+func (dst *Timestamp) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ if dst.InfinityModifier != None {
+ return dst.InfinityModifier
+ }
+ return dst.Time
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies the timestamp into dst (*time.Time). Infinite values
// cannot be represented as time.Time and report an error.
func (src *Timestamp) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {
		case *time.Time:
			if src.InfinityModifier != None {
				return errors.Errorf("cannot assign %v to %T", src, dst)
			}
			*v = src.Time
			return nil
		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
+// DecodeText decodes from src into dst. The decoded time is considered to
+// be in UTC.
+func (dst *Timestamp) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Timestamp{Status: Null}
+ return nil
+ }
+
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ *dst = Timestamp{Status: Present, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Timestamp{Status: Present, InfinityModifier: -Infinity}
+ default:
+ tim, err := time.Parse(pgTimestampFormat, sbuf)
+ if err != nil {
+ return err
+ }
+
+ *dst = Timestamp{Time: tim, Status: Present}
+ }
+
+ return nil
+}
+
// DecodeBinary decodes from src into dst. The decoded time is considered to
// be in UTC. The wire value is an 8-byte big-endian signed count of
// microseconds since 2000-01-01, with MaxInt64/MinInt64 as the
// infinity/-infinity sentinels.
func (dst *Timestamp) DecodeBinary(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	if len(src) != 8 {
		return errors.Errorf("invalid length for timestamp: %v", len(src))
	}

	microsecSinceY2K := int64(binary.BigEndian.Uint64(src))

	switch microsecSinceY2K {
	case infinityMicrosecondOffset:
		*dst = Timestamp{Status: Present, InfinityModifier: Infinity}
	case negativeInfinityMicrosecondOffset:
		*dst = Timestamp{Status: Present, InfinityModifier: -Infinity}
	default:
		// Shift to the Unix epoch, then split into whole seconds and
		// nanoseconds for time.Unix.
		microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
		tim := time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000).UTC()
		*dst = Timestamp{Time: tim, Status: Present}
	}

	return nil
}
+
// EncodeText writes the text encoding of src into w. If src.Time is not in
// the UTC time zone it returns an error.
func (src *Timestamp) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}
	// NOTE(review): this compares *time.Location identity, not offset; a
	// time whose UTC location was obtained other than via time.UTC (e.g.
	// time.FixedZone("UTC", 0)) would be rejected — confirm intended.
	if src.Time.Location() != time.UTC {
		return nil, errors.Errorf("cannot encode non-UTC time into timestamp")
	}

	var s string

	switch src.InfinityModifier {
	case None:
		s = src.Time.Format(pgTimestampFormat)
	case Infinity:
		s = "infinity"
	case NegativeInfinity:
		s = "-infinity"
	}

	return append(buf, s...), nil
}
+
// EncodeBinary writes the binary encoding of src into w. If src.Time is not in
// the UTC time zone it returns an error. The wire value is microseconds
// since 2000-01-01 as a big-endian int64.
func (src *Timestamp) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}
	if src.Time.Location() != time.UTC {
		return nil, errors.Errorf("cannot encode non-UTC time into timestamp")
	}

	var microsecSinceY2K int64
	switch src.InfinityModifier {
	case None:
		microsecSinceY2K = src.Time.Unix()*1000000 + int64(src.Time.Nanosecond())/1000 - microsecFromUnixEpochToY2K
	case Infinity:
		microsecSinceY2K = infinityMicrosecondOffset
	case NegativeInfinity:
		microsecSinceY2K = negativeInfinityMicrosecondOffset
	}

	return pgio.AppendInt64(buf, microsecSinceY2K), nil
}
+
// Scan implements the database/sql Scanner interface.
func (dst *Timestamp) Scan(src interface{}) error {
	if src == nil {
		*dst = Timestamp{Status: Null}
		return nil
	}

	switch src := src.(type) {
	case string:
		return dst.DecodeText(nil, []byte(src))
	case []byte:
		srcCopy := make([]byte, len(src))
		copy(srcCopy, src)
		return dst.DecodeText(nil, srcCopy)
	case time.Time:
		// NOTE(review): unlike Set, this stores src without normalizing to
		// UTC; a non-UTC scanned time violates the type's documented UTC
		// invariant and EncodeText/EncodeBinary would later reject it —
		// confirm whether callers rely on the original location.
		*dst = Timestamp{Time: src, Status: Present}
		return nil
	}

	return errors.Errorf("cannot scan %T", src)
}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Timestamp) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ if src.InfinityModifier != None {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go b/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go
new file mode 100644
index 0000000..546a381
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamp_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
// TimestampArray represents a PostgreSQL array of timestamp values.
type TimestampArray struct {
	Elements   []Timestamp      // flattened elements, row-major
	Dimensions []ArrayDimension // one entry per array dimension
	Status     Status
}
+
// Set converts src into a TimestampArray. Accepted inputs are nil (Null),
// []time.Time, and any type whose underlying type is a compatible slice.
// A non-nil empty slice becomes Present with no elements or dimensions.
func (dst *TimestampArray) Set(src interface{}) error {
	// untyped nil and typed nil interfaces are different
	if src == nil {
		*dst = TimestampArray{Status: Null}
		return nil
	}

	switch value := src.(type) {

	case []time.Time:
		if value == nil {
			*dst = TimestampArray{Status: Null}
		} else if len(value) == 0 {
			*dst = TimestampArray{Status: Present}
		} else {
			elements := make([]Timestamp, len(value))
			for i := range value {
				if err := elements[i].Set(value[i]); err != nil {
					return err
				}
			}
			// Always a one-dimensional array with PostgreSQL's default
			// lower bound of 1.
			*dst = TimestampArray{
				Elements:   elements,
				Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
				Status:     Present,
			}
		}

	default:
		if originalSrc, ok := underlyingSliceType(src); ok {
			return dst.Set(originalSrc)
		}
		return errors.Errorf("cannot convert %v to TimestampArray", value)
	}

	return nil
}
+
+func (dst *TimestampArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
// AssignTo copies the array into dst. Only *[]time.Time is supported
// directly; other pointer types are retried via GetAssignToDstType.
func (src *TimestampArray) AssignTo(dst interface{}) error {
	switch src.Status {
	case Present:
		switch v := dst.(type) {

		case *[]time.Time:
			*v = make([]time.Time, len(src.Elements))
			for i := range src.Elements {
				if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
					return err
				}
			}
			return nil

		default:
			if nextDst, retry := GetAssignToDstType(dst); retry {
				return src.AssignTo(nextDst)
			}
		}
	case Null:
		return NullAssignTo(dst)
	}

	return errors.Errorf("cannot decode %v into %T", src, dst)
}
+
// DecodeText parses the PostgreSQL text-format array representation in src;
// an unquoted NULL element becomes a Null Timestamp.
func (dst *TimestampArray) DecodeText(ci *ConnInfo, src []byte) error {
	if src == nil {
		*dst = TimestampArray{Status: Null}
		return nil
	}

	uta, err := ParseUntypedTextArray(string(src))
	if err != nil {
		return err
	}

	var elements []Timestamp

	if len(uta.Elements) > 0 {
		elements = make([]Timestamp, len(uta.Elements))

		for i, s := range uta.Elements {
			var elem Timestamp
			// A nil elemSrc decodes to Status Null.
			var elemSrc []byte
			if s != "NULL" {
				elemSrc = []byte(s)
			}
			err = elem.DecodeText(ci, elemSrc)
			if err != nil {
				return err
			}

			elements[i] = elem
		}
	}

	*dst = TimestampArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}

	return nil
}
+
+func (dst *TimestampArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TimestampArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = TimestampArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Timestamp, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = TimestampArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
// EncodeText appends the PostgreSQL text-format array representation of src
// to buf, including explicit dimension bounds when present. Null elements
// are rendered as unquoted NULL.
func (src *TimestampArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	if len(src.Dimensions) == 0 {
		return append(buf, '{', '}'), nil
	}

	buf = EncodeTextArrayDimensions(buf, src.Dimensions)

	// dimElemCounts is the multiples of elements that each array lies on. For
	// example, a single dimension array of length 4 would have a dimElemCounts of
	// [4]. A multi-dimensional array of lengths [3,5,2] would have a
	// dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
	// or '}'.
	dimElemCounts := make([]int, len(src.Dimensions))
	dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
	for i := len(src.Dimensions) - 2; i > -1; i-- {
		dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
	}

	inElemBuf := make([]byte, 0, 32)
	for i, elem := range src.Elements {
		if i > 0 {
			buf = append(buf, ',')
		}

		for _, dec := range dimElemCounts {
			if i%dec == 0 {
				buf = append(buf, '{')
			}
		}

		elemBuf, err := elem.EncodeText(ci, inElemBuf)
		if err != nil {
			return nil, err
		}
		if elemBuf == nil {
			buf = append(buf, `NULL`...)
		} else {
			buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
		}

		for _, dec := range dimElemCounts {
			if (i+1)%dec == 0 {
				buf = append(buf, '}')
			}
		}
	}

	return buf, nil
}
+
// EncodeBinary appends the PostgreSQL binary array format of src to buf.
// The element OID is looked up by name in ci.
func (src *TimestampArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
	switch src.Status {
	case Null:
		return nil, nil
	case Undefined:
		return nil, errUndefined
	}

	arrayHeader := ArrayHeader{
		Dimensions: src.Dimensions,
	}

	if dt, ok := ci.DataTypeForName("timestamp"); ok {
		arrayHeader.ElementOID = int32(dt.OID)
	} else {
		return nil, errors.Errorf("unable to find oid for type name %v", "timestamp")
	}

	for i := range src.Elements {
		if src.Elements[i].Status == Null {
			arrayHeader.ContainsNull = true
			break
		}
	}

	buf = arrayHeader.EncodeBinary(ci, buf)

	for i := range src.Elements {
		// Write a -1 length placeholder (the NULL marker); if the element
		// encodes to bytes, overwrite it with the actual length.
		sp := len(buf)
		buf = pgio.AppendInt32(buf, -1)

		elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
		if err != nil {
			return nil, err
		}
		if elemBuf != nil {
			buf = elemBuf
			pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
		}
	}

	return buf, nil
}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TimestampArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *TimestampArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamptz.go b/vendor/github.com/jackc/pgx/pgtype/timestamptz.go
new file mode 100644
index 0000000..74fe495
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamptz.go
@@ -0,0 +1,221 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+const pgTimestamptzHourFormat = "2006-01-02 15:04:05.999999999Z07"
+const pgTimestamptzMinuteFormat = "2006-01-02 15:04:05.999999999Z07:00"
+const pgTimestamptzSecondFormat = "2006-01-02 15:04:05.999999999Z07:00:00"
+const microsecFromUnixEpochToY2K = 946684800 * 1000000
+
+const (
+ negativeInfinityMicrosecondOffset = -9223372036854775808
+ infinityMicrosecondOffset = 9223372036854775807
+)
+
+type Timestamptz struct {
+ Time time.Time
+ Status Status
+ InfinityModifier InfinityModifier
+}
+
+func (dst *Timestamptz) Set(src interface{}) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case time.Time:
+ *dst = Timestamptz{Time: value, Status: Present}
+ default:
+ if originalSrc, ok := underlyingTimeType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to Timestamptz", value)
+ }
+
+ return nil
+}
+
+func (dst *Timestamptz) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ if dst.InfinityModifier != None {
+ return dst.InfinityModifier
+ }
+ return dst.Time
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Timestamptz) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *time.Time:
+ if src.InfinityModifier != None {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+ }
+ *v = src.Time
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *Timestamptz) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ sbuf := string(src)
+ switch sbuf {
+ case "infinity":
+ *dst = Timestamptz{Status: Present, InfinityModifier: Infinity}
+ case "-infinity":
+ *dst = Timestamptz{Status: Present, InfinityModifier: -Infinity}
+ default:
+ var format string
+ if sbuf[len(sbuf)-9] == '-' || sbuf[len(sbuf)-9] == '+' {
+ format = pgTimestamptzSecondFormat
+ } else if sbuf[len(sbuf)-6] == '-' || sbuf[len(sbuf)-6] == '+' {
+ format = pgTimestamptzMinuteFormat
+ } else {
+ format = pgTimestamptzHourFormat
+ }
+
+ tim, err := time.Parse(format, sbuf)
+ if err != nil {
+ return err
+ }
+
+ *dst = Timestamptz{Time: tim, Status: Present}
+ }
+
+ return nil
+}
+
+func (dst *Timestamptz) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ if len(src) != 8 {
+ return errors.Errorf("invalid length for timestamptz: %v", len(src))
+ }
+
+ microsecSinceY2K := int64(binary.BigEndian.Uint64(src))
+
+ switch microsecSinceY2K {
+ case infinityMicrosecondOffset:
+ *dst = Timestamptz{Status: Present, InfinityModifier: Infinity}
+ case negativeInfinityMicrosecondOffset:
+ *dst = Timestamptz{Status: Present, InfinityModifier: -Infinity}
+ default:
+ microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
+ tim := time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
+ *dst = Timestamptz{Time: tim, Status: Present}
+ }
+
+ return nil
+}
+
+func (src *Timestamptz) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var s string
+
+ switch src.InfinityModifier {
+ case None:
+ s = src.Time.UTC().Format(pgTimestamptzSecondFormat)
+ case Infinity:
+ s = "infinity"
+ case NegativeInfinity:
+ s = "-infinity"
+ }
+
+ return append(buf, s...), nil
+}
+
+func (src *Timestamptz) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var microsecSinceY2K int64
+ switch src.InfinityModifier {
+ case None:
+ microsecSinceUnixEpoch := src.Time.Unix()*1000000 + int64(src.Time.Nanosecond())/1000
+ microsecSinceY2K = microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
+ case Infinity:
+ microsecSinceY2K = infinityMicrosecondOffset
+ case NegativeInfinity:
+ microsecSinceY2K = negativeInfinityMicrosecondOffset
+ }
+
+ return pgio.AppendInt64(buf, microsecSinceY2K), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Timestamptz) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Timestamptz{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ case time.Time:
+ *dst = Timestamptz{Time: src, Status: Present}
+ return nil
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Timestamptz) Value() (driver.Value, error) {
+ switch src.Status {
+ case Present:
+ if src.InfinityModifier != None {
+ return src.InfinityModifier.String(), nil
+ }
+ return src.Time, nil
+ case Null:
+ return nil, nil
+ default:
+ return nil, errUndefined
+ }
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go b/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go
new file mode 100644
index 0000000..88b6cc5
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/timestamptz_array.go
@@ -0,0 +1,301 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "time"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type TimestamptzArray struct {
+ Elements []Timestamptz
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *TimestamptzArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []time.Time:
+ if value == nil {
+ *dst = TimestamptzArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = TimestamptzArray{Status: Present}
+ } else {
+ elements := make([]Timestamptz, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = TimestamptzArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to TimestamptzArray", value)
+ }
+
+ return nil
+}
+
+func (dst *TimestamptzArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *TimestamptzArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]time.Time:
+ *v = make([]time.Time, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *TimestamptzArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Timestamptz
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Timestamptz, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Timestamptz
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = TimestamptzArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *TimestamptzArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = TimestamptzArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = TimestamptzArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Timestamptz, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = TimestamptzArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *TimestamptzArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts holds, for each dimension (outermost first), how many
+	// elements one group at that dimension spans. For example, a single
+	// dimension array of length 4 has a dimElemCounts of [4], while a
+	// multi-dimensional array of lengths [3,5,2] has [30,10,2]. This is used
+	// to decide when to render a '{' or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *TimestamptzArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("timestamptz"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "timestamptz")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *TimestamptzArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *TimestamptzArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tsrange.go b/vendor/github.com/jackc/pgx/pgtype/tsrange.go
new file mode 100644
index 0000000..8a67d65
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tsrange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Tsrange struct {
+ Lower Timestamp
+ Upper Timestamp
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Tsrange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Tsrange", src)
+}
+
+func (dst *Tsrange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Tsrange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Tsrange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Tsrange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Tsrange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Tsrange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Tsrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Tsrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Tsrange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Tsrange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Tsrange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/tstzrange.go b/vendor/github.com/jackc/pgx/pgtype/tstzrange.go
new file mode 100644
index 0000000..b512909
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/tstzrange.go
@@ -0,0 +1,250 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Tstzrange struct {
+ Lower Timestamptz
+ Upper Timestamptz
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *Tstzrange) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Tstzrange", src)
+}
+
+func (dst *Tstzrange) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Tstzrange) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Tstzrange) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = Tstzrange{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *Tstzrange) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = Tstzrange{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src Tstzrange) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src Tstzrange) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Tstzrange) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Tstzrange{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src Tstzrange) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb b/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb
new file mode 100644
index 0000000..6fafc2d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_array.go.erb
@@ -0,0 +1,304 @@
+package pgtype
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type <%= pgtype_array_type %> struct {
+ Elements []<%= pgtype_element_type %>
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *<%= pgtype_array_type %>) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ <% go_array_types.split(",").each do |t| %>
+ case <%= t %>:
+ if value == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ } else if len(value) == 0 {
+ *dst = <%= pgtype_array_type %>{Status: Present}
+ } else {
+ elements := make([]<%= pgtype_element_type %>, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = <%= pgtype_array_type %>{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+ <% end %>
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to <%= pgtype_array_type %>", value)
+ }
+
+ return nil
+}
+
+func (dst *<%= pgtype_array_type %>) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *<%= pgtype_array_type %>) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ <% go_array_types.split(",").each do |t| %>
+ case *<%= t %>:
+ *v = make(<%= t %>, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+ <% end %>
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *<%= pgtype_array_type %>) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []<%= pgtype_element_type %>
+
+ if len(uta.Elements) > 0 {
+ elements = make([]<%= pgtype_element_type %>, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem <%= pgtype_element_type %>
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = <%= pgtype_array_type %>{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+<% if binary_format == "true" %>
+func (dst *<%= pgtype_array_type %>) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= pgtype_array_type %>{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = <%= pgtype_array_type %>{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]<%= pgtype_element_type %>, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp:rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = <%= pgtype_array_type %>{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+<% end %>
+
+func (src *<%= pgtype_array_type %>) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+	// dimElemCounts holds, for each dimension (outermost first), how many
+	// elements one group at that dimension spans. For example, a single
+	// dimension array of length 4 has a dimElemCounts of [4], while a
+	// multi-dimensional array of lengths [3,5,2] has [30,10,2]. This is used
+	// to decide when to render a '{' or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `<%= text_null %>`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+<% if binary_format == "true" %>
+ func (src *<%= pgtype_array_type %>) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("<%= element_type_name %>"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "<%= element_type_name %>")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+ }
+<% end %>
+
+// Scan implements the database/sql Scanner interface.
+func (dst *<%= pgtype_array_type %>) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *<%= pgtype_array_type %>) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh b/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh
new file mode 100644
index 0000000..4a8211b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_array_gen.sh
@@ -0,0 +1,24 @@
+erb pgtype_array_type=Int2Array pgtype_element_type=Int2 go_array_types=[]int16,[]uint16 element_type_name=int2 text_null=NULL binary_format=true typed_array.go.erb > int2_array.go
+erb pgtype_array_type=Int4Array pgtype_element_type=Int4 go_array_types=[]int32,[]uint32 element_type_name=int4 text_null=NULL binary_format=true typed_array.go.erb > int4_array.go
+erb pgtype_array_type=Int8Array pgtype_element_type=Int8 go_array_types=[]int64,[]uint64 element_type_name=int8 text_null=NULL binary_format=true typed_array.go.erb > int8_array.go
+erb pgtype_array_type=BoolArray pgtype_element_type=Bool go_array_types=[]bool element_type_name=bool text_null=NULL binary_format=true typed_array.go.erb > bool_array.go
+erb pgtype_array_type=DateArray pgtype_element_type=Date go_array_types=[]time.Time element_type_name=date text_null=NULL binary_format=true typed_array.go.erb > date_array.go
+erb pgtype_array_type=TimestamptzArray pgtype_element_type=Timestamptz go_array_types=[]time.Time element_type_name=timestamptz text_null=NULL binary_format=true typed_array.go.erb > timestamptz_array.go
+erb pgtype_array_type=TimestampArray pgtype_element_type=Timestamp go_array_types=[]time.Time element_type_name=timestamp text_null=NULL binary_format=true typed_array.go.erb > timestamp_array.go
+erb pgtype_array_type=Float4Array pgtype_element_type=Float4 go_array_types=[]float32 element_type_name=float4 text_null=NULL binary_format=true typed_array.go.erb > float4_array.go
+erb pgtype_array_type=Float8Array pgtype_element_type=Float8 go_array_types=[]float64 element_type_name=float8 text_null=NULL binary_format=true typed_array.go.erb > float8_array.go
+erb pgtype_array_type=InetArray pgtype_element_type=Inet go_array_types=[]*net.IPNet,[]net.IP element_type_name=inet text_null=NULL binary_format=true typed_array.go.erb > inet_array.go
+erb pgtype_array_type=CIDRArray pgtype_element_type=CIDR go_array_types=[]*net.IPNet,[]net.IP element_type_name=cidr text_null=NULL binary_format=true typed_array.go.erb > cidr_array.go
+erb pgtype_array_type=TextArray pgtype_element_type=Text go_array_types=[]string element_type_name=text text_null='"NULL"' binary_format=true typed_array.go.erb > text_array.go
+erb pgtype_array_type=VarcharArray pgtype_element_type=Varchar go_array_types=[]string element_type_name=varchar text_null='"NULL"' binary_format=true typed_array.go.erb > varchar_array.go
+erb pgtype_array_type=BPCharArray pgtype_element_type=BPChar go_array_types=[]string element_type_name=bpchar text_null='NULL' binary_format=true typed_array.go.erb > bpchar_array.go
+erb pgtype_array_type=ByteaArray pgtype_element_type=Bytea go_array_types=[][]byte element_type_name=bytea text_null=NULL binary_format=true typed_array.go.erb > bytea_array.go
+erb pgtype_array_type=ACLItemArray pgtype_element_type=ACLItem go_array_types=[]string element_type_name=aclitem text_null=NULL binary_format=false typed_array.go.erb > aclitem_array.go
+erb pgtype_array_type=HstoreArray pgtype_element_type=Hstore go_array_types=[]map[string]string element_type_name=hstore text_null=NULL binary_format=true typed_array.go.erb > hstore_array.go
+erb pgtype_array_type=NumericArray pgtype_element_type=Numeric go_array_types=[]float32,[]float64 element_type_name=numeric text_null=NULL binary_format=true typed_array.go.erb > numeric_array.go
+erb pgtype_array_type=UUIDArray pgtype_element_type=UUID go_array_types=[][16]byte,[][]byte,[]string element_type_name=uuid text_null=NULL binary_format=true typed_array.go.erb > uuid_array.go
+
+# While the binary format is theoretically possible it is only practical to use the text format. In addition, the text format for NULL enums is unquoted so TextArray or a possible GenericTextArray cannot be used.
+erb pgtype_array_type=EnumArray pgtype_element_type=GenericText go_array_types=[]string text_null='NULL' binary_format=false typed_array.go.erb > enum_array.go
+
+goimports -w *_array.go
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb b/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb
new file mode 100644
index 0000000..91a5cb9
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_range.go.erb
@@ -0,0 +1,252 @@
+package pgtype
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "fmt"
+ "io"
+
+ "github.com/jackc/pgx/pgio"
+)
+
+type <%= range_type %> struct {
+ Lower <%= element_type %>
+ Upper <%= element_type %>
+ LowerType BoundType
+ UpperType BoundType
+ Status Status
+}
+
+func (dst *<%= range_type %>) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to <%= range_type %>", src)
+}
+
+func (dst *<%= range_type %>) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *<%= range_type %>) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *<%= range_type %>) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= range_type %>{Status: Null}
+ return nil
+ }
+
+ utr, err := ParseUntypedTextRange(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = <%= range_type %>{Status: Present}
+
+ dst.LowerType = utr.LowerType
+ dst.UpperType = utr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeText(ci, []byte(utr.Lower)); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeText(ci, []byte(utr.Upper)); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (dst *<%= range_type %>) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = <%= range_type %>{Status: Null}
+ return nil
+ }
+
+ ubr, err := ParseUntypedBinaryRange(src)
+ if err != nil {
+ return err
+ }
+
+ *dst = <%= range_type %>{Status: Present}
+
+ dst.LowerType = ubr.LowerType
+ dst.UpperType = ubr.UpperType
+
+ if dst.LowerType == Empty {
+ return nil
+ }
+
+ if dst.LowerType == Inclusive || dst.LowerType == Exclusive {
+ if err := dst.Lower.DecodeBinary(ci, ubr.Lower); err != nil {
+ return err
+ }
+ }
+
+ if dst.UpperType == Inclusive || dst.UpperType == Exclusive {
+ if err := dst.Upper.DecodeBinary(ci, ubr.Upper); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (src <%= range_type %>) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ switch src.LowerType {
+ case Exclusive, Unbounded:
+ buf = append(buf, '(')
+ case Inclusive:
+ buf = append(buf, '[')
+ case Empty:
+ return append(buf, "empty"...), nil
+ default:
+ return nil, errors.Errorf("unknown lower bound type %v", src.LowerType)
+ }
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ buf, err = src.Lower.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+ }
+
+ buf = append(buf, ',')
+
+ if src.UpperType != Unbounded {
+ buf, err = src.Upper.EncodeText(ci, buf)
+ if err != nil {
+ return nil, err
+ } else if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+ }
+
+ switch src.UpperType {
+ case Exclusive, Unbounded:
+ buf = append(buf, ')')
+ case Inclusive:
+ buf = append(buf, ']')
+ default:
+ return nil, errors.Errorf("unknown upper bound type %v", src.UpperType)
+ }
+
+ return buf, nil
+}
+
+func (src <%= range_type %>) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ var rangeType byte
+ switch src.LowerType {
+ case Inclusive:
+ rangeType |= lowerInclusiveMask
+ case Unbounded:
+ rangeType |= lowerUnboundedMask
+ case Exclusive:
+ case Empty:
+ return append(buf, emptyMask), nil
+ default:
+ return nil, errors.Errorf("unknown LowerType: %v", src.LowerType)
+ }
+
+ switch src.UpperType {
+ case Inclusive:
+ rangeType |= upperInclusiveMask
+ case Unbounded:
+ rangeType |= upperUnboundedMask
+ case Exclusive:
+ default:
+ return nil, errors.Errorf("unknown UpperType: %v", src.UpperType)
+ }
+
+ buf = append(buf, rangeType)
+
+ var err error
+
+ if src.LowerType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Lower.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Lower cannot be null unless LowerType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ if src.UpperType != Unbounded {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf, err = src.Upper.EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, errors.Errorf("Upper cannot be null unless UpperType is Unbounded")
+ }
+
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *<%= range_type %>) Scan(src interface{}) error {
+ if src == nil {
+ *dst = <%= range_type %>{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src <%= range_type %>) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh b/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh
new file mode 100644
index 0000000..bedda29
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/typed_range_gen.sh
@@ -0,0 +1,7 @@
+erb range_type=Int4range element_type=Int4 typed_range.go.erb > int4range.go
+erb range_type=Int8range element_type=Int8 typed_range.go.erb > int8range.go
+erb range_type=Tsrange element_type=Timestamp typed_range.go.erb > tsrange.go
+erb range_type=Tstzrange element_type=Timestamptz typed_range.go.erb > tstzrange.go
+erb range_type=Daterange element_type=Date typed_range.go.erb > daterange.go
+erb range_type=Numrange element_type=Numeric typed_range.go.erb > numrange.go
+goimports -w *range.go
diff --git a/vendor/github.com/jackc/pgx/pgtype/unknown.go b/vendor/github.com/jackc/pgx/pgtype/unknown.go
new file mode 100644
index 0000000..567831d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/unknown.go
@@ -0,0 +1,44 @@
+package pgtype
+
+import "database/sql/driver"
+
+// Unknown represents the PostgreSQL unknown type. It is either a string literal
+// or NULL. It is used when PostgreSQL does not know the type of a value. In
+// general, this will only be used in pgx when selecting a null value without
+// type information. e.g. SELECT NULL;
+type Unknown struct {
+ String string
+ Status Status
+}
+
+func (dst *Unknown) Set(src interface{}) error {
+ return (*Text)(dst).Set(src)
+}
+
+func (dst *Unknown) Get() interface{} {
+ return (*Text)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as Unknown is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *Unknown) AssignTo(dst interface{}) error {
+ return (*Text)(src).AssignTo(dst)
+}
+
+func (dst *Unknown) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeText(ci, src)
+}
+
+func (dst *Unknown) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeBinary(ci, src)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Unknown) Scan(src interface{}) error {
+ return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Unknown) Value() (driver.Value, error) {
+ return (*Text)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/uuid.go b/vendor/github.com/jackc/pgx/pgtype/uuid.go
new file mode 100644
index 0000000..f8297b3
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/uuid.go
@@ -0,0 +1,183 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/hex"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+type UUID struct {
+ Bytes [16]byte
+ Status Status
+}
+
+func (dst *UUID) Set(src interface{}) error {
+ if src == nil {
+ *dst = UUID{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+ case [16]byte:
+ *dst = UUID{Bytes: value, Status: Present}
+ case []byte:
+ if value != nil {
+ if len(value) != 16 {
+ return errors.Errorf("[]byte must be 16 bytes to convert to UUID: %d", len(value))
+ }
+ *dst = UUID{Status: Present}
+ copy(dst.Bytes[:], value)
+ } else {
+ *dst = UUID{Status: Null}
+ }
+ case string:
+ uuid, err := parseUUID(value)
+ if err != nil {
+ return err
+ }
+ *dst = UUID{Bytes: uuid, Status: Present}
+ default:
+ if originalSrc, ok := underlyingPtrType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to UUID", value)
+ }
+
+ return nil
+}
+
+func (dst *UUID) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst.Bytes
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *UUID) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+ case *[16]byte:
+ *v = src.Bytes
+ return nil
+ case *[]byte:
+ *v = make([]byte, 16)
+ copy(*v, src.Bytes[:])
+ return nil
+ case *string:
+ *v = encodeUUID(src.Bytes)
+ return nil
+ default:
+ if nextDst, retry := GetAssignToDstType(v); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot assign %v into %T", src, dst)
+}
+
+// parseUUID converts a string UUID in standard form to a byte array.
+func parseUUID(src string) (dst [16]byte, err error) {
+ src = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:]
+ buf, err := hex.DecodeString(src)
+ if err != nil {
+ return dst, err
+ }
+
+ copy(dst[:], buf)
+ return dst, err
+}
+
+// encodeUUID converts a uuid byte array to UUID standard string form.
+func encodeUUID(src [16]byte) string {
+ return fmt.Sprintf("%x-%x-%x-%x-%x", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])
+}
+
+func (dst *UUID) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUID{Status: Null}
+ return nil
+ }
+
+ if len(src) != 36 {
+ return errors.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ buf, err := parseUUID(string(src))
+ if err != nil {
+ return err
+ }
+
+ *dst = UUID{Bytes: buf, Status: Present}
+ return nil
+}
+
+func (dst *UUID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUID{Status: Null}
+ return nil
+ }
+
+ if len(src) != 16 {
+ return errors.Errorf("invalid length for UUID: %v", len(src))
+ }
+
+ *dst = UUID{Status: Present}
+ copy(dst.Bytes[:], src)
+ return nil
+}
+
+func (src *UUID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, encodeUUID(src.Bytes)...), nil
+}
+
+func (src *UUID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ return append(buf, src.Bytes[:]...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *UUID) Scan(src interface{}) error {
+ if src == nil {
+ *dst = UUID{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *UUID) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/uuid_array.go b/vendor/github.com/jackc/pgx/pgtype/uuid_array.go
new file mode 100644
index 0000000..9c7843a
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/uuid_array.go
@@ -0,0 +1,356 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type UUIDArray struct {
+ Elements []UUID
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *UUIDArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case [][16]byte:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case [][]byte:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ case []string:
+ if value == nil {
+ *dst = UUIDArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = UUIDArray{Status: Present}
+ } else {
+ elements := make([]UUID, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = UUIDArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to UUIDArray", value)
+ }
+
+ return nil
+}
+
+func (dst *UUIDArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *UUIDArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[][16]byte:
+ *v = make([][16]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[][]byte:
+ *v = make([][]byte, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *UUIDArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []UUID
+
+ if len(uta.Elements) > 0 {
+ elements = make([]UUID, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem UUID
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = UUIDArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *UUIDArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = UUIDArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = UUIDArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]UUID, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = UUIDArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *UUIDArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `NULL`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *UUIDArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("uuid"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "uuid")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *UUIDArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *UUIDArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varbit.go b/vendor/github.com/jackc/pgx/pgtype/varbit.go
new file mode 100644
index 0000000..dfa194d
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varbit.go
@@ -0,0 +1,133 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type Varbit struct {
+ Bytes []byte
+ Len int32 // Number of bits
+ Status Status
+}
+
+func (dst *Varbit) Set(src interface{}) error {
+ return errors.Errorf("cannot convert %v to Varbit", src)
+}
+
+func (dst *Varbit) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *Varbit) AssignTo(dst interface{}) error {
+ return errors.Errorf("cannot assign %v to %T", src, dst)
+}
+
+func (dst *Varbit) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Varbit{Status: Null}
+ return nil
+ }
+
+ bitLen := len(src)
+ byteLen := bitLen / 8
+ if bitLen%8 > 0 {
+ byteLen++
+ }
+ buf := make([]byte, byteLen)
+
+ for i, b := range src {
+ if b == '1' {
+ byteIdx := i / 8
+ bitIdx := uint(i % 8)
+ buf[byteIdx] = buf[byteIdx] | (128 >> bitIdx)
+ }
+ }
+
+ *dst = Varbit{Bytes: buf, Len: int32(bitLen), Status: Present}
+ return nil
+}
+
+func (dst *Varbit) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = Varbit{Status: Null}
+ return nil
+ }
+
+ if len(src) < 4 {
+ return errors.Errorf("invalid length for varbit: %v", len(src))
+ }
+
+ bitLen := int32(binary.BigEndian.Uint32(src))
+ rp := 4
+
+ *dst = Varbit{Bytes: src[rp:], Len: bitLen, Status: Present}
+ return nil
+}
+
+func (src *Varbit) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ for i := int32(0); i < src.Len; i++ {
+ byteIdx := i / 8
+ bitMask := byte(128 >> byte(i%8))
+ char := byte('0')
+ if src.Bytes[byteIdx]&bitMask > 0 {
+ char = '1'
+ }
+ buf = append(buf, char)
+ }
+
+ return buf, nil
+}
+
+func (src *Varbit) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ buf = pgio.AppendInt32(buf, src.Len)
+ return append(buf, src.Bytes...), nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Varbit) Scan(src interface{}) error {
+ if src == nil {
+ *dst = Varbit{Status: Null}
+ return nil
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Varbit) Value() (driver.Value, error) {
+ return EncodeValueText(src)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varchar.go b/vendor/github.com/jackc/pgx/pgtype/varchar.go
new file mode 100644
index 0000000..6be1a03
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varchar.go
@@ -0,0 +1,58 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+type Varchar Text
+
+// Set converts from src to dst. Note that as Varchar is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *Varchar) Set(src interface{}) error {
+ return (*Text)(dst).Set(src)
+}
+
+func (dst *Varchar) Get() interface{} {
+ return (*Text)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as Varchar is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *Varchar) AssignTo(dst interface{}) error {
+ return (*Text)(src).AssignTo(dst)
+}
+
+func (dst *Varchar) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeText(ci, src)
+}
+
+func (dst *Varchar) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*Text)(dst).DecodeBinary(ci, src)
+}
+
+func (src *Varchar) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Text)(src).EncodeText(ci, buf)
+}
+
+func (src *Varchar) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*Text)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *Varchar) Scan(src interface{}) error {
+ return (*Text)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *Varchar) Value() (driver.Value, error) {
+ return (*Text)(src).Value()
+}
+
+func (src *Varchar) MarshalJSON() ([]byte, error) {
+ return (*Text)(src).MarshalJSON()
+}
+
+func (dst *Varchar) UnmarshalJSON(b []byte) error {
+ return (*Text)(dst).UnmarshalJSON(b)
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/varchar_array.go b/vendor/github.com/jackc/pgx/pgtype/varchar_array.go
new file mode 100644
index 0000000..09eba3e
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/varchar_array.go
@@ -0,0 +1,300 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/pkg/errors"
+)
+
+type VarcharArray struct {
+ Elements []Varchar
+ Dimensions []ArrayDimension
+ Status Status
+}
+
+func (dst *VarcharArray) Set(src interface{}) error {
+ // untyped nil and typed nil interfaces are different
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ switch value := src.(type) {
+
+ case []string:
+ if value == nil {
+ *dst = VarcharArray{Status: Null}
+ } else if len(value) == 0 {
+ *dst = VarcharArray{Status: Present}
+ } else {
+ elements := make([]Varchar, len(value))
+ for i := range value {
+ if err := elements[i].Set(value[i]); err != nil {
+ return err
+ }
+ }
+ *dst = VarcharArray{
+ Elements: elements,
+ Dimensions: []ArrayDimension{{Length: int32(len(elements)), LowerBound: 1}},
+ Status: Present,
+ }
+ }
+
+ default:
+ if originalSrc, ok := underlyingSliceType(src); ok {
+ return dst.Set(originalSrc)
+ }
+ return errors.Errorf("cannot convert %v to VarcharArray", value)
+ }
+
+ return nil
+}
+
+func (dst *VarcharArray) Get() interface{} {
+ switch dst.Status {
+ case Present:
+ return dst
+ case Null:
+ return nil
+ default:
+ return dst.Status
+ }
+}
+
+func (src *VarcharArray) AssignTo(dst interface{}) error {
+ switch src.Status {
+ case Present:
+ switch v := dst.(type) {
+
+ case *[]string:
+ *v = make([]string, len(src.Elements))
+ for i := range src.Elements {
+ if err := src.Elements[i].AssignTo(&((*v)[i])); err != nil {
+ return err
+ }
+ }
+ return nil
+
+ default:
+ if nextDst, retry := GetAssignToDstType(dst); retry {
+ return src.AssignTo(nextDst)
+ }
+ }
+ case Null:
+ return NullAssignTo(dst)
+ }
+
+ return errors.Errorf("cannot decode %v into %T", src, dst)
+}
+
+func (dst *VarcharArray) DecodeText(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ uta, err := ParseUntypedTextArray(string(src))
+ if err != nil {
+ return err
+ }
+
+ var elements []Varchar
+
+ if len(uta.Elements) > 0 {
+ elements = make([]Varchar, len(uta.Elements))
+
+ for i, s := range uta.Elements {
+ var elem Varchar
+ var elemSrc []byte
+ if s != "NULL" {
+ elemSrc = []byte(s)
+ }
+ err = elem.DecodeText(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+
+ elements[i] = elem
+ }
+ }
+
+ *dst = VarcharArray{Elements: elements, Dimensions: uta.Dimensions, Status: Present}
+
+ return nil
+}
+
+func (dst *VarcharArray) DecodeBinary(ci *ConnInfo, src []byte) error {
+ if src == nil {
+ *dst = VarcharArray{Status: Null}
+ return nil
+ }
+
+ var arrayHeader ArrayHeader
+ rp, err := arrayHeader.DecodeBinary(ci, src)
+ if err != nil {
+ return err
+ }
+
+ if len(arrayHeader.Dimensions) == 0 {
+ *dst = VarcharArray{Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+ }
+
+ elementCount := arrayHeader.Dimensions[0].Length
+ for _, d := range arrayHeader.Dimensions[1:] {
+ elementCount *= d.Length
+ }
+
+ elements := make([]Varchar, elementCount)
+
+ for i := range elements {
+ elemLen := int(int32(binary.BigEndian.Uint32(src[rp:])))
+ rp += 4
+ var elemSrc []byte
+ if elemLen >= 0 {
+ elemSrc = src[rp : rp+elemLen]
+ rp += elemLen
+ }
+ err = elements[i].DecodeBinary(ci, elemSrc)
+ if err != nil {
+ return err
+ }
+ }
+
+ *dst = VarcharArray{Elements: elements, Dimensions: arrayHeader.Dimensions, Status: Present}
+ return nil
+}
+
+func (src *VarcharArray) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ if len(src.Dimensions) == 0 {
+ return append(buf, '{', '}'), nil
+ }
+
+ buf = EncodeTextArrayDimensions(buf, src.Dimensions)
+
+ // dimElemCounts is the multiples of elements that each array lies on. For
+ // example, a single dimension array of length 4 would have a dimElemCounts of
+ // [4]. A multi-dimensional array of lengths [3,5,2] would have a
+ // dimElemCounts of [30,10,2]. This is used to simplify when to render a '{'
+ // or '}'.
+ dimElemCounts := make([]int, len(src.Dimensions))
+ dimElemCounts[len(src.Dimensions)-1] = int(src.Dimensions[len(src.Dimensions)-1].Length)
+ for i := len(src.Dimensions) - 2; i > -1; i-- {
+ dimElemCounts[i] = int(src.Dimensions[i].Length) * dimElemCounts[i+1]
+ }
+
+ inElemBuf := make([]byte, 0, 32)
+ for i, elem := range src.Elements {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+
+ for _, dec := range dimElemCounts {
+ if i%dec == 0 {
+ buf = append(buf, '{')
+ }
+ }
+
+ elemBuf, err := elem.EncodeText(ci, inElemBuf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf == nil {
+ buf = append(buf, `"NULL"`...)
+ } else {
+ buf = append(buf, QuoteArrayElementIfNeeded(string(elemBuf))...)
+ }
+
+ for _, dec := range dimElemCounts {
+ if (i+1)%dec == 0 {
+ buf = append(buf, '}')
+ }
+ }
+ }
+
+ return buf, nil
+}
+
+func (src *VarcharArray) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ switch src.Status {
+ case Null:
+ return nil, nil
+ case Undefined:
+ return nil, errUndefined
+ }
+
+ arrayHeader := ArrayHeader{
+ Dimensions: src.Dimensions,
+ }
+
+ if dt, ok := ci.DataTypeForName("varchar"); ok {
+ arrayHeader.ElementOID = int32(dt.OID)
+ } else {
+ return nil, errors.Errorf("unable to find oid for type name %v", "varchar")
+ }
+
+ for i := range src.Elements {
+ if src.Elements[i].Status == Null {
+ arrayHeader.ContainsNull = true
+ break
+ }
+ }
+
+ buf = arrayHeader.EncodeBinary(ci, buf)
+
+ for i := range src.Elements {
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ elemBuf, err := src.Elements[i].EncodeBinary(ci, buf)
+ if err != nil {
+ return nil, err
+ }
+ if elemBuf != nil {
+ buf = elemBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
+ }
+ }
+
+ return buf, nil
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *VarcharArray) Scan(src interface{}) error {
+ if src == nil {
+ return dst.DecodeText(nil, nil)
+ }
+
+ switch src := src.(type) {
+ case string:
+ return dst.DecodeText(nil, []byte(src))
+ case []byte:
+ srcCopy := make([]byte, len(src))
+ copy(srcCopy, src)
+ return dst.DecodeText(nil, srcCopy)
+ }
+
+ return errors.Errorf("cannot scan %T", src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *VarcharArray) Value() (driver.Value, error) {
+ buf, err := src.EncodeText(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ if buf == nil {
+ return nil, nil
+ }
+
+ return string(buf), nil
+}
diff --git a/vendor/github.com/jackc/pgx/pgtype/xid.go b/vendor/github.com/jackc/pgx/pgtype/xid.go
new file mode 100644
index 0000000..f66f536
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/pgtype/xid.go
@@ -0,0 +1,64 @@
+package pgtype
+
+import (
+ "database/sql/driver"
+)
+
+// XID is PostgreSQL's Transaction ID type.
+//
+// In later versions of PostgreSQL, it is the type used for the backend_xid
+// and backend_xmin columns of the pg_stat_activity system view.
+//
+// Also, when one does
+//
+// select xmin, xmax, * from some_table;
+//
+// it is the data type of the xmin and xmax hidden system columns.
+//
+// It is currently implemented as an unsigned four byte integer.
+// Its definition can be found in src/include/postgres_ext.h as TransactionId
+// in the PostgreSQL sources.
+type XID pguint32
+
+// Set converts from src to dst. Note that as XID is not a general
+// number type Set does not do automatic type conversion as other number
+// types do.
+func (dst *XID) Set(src interface{}) error {
+ return (*pguint32)(dst).Set(src)
+}
+
+func (dst *XID) Get() interface{} {
+ return (*pguint32)(dst).Get()
+}
+
+// AssignTo assigns from src to dst. Note that as XID is not a general number
+// type AssignTo does not do automatic type conversion as other number types do.
+func (src *XID) AssignTo(dst interface{}) error {
+ return (*pguint32)(src).AssignTo(dst)
+}
+
+func (dst *XID) DecodeText(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeText(ci, src)
+}
+
+func (dst *XID) DecodeBinary(ci *ConnInfo, src []byte) error {
+ return (*pguint32)(dst).DecodeBinary(ci, src)
+}
+
+func (src *XID) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeText(ci, buf)
+}
+
+func (src *XID) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {
+ return (*pguint32)(src).EncodeBinary(ci, buf)
+}
+
+// Scan implements the database/sql Scanner interface.
+func (dst *XID) Scan(src interface{}) error {
+ return (*pguint32)(dst).Scan(src)
+}
+
+// Value implements the database/sql/driver Valuer interface.
+func (src *XID) Value() (driver.Value, error) {
+ return (*pguint32)(src).Value()
+}
diff --git a/vendor/github.com/jackc/pgx/query.go b/vendor/github.com/jackc/pgx/query.go
index 19b867e..3576091 100644
--- a/vendor/github.com/jackc/pgx/query.go
+++ b/vendor/github.com/jackc/pgx/query.go
@@ -1,10 +1,16 @@
package pgx
import (
+ "context"
"database/sql"
- "errors"
"fmt"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/internal/sanitize"
+ "github.com/jackc/pgx/pgproto3"
+ "github.com/jackc/pgx/pgtype"
)
// Row is a convenience wrapper over Rows that is returned by QueryRow.
@@ -37,16 +43,16 @@ func (r *Row) Scan(dest ...interface{}) (err error) {
// calling Next() until it returns false, or when a fatal error occurs.
type Rows struct {
conn *Conn
- mr *msgReader
+ connPool *ConnPool
+ batch *Batch
+ values [][]byte
fields []FieldDescription
- vr ValueReader
rowCount int
columnIdx int
err error
startTime time.Time
sql string
args []interface{}
- afterClose func(*Rows)
unlockConn bool
closed bool
}
@@ -55,7 +61,9 @@ func (rows *Rows) FieldDescriptions() []FieldDescription {
return rows.fields
}
-func (rows *Rows) close() {
+// Close closes the rows, making the connection ready for use again. It is safe
+// to call Close after rows is already closed.
+func (rows *Rows) Close() {
if rows.closed {
return
}
@@ -67,80 +75,33 @@ func (rows *Rows) close() {
rows.closed = true
+ rows.err = rows.conn.termContext(rows.err)
+
if rows.err == nil {
if rows.conn.shouldLog(LogLevelInfo) {
endTime := time.Now()
- rows.conn.log(LogLevelInfo, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args), "time", endTime.Sub(rows.startTime), "rowCount", rows.rowCount)
+ rows.conn.log(LogLevelInfo, "Query", map[string]interface{}{"sql": rows.sql, "args": logQueryArgs(rows.args), "time": endTime.Sub(rows.startTime), "rowCount": rows.rowCount})
}
} else if rows.conn.shouldLog(LogLevelError) {
- rows.conn.log(LogLevelError, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args))
+ rows.conn.log(LogLevelError, "Query", map[string]interface{}{"sql": rows.sql, "args": logQueryArgs(rows.args)})
}
- if rows.afterClose != nil {
- rows.afterClose(rows)
- }
-}
-
-func (rows *Rows) readUntilReadyForQuery() {
- for {
- t, r, err := rows.conn.rxMsg()
- if err != nil {
- rows.close()
- return
- }
-
- switch t {
- case readyForQuery:
- rows.conn.rxReadyForQuery(r)
- rows.close()
- return
- case rowDescription:
- case dataRow:
- case commandComplete:
- case bindComplete:
- case errorResponse:
- err = rows.conn.rxErrorResponse(r)
- if rows.err == nil {
- rows.err = err
- }
- default:
- err = rows.conn.processContextFreeMsg(t, r)
- if err != nil {
- rows.close()
- return
- }
- }
+ if rows.batch != nil && rows.err != nil {
+ rows.batch.die(rows.err)
}
-}
-// Close closes the rows, making the connection ready for use again. It is safe
-// to call Close after rows is already closed.
-func (rows *Rows) Close() {
- if rows.closed {
- return
+ if rows.connPool != nil {
+ rows.connPool.Release(rows.conn)
}
- rows.readUntilReadyForQuery()
- rows.close()
}
func (rows *Rows) Err() error {
return rows.err
}
-// abort signals that the query was not successfully sent to the server.
-// This differs from Fatal in that it is not necessary to readUntilReadyForQuery
-func (rows *Rows) abort(err error) {
- if rows.err != nil {
- return
- }
-
- rows.err = err
- rows.close()
-}
-
-// Fatal signals an error occurred after the query was sent to the server. It
+// fatal signals an error occurred after the query was sent to the server. It
// closes the rows automatically.
-func (rows *Rows) Fatal(err error) {
+func (rows *Rows) fatal(err error) {
if rows.err != nil {
return
}
@@ -159,64 +120,64 @@ func (rows *Rows) Next() bool {
rows.rowCount++
rows.columnIdx = 0
- rows.vr = ValueReader{}
for {
- t, r, err := rows.conn.rxMsg()
+ msg, err := rows.conn.rxMsg()
if err != nil {
- rows.Fatal(err)
+ rows.fatal(err)
return false
}
- switch t {
- case readyForQuery:
- rows.conn.rxReadyForQuery(r)
- rows.close()
- return false
- case dataRow:
- fieldCount := r.readInt16()
- if int(fieldCount) != len(rows.fields) {
- rows.Fatal(ProtocolError(fmt.Sprintf("Row description field count (%v) and data row field count (%v) do not match", len(rows.fields), fieldCount)))
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rows.fields = rows.conn.rxRowDescription(msg)
+ for i := range rows.fields {
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(rows.fields[i].DataType); ok {
+ rows.fields[i].DataTypeName = dt.Name
+ rows.fields[i].FormatCode = TextFormatCode
+ } else {
+ rows.fatal(errors.Errorf("unknown oid: %d", rows.fields[i].DataType))
+ return false
+ }
+ }
+ case *pgproto3.DataRow:
+ if len(msg.Values) != len(rows.fields) {
+ rows.fatal(ProtocolError(fmt.Sprintf("Row description field count (%v) and data row field count (%v) do not match", len(rows.fields), len(msg.Values))))
return false
}
- rows.mr = r
+ rows.values = msg.Values
return true
- case commandComplete:
- case bindComplete:
+ case *pgproto3.CommandComplete:
+ if rows.batch != nil {
+ rows.batch.pendingCommandComplete = false
+ }
+ rows.Close()
+ return false
+
default:
- err = rows.conn.processContextFreeMsg(t, r)
+ err = rows.conn.processContextFreeMsg(msg)
if err != nil {
- rows.Fatal(err)
+ rows.fatal(err)
return false
}
}
}
}
-// Conn returns the *Conn this *Rows is using.
-func (rows *Rows) Conn() *Conn {
- return rows.conn
-}
-
-func (rows *Rows) nextColumn() (*ValueReader, bool) {
+func (rows *Rows) nextColumn() ([]byte, *FieldDescription, bool) {
if rows.closed {
- return nil, false
+ return nil, nil, false
}
if len(rows.fields) <= rows.columnIdx {
- rows.Fatal(ProtocolError("No next column available"))
- return nil, false
- }
-
- if rows.vr.Len() > 0 {
- rows.mr.readBytes(rows.vr.Len())
+ rows.fatal(ProtocolError("No next column available"))
+ return nil, nil, false
}
+ buf := rows.values[rows.columnIdx]
fd := &rows.fields[rows.columnIdx]
rows.columnIdx++
- size := rows.mr.readInt32()
- rows.vr = ValueReader{mr: rows.mr, fd: fd, valueBytesRemaining: size}
- return &rows.vr, true
+ return buf, fd, true
}
type scanArgError struct {
@@ -234,93 +195,71 @@ func (e scanArgError) Error() string {
// copy the raw bytes received from PostgreSQL. nil will skip the value entirely.
func (rows *Rows) Scan(dest ...interface{}) (err error) {
if len(rows.fields) != len(dest) {
- err = fmt.Errorf("Scan received wrong number of arguments, got %d but expected %d", len(dest), len(rows.fields))
- rows.Fatal(err)
+ err = errors.Errorf("Scan received wrong number of arguments, got %d but expected %d", len(dest), len(rows.fields))
+ rows.fatal(err)
return err
}
for i, d := range dest {
- vr, _ := rows.nextColumn()
+ buf, fd, _ := rows.nextColumn()
if d == nil {
continue
}
- // Check for []byte first as we allow sidestepping the decoding process and retrieving the raw bytes
- if b, ok := d.(*[]byte); ok {
- // If it actually is a bytea then pass it through decodeBytea (so it can be decoded if it is in text format)
- // Otherwise read the bytes directly regardless of what the actual type is.
- if vr.Type().DataType == ByteaOid {
- *b = decodeBytea(vr)
- } else {
- if vr.Len() != -1 {
- *b = vr.ReadBytes(vr.Len())
- } else {
- *b = nil
- }
- }
- } else if s, ok := d.(Scanner); ok {
- err = s.Scan(vr)
+ if s, ok := d.(pgtype.BinaryDecoder); ok && fd.FormatCode == BinaryFormatCode {
+ err = s.DecodeBinary(rows.conn.ConnInfo, buf)
if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
+ rows.fatal(scanArgError{col: i, err: err})
}
- } else if s, ok := d.(PgxScanner); ok {
- err = s.ScanPgx(vr)
+ } else if s, ok := d.(pgtype.TextDecoder); ok && fd.FormatCode == TextFormatCode {
+ err = s.DecodeText(rows.conn.ConnInfo, buf)
if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
+ rows.fatal(scanArgError{col: i, err: err})
}
- } else if s, ok := d.(sql.Scanner); ok {
- var val interface{}
- if 0 <= vr.Len() {
- switch vr.Type().DataType {
- case BoolOid:
- val = decodeBool(vr)
- case Int8Oid:
- val = int64(decodeInt8(vr))
- case Int2Oid:
- val = int64(decodeInt2(vr))
- case Int4Oid:
- val = int64(decodeInt4(vr))
- case TextOid, VarcharOid:
- val = decodeText(vr)
- case OidOid:
- val = int64(decodeOid(vr))
- case Float4Oid:
- val = float64(decodeFloat4(vr))
- case Float8Oid:
- val = decodeFloat8(vr)
- case DateOid:
- val = decodeDate(vr)
- case TimestampOid:
- val = decodeTimestamp(vr)
- case TimestampTzOid:
- val = decodeTimestampTz(vr)
+ } else {
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(fd.DataType); ok {
+ value := dt.Value
+ switch fd.FormatCode {
+ case TextFormatCode:
+ if textDecoder, ok := value.(pgtype.TextDecoder); ok {
+ err = textDecoder.DecodeText(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("%T is not a pgtype.TextDecoder", value)})
+ }
+ case BinaryFormatCode:
+ if binaryDecoder, ok := value.(pgtype.BinaryDecoder); ok {
+ err = binaryDecoder.DecodeBinary(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("%T is not a pgtype.BinaryDecoder", value)})
+ }
default:
- val = vr.ReadBytes(vr.Len())
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("unknown format code: %v", fd.FormatCode)})
}
+
+ if rows.Err() == nil {
+ if scanner, ok := d.(sql.Scanner); ok {
+ sqlSrc, err := pgtype.DatabaseSQLValue(rows.conn.ConnInfo, value)
+ if err != nil {
+ rows.fatal(err)
+ }
+ err = scanner.Scan(sqlSrc)
+ if err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ } else if err := value.AssignTo(d); err != nil {
+ rows.fatal(scanArgError{col: i, err: err})
+ }
+ }
+ } else {
+ rows.fatal(scanArgError{col: i, err: errors.Errorf("unknown oid: %v", fd.DataType)})
}
- err = s.Scan(val)
- if err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
- }
- } else if vr.Type().DataType == JsonOid {
- // Because the argument passed to decodeJSON will escape the heap.
- // This allows d to be stack allocated and only copied to the heap when
- // we actually are decoding JSON. This saves one memory allocation per
- // row.
- d2 := d
- decodeJSON(vr, &d2)
- } else if vr.Type().DataType == JsonbOid {
- // Same trick as above for getting stack allocation
- d2 := d
- decodeJSONB(vr, &d2)
- } else {
- if err := Decode(vr, d); err != nil {
- rows.Fatal(scanArgError{col: i, err: err})
- }
- }
- if vr.Err() != nil {
- rows.Fatal(scanArgError{col: i, err: vr.Err()})
}
if rows.Err() != nil {
@@ -340,79 +279,42 @@ func (rows *Rows) Values() ([]interface{}, error) {
values := make([]interface{}, 0, len(rows.fields))
for range rows.fields {
- vr, _ := rows.nextColumn()
+ buf, fd, _ := rows.nextColumn()
- if vr.Len() == -1 {
+ if buf == nil {
values = append(values, nil)
continue
}
- switch vr.Type().FormatCode {
- // All intrinsic types (except string) are encoded with binary
- // encoding so anything else should be treated as a string
- case TextFormatCode:
- values = append(values, vr.ReadString(vr.Len()))
- case BinaryFormatCode:
- switch vr.Type().DataType {
- case TextOid, VarcharOid:
- values = append(values, decodeText(vr))
- case BoolOid:
- values = append(values, decodeBool(vr))
- case ByteaOid:
- values = append(values, decodeBytea(vr))
- case Int8Oid:
- values = append(values, decodeInt8(vr))
- case Int2Oid:
- values = append(values, decodeInt2(vr))
- case Int4Oid:
- values = append(values, decodeInt4(vr))
- case OidOid:
- values = append(values, decodeOid(vr))
- case Float4Oid:
- values = append(values, decodeFloat4(vr))
- case Float8Oid:
- values = append(values, decodeFloat8(vr))
- case BoolArrayOid:
- values = append(values, decodeBoolArray(vr))
- case Int2ArrayOid:
- values = append(values, decodeInt2Array(vr))
- case Int4ArrayOid:
- values = append(values, decodeInt4Array(vr))
- case Int8ArrayOid:
- values = append(values, decodeInt8Array(vr))
- case Float4ArrayOid:
- values = append(values, decodeFloat4Array(vr))
- case Float8ArrayOid:
- values = append(values, decodeFloat8Array(vr))
- case TextArrayOid, VarcharArrayOid:
- values = append(values, decodeTextArray(vr))
- case TimestampArrayOid, TimestampTzArrayOid:
- values = append(values, decodeTimestampArray(vr))
- case DateOid:
- values = append(values, decodeDate(vr))
- case TimestampTzOid:
- values = append(values, decodeTimestampTz(vr))
- case TimestampOid:
- values = append(values, decodeTimestamp(vr))
- case InetOid, CidrOid:
- values = append(values, decodeInet(vr))
- case JsonOid:
- var d interface{}
- decodeJSON(vr, &d)
- values = append(values, d)
- case JsonbOid:
- var d interface{}
- decodeJSONB(vr, &d)
- values = append(values, d)
+ if dt, ok := rows.conn.ConnInfo.DataTypeForOID(fd.DataType); ok {
+ value := dt.Value
+
+ switch fd.FormatCode {
+ case TextFormatCode:
+ decoder := value.(pgtype.TextDecoder)
+ if decoder == nil {
+ decoder = &pgtype.GenericText{}
+ }
+ err := decoder.DecodeText(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, decoder.(pgtype.Value).Get())
+ case BinaryFormatCode:
+ decoder := value.(pgtype.BinaryDecoder)
+ if decoder == nil {
+ decoder = &pgtype.GenericBinary{}
+ }
+ err := decoder.DecodeBinary(rows.conn.ConnInfo, buf)
+ if err != nil {
+ rows.fatal(err)
+ }
+ values = append(values, value.Get())
default:
- rows.Fatal(errors.New("Values cannot handle binary format non-intrinsic types"))
+ rows.fatal(errors.New("Unknown format code"))
}
- default:
- rows.Fatal(errors.New("Unknown format code"))
- }
-
- if vr.Err() != nil {
- rows.Fatal(vr.Err())
+ } else {
+ rows.fatal(errors.New("Unknown type"))
}
if rows.Err() != nil {
@@ -423,72 +325,221 @@ func (rows *Rows) Values() ([]interface{}, error) {
return values, rows.Err()
}
-// AfterClose adds f to a LILO queue of functions that will be called when
-// rows is closed.
-func (rows *Rows) AfterClose(f func(*Rows)) {
- if rows.afterClose == nil {
- rows.afterClose = f
- } else {
- prevFn := rows.afterClose
- rows.afterClose = func(rows *Rows) {
- f(rows)
- prevFn(rows)
- }
- }
-}
-
// Query executes sql with args. If there is an error the returned *Rows will
// be returned in an error state. So it is allowed to ignore the error returned
// from Query and handle it in *Rows.
func (c *Conn) Query(sql string, args ...interface{}) (*Rows, error) {
+ return c.QueryEx(context.Background(), sql, nil, args...)
+}
+
+func (c *Conn) getRows(sql string, args []interface{}) *Rows {
+ if len(c.preallocatedRows) == 0 {
+ c.preallocatedRows = make([]Rows, 64)
+ }
+
+ r := &c.preallocatedRows[len(c.preallocatedRows)-1]
+ c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+
+ r.conn = c
+ r.startTime = c.lastActivityTime
+ r.sql = sql
+ r.args = args
+
+ return r
+}
+
+// QueryRow is a convenience wrapper over Query. Any error that occurs while
+// querying is deferred until calling Scan on the returned *Row. That *Row will
+// error with ErrNoRows if no rows are returned.
+func (c *Conn) QueryRow(sql string, args ...interface{}) *Row {
+ rows, _ := c.Query(sql, args...)
+ return (*Row)(rows)
+}
+
+type QueryExOptions struct {
+ // When ParameterOIDs are present and the query is not a prepared statement,
+ // then ParameterOIDs and ResultFormatCodes will be used to avoid an extra
+ // network round-trip.
+ ParameterOIDs []pgtype.OID
+ ResultFormatCodes []int16
+
+ SimpleProtocol bool
+}
+
+func (c *Conn) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (rows *Rows, err error) {
c.lastActivityTime = time.Now()
+ rows = c.getRows(sql, args)
+
+ err = c.waitForPreviousCancelQuery(ctx)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
- rows := c.getRows(sql, args)
+ if err := c.ensureConnectionReadyForQuery(); err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
if err := c.lock(); err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, err
}
rows.unlockConn = true
+ err = c.initContext(ctx)
+ if err != nil {
+ rows.fatal(err)
+ return rows, rows.err
+ }
+
+ if (options == nil && c.config.PreferSimpleProtocol) || (options != nil && options.SimpleProtocol) {
+ err = c.sanitizeAndSendSimpleQuery(sql, args...)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ return rows, nil
+ }
+
+ if options != nil && len(options.ParameterOIDs) > 0 {
+
+ buf, err := c.buildOneRoundTripQueryEx(c.wbuf, sql, options, args)
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ buf = appendSync(buf)
+
+ n, err := c.conn.Write(buf)
+ if err != nil && fatalWriteErr(n, err) {
+ rows.fatal(err)
+ c.die(err)
+ return rows, err
+ }
+ c.pendingReadyForQueryCount++
+
+ fieldDescriptions, err := c.readUntilRowDescription()
+ if err != nil {
+ rows.fatal(err)
+ return rows, err
+ }
+
+ if len(options.ResultFormatCodes) == 0 {
+ for i := range fieldDescriptions {
+ fieldDescriptions[i].FormatCode = TextFormatCode
+ }
+ } else if len(options.ResultFormatCodes) == 1 {
+ fc := options.ResultFormatCodes[0]
+ for i := range fieldDescriptions {
+ fieldDescriptions[i].FormatCode = fc
+ }
+ } else {
+ for i := range options.ResultFormatCodes {
+ fieldDescriptions[i].FormatCode = options.ResultFormatCodes[i]
+ }
+ }
+
+ rows.sql = sql
+ rows.fields = fieldDescriptions
+ return rows, nil
+ }
+
ps, ok := c.preparedStatements[sql]
if !ok {
var err error
- ps, err = c.Prepare("", sql)
+ ps, err = c.prepareEx("", sql, nil)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, rows.err
}
}
rows.sql = ps.SQL
rows.fields = ps.FieldDescriptions
- err := c.sendPreparedQuery(ps, args...)
+
+ err = c.sendPreparedQuery(ps, args...)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
}
+
return rows, rows.err
}
-func (c *Conn) getRows(sql string, args []interface{}) *Rows {
- if len(c.preallocatedRows) == 0 {
- c.preallocatedRows = make([]Rows, 64)
+func (c *Conn) buildOneRoundTripQueryEx(buf []byte, sql string, options *QueryExOptions, arguments []interface{}) ([]byte, error) {
+ if len(arguments) != len(options.ParameterOIDs) {
+ return nil, errors.Errorf("mismatched number of arguments (%d) and options.ParameterOIDs (%d)", len(arguments), len(options.ParameterOIDs))
}
- r := &c.preallocatedRows[len(c.preallocatedRows)-1]
- c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+ if len(options.ParameterOIDs) > 65535 {
+ return nil, errors.Errorf("Number of QueryExOptions ParameterOIDs must be between 0 and 65535, received %d", len(options.ParameterOIDs))
+ }
- r.conn = c
- r.startTime = c.lastActivityTime
- r.sql = sql
- r.args = args
+ buf = appendParse(buf, "", sql, options.ParameterOIDs)
+ buf = appendDescribe(buf, 'S', "")
+ buf, err := appendBind(buf, "", "", c.ConnInfo, options.ParameterOIDs, arguments, options.ResultFormatCodes)
+ if err != nil {
+ return nil, err
+ }
+ buf = appendExecute(buf, "", 0)
- return r
+ return buf, nil
}
-// QueryRow is a convenience wrapper over Query. Any error that occurs while
-// querying is deferred until calling Scan on the returned *Row. That *Row will
-// error with ErrNoRows if no rows are returned.
-func (c *Conn) QueryRow(sql string, args ...interface{}) *Row {
- rows, _ := c.Query(sql, args...)
+func (c *Conn) readUntilRowDescription() ([]FieldDescription, error) {
+ for {
+ msg, err := c.rxMsg()
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *pgproto3.ParameterDescription:
+ case *pgproto3.RowDescription:
+ fieldDescriptions := c.rxRowDescription(msg)
+ for i := range fieldDescriptions {
+ if dt, ok := c.ConnInfo.DataTypeForOID(fieldDescriptions[i].DataType); ok {
+ fieldDescriptions[i].DataTypeName = dt.Name
+ } else {
+ return nil, errors.Errorf("unknown oid: %d", fieldDescriptions[i].DataType)
+ }
+ }
+ return fieldDescriptions, nil
+ default:
+ if err := c.processContextFreeMsg(msg); err != nil {
+ return nil, err
+ }
+ }
+ }
+}
+
+func (c *Conn) sanitizeAndSendSimpleQuery(sql string, args ...interface{}) (err error) {
+ if c.RuntimeParams["standard_conforming_strings"] != "on" {
+ return errors.New("simple protocol queries must be run with standard_conforming_strings=on")
+ }
+
+ if c.RuntimeParams["client_encoding"] != "UTF8" {
+ return errors.New("simple protocol queries must be run with client_encoding=UTF8")
+ }
+
+ valueArgs := make([]interface{}, len(args))
+ for i, a := range args {
+ valueArgs[i], err = convertSimpleArgument(c.ConnInfo, a)
+ if err != nil {
+ return err
+ }
+ }
+
+ sql, err = sanitize.SanitizeSQL(sql, valueArgs...)
+ if err != nil {
+ return err
+ }
+
+ return c.sendSimpleQuery(sql)
+}
+
+func (c *Conn) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := c.QueryEx(ctx, sql, options, args...)
return (*Row)(rows)
}
diff --git a/vendor/github.com/jackc/pgx/query_test.go b/vendor/github.com/jackc/pgx/query_test.go
deleted file mode 100644
index f08887b..0000000
--- a/vendor/github.com/jackc/pgx/query_test.go
+++ /dev/null
@@ -1,1414 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "database/sql"
- "fmt"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-
- "github.com/shopspring/decimal"
-)
-
-func TestConnQueryScan(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var sum, rowCount int32
-
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
-
-func TestConnQueryValues(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var rowCount int32
-
- rows, err := conn.Query("select 'foo'::text, 'bar'::varchar, n, null, n::oid from generate_series(1,$1) n", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- rowCount++
-
- values, err := rows.Values()
- if err != nil {
- t.Fatalf("rows.Values failed: %v", err)
- }
- if len(values) != 5 {
- t.Errorf("Expected rows.Values to return 5 values, but it returned %d", len(values))
- }
- if values[0] != "foo" {
- t.Errorf(`Expected values[0] to be "foo", but it was %v`, values[0])
- }
- if values[1] != "bar" {
- t.Errorf(`Expected values[1] to be "bar", but it was %v`, values[1])
- }
-
- if values[2] != rowCount {
- t.Errorf(`Expected values[2] to be %d, but it was %d`, rowCount, values[2])
- }
-
- if values[3] != nil {
- t.Errorf(`Expected values[3] to be %v, but it was %d`, nil, values[3])
- }
-
- if values[4] != pgx.Oid(rowCount) {
- t.Errorf(`Expected values[4] to be %d, but it was %d`, rowCount, values[4])
- }
- }
-
- if rows.Err() != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
-}
-
-// Test that a connection stays valid when query results are closed early
-func TestConnQueryCloseEarly(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Immediately close query without reading any rows
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- rows.Close()
-
- ensureConnValid(t, conn)
-
- // Read partial response then close
- rows, err = conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n int32
- rows.Scan(&n)
- if n != 1 {
- t.Fatalf("Expected 1 from first row, but got %v", n)
- }
-
- rows.Close()
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryCloseEarlyWithErrorOnWire(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 1/(10-n) from generate_series(1,10) n")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- rows.Close()
-
- ensureConnValid(t, conn)
-}
-
-// Test that a connection stays valid when query results read incorrectly
-func TestConnQueryReadWrongTypeError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Read a single value incorrectly
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- rowsRead := 0
-
- for rows.Next() {
- var t time.Time
- rows.Scan(&t)
- rowsRead++
- }
-
- if rowsRead != 1 {
- t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
- }
-
- if rows.Err() == nil {
- t.Fatal("Expected Rows to have an error after an improper read but it didn't")
- }
-
- if rows.Err().Error() != "can't scan into dest[0]: Can't convert OID 23 to time.Time" {
- t.Fatalf("Expected different Rows.Err(): %v", rows.Err())
- }
-
- ensureConnValid(t, conn)
-}
-
-// Test that a connection stays valid when query results read incorrectly
-func TestConnQueryReadTooManyValues(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- // Read too many values
- rows, err := conn.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- rowsRead := 0
-
- for rows.Next() {
- var n, m int32
- rows.Scan(&n, &m)
- rowsRead++
- }
-
- if rowsRead != 1 {
- t.Fatalf("Expected error to cause only 1 row to be read, but %d were read", rowsRead)
- }
-
- if rows.Err() == nil {
- t.Fatal("Expected Rows to have an error after an improper read but it didn't")
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryScanIgnoreColumn(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 1::int8, 2::int8, 3::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m int64
- err = rows.Scan(&n, nil, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n != 1 {
- t.Errorf("Expected n to equal 1, but it was %d", n)
- }
-
- if m != 3 {
- t.Errorf("Expected n to equal 3, but it was %d", m)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::int8, 1::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m pgx.NullInt64
- err = rows.Scan(&n, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n.Valid {
- t.Error("Null should not be valid, but it was")
- }
-
- if !m.Valid {
- t.Error("1 should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-type pgxNullInt64 struct {
- Int64 int64
- Valid bool // Valid is true if Int64 is not NULL
-}
-
-func (n *pgxNullInt64) ScanPgx(vr *pgx.ValueReader) error {
- if vr.Type().DataType != pgx.Int8Oid {
- return pgx.SerializationError(fmt.Sprintf("pgxNullInt64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
-
- err := pgx.Decode(vr, &n.Int64)
- if err != nil {
- return err
- }
- return vr.Err()
-}
-
-func TestConnQueryPgxScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::int8, 1::int8")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var n, m pgxNullInt64
- err = rows.Scan(&n, &m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if n.Valid {
- t.Error("Null should not be valid, but it was")
- }
-
- if !m.Valid {
- t.Error("1 should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryErrorWhileReturningRows(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for i := 0; i < 100; i++ {
- func() {
- sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
-
- rows, err := conn.Query(sql)
- if err != nil {
- t.Fatal(err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- }
-
- if err, ok := rows.Err().(pgx.PgError); !ok {
- t.Fatalf("Expected pgx.PgError, got %v", err)
- }
-
- ensureConnValid(t, conn)
- }()
- }
-
-}
-
-func TestConnQueryEncoder(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- n := pgx.NullInt64{Int64: 1, Valid: true}
-
- rows, err := conn.Query("select $1::int8", &n)
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- ok := rows.Next()
- if !ok {
- t.Fatal("rows.Next terminated early")
- }
-
- var m pgx.NullInt64
- err = rows.Scan(&m)
- if err != nil {
- t.Fatalf("rows.Scan failed: %v", err)
- }
- rows.Close()
-
- if !m.Valid {
- t.Error("m should be valid, but it wasn't")
- }
-
- if m.Int64 != 1 {
- t.Errorf("m.Int64 should have been 1, but it was %v", m.Int64)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryEncodeError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select $1::integer", "wrong")
- if err != nil {
- t.Errorf("conn.Query failure: %v", err)
- }
- defer rows.Close()
-
- rows.Next()
-
- if rows.Err() == nil {
- t.Error("Expected rows.Err() to return error, but it didn't")
- }
- if rows.Err().Error() != `ERROR: invalid input syntax for integer: "wrong" (SQLSTATE 22P02)` {
- t.Error("Expected rows.Err() to return different error:", rows.Err())
- }
-}
-
-// Ensure that an argument that implements Encoder works when the parameter type
-// is a core type.
-type coreEncoder struct{}
-
-func (n coreEncoder) FormatCode() int16 { return pgx.TextFormatCode }
-
-func (n *coreEncoder) Encode(w *pgx.WriteBuf, oid pgx.Oid) error {
- w.WriteInt32(int32(2))
- w.WriteBytes([]byte("42"))
- return nil
-}
-
-func TestQueryEncodeCoreTextFormatError(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var n int32
- err := conn.QueryRow("select $1::integer", &coreEncoder{}).Scan(&n)
- if err != nil {
- t.Fatalf("Unexpected conn.QueryRow error: %v", err)
- }
-
- if n != 42 {
- t.Errorf("Expected 42, got %v", n)
- }
-}
-
-func TestQueryRowCoreTypes(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s string
- f32 float32
- f64 float64
- b bool
- t time.Time
- oid pgx.Oid
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.s}, allTypes{s: "Jack"}},
- {"select $1::float4", []interface{}{float32(1.23)}, []interface{}{&actual.f32}, allTypes{f32: 1.23}},
- {"select $1::float8", []interface{}{float64(1.23)}, []interface{}{&actual.f64}, allTypes{f64: 1.23}},
- {"select $1::bool", []interface{}{true}, []interface{}{&actual.b}, allTypes{b: true}},
- {"select $1::timestamptz", []interface{}{time.Unix(123, 5000)}, []interface{}{&actual.t}, allTypes{t: time.Unix(123, 5000)}},
- {"select $1::timestamp", []interface{}{time.Date(2010, 1, 2, 3, 4, 5, 0, time.Local)}, []interface{}{&actual.t}, allTypes{t: time.Date(2010, 1, 2, 3, 4, 5, 0, time.Local)}},
- {"select $1::date", []interface{}{time.Date(1987, 1, 2, 0, 0, 0, 0, time.Local)}, []interface{}{&actual.t}, allTypes{t: time.Date(1987, 1, 2, 0, 0, 0, 0, time.Local)}},
- {"select $1::oid", []interface{}{pgx.Oid(42)}, []interface{}{&actual.oid}, allTypes{oid: 42}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
-
- // Check that Scan errors when a core type is null
- err = conn.QueryRow(tt.sql, nil).Scan(tt.scanArgs...)
- if err == nil {
- t.Errorf("%d. Expected null to cause error, but it didn't (sql -> %v)", i, tt.sql)
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`%d. Expected null to cause error "Cannot decode null..." but it was %v (sql -> %v)`, i, err, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreIntegerEncoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- ui uint
- ui8 uint8
- ui16 uint16
- ui32 uint32
- ui64 uint64
- i int
- i8 int8
- i16 int16
- i32 int32
- i64 int64
- }
-
- var actual, zero allTypes
-
- successfulEncodeTests := []struct {
- sql string
- queryArg interface{}
- scanArg interface{}
- expected allTypes
- }{
- // Check any integer type where value is within int2 range can be encoded
- {"select $1::int2", int(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int8(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int16(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int32(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", int64(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint8(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint16(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint32(42), &actual.i16, allTypes{i16: 42}},
- {"select $1::int2", uint64(42), &actual.i16, allTypes{i16: 42}},
-
- // Check any integer type where value is within int4 range can be encoded
- {"select $1::int4", int(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int8(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int16(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int32(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", int64(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint8(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint16(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint32(42), &actual.i32, allTypes{i32: 42}},
- {"select $1::int4", uint64(42), &actual.i32, allTypes{i32: 42}},
-
- // Check any integer type where value is within int8 range can be encoded
- {"select $1::int8", int(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int8(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int16(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int32(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", int64(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint8(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint16(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint32(42), &actual.i64, allTypes{i64: 42}},
- {"select $1::int8", uint64(42), &actual.i64, allTypes{i64: 42}},
- }
-
- for i, tt := range successfulEncodeTests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(tt.scanArg)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- continue
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArg -> %v)", i, tt.expected, actual, tt.sql, tt.queryArg)
- }
-
- ensureConnValid(t, conn)
- }
-
- failedEncodeTests := []struct {
- sql string
- queryArg interface{}
- }{
- // Check any integer type where value is outside pg:int2 range cannot be encoded
- {"select $1::int2", int(32769)},
- {"select $1::int2", int32(32769)},
- {"select $1::int2", int32(32769)},
- {"select $1::int2", int64(32769)},
- {"select $1::int2", uint(32769)},
- {"select $1::int2", uint16(32769)},
- {"select $1::int2", uint32(32769)},
- {"select $1::int2", uint64(32769)},
-
- // Check any integer type where value is outside pg:int4 range cannot be encoded
- {"select $1::int4", int64(2147483649)},
- {"select $1::int4", uint32(2147483649)},
- {"select $1::int4", uint64(2147483649)},
-
- // Check any integer type where value is outside pg:int8 range cannot be encoded
- {"select $1::int8", uint64(9223372036854775809)},
- }
-
- for i, tt := range failedEncodeTests {
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(nil)
- if err == nil {
- t.Errorf("%d. Expected failure to encode, but unexpectedly succeeded: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- } else if !strings.Contains(err.Error(), "is greater than") {
- t.Errorf("%d. Expected failure to encode, but got: %v (sql -> %v, queryArg -> %v)", i, err, tt.sql, tt.queryArg)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreIntegerDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- ui uint
- ui8 uint8
- ui16 uint16
- ui32 uint32
- ui64 uint64
- i int
- i8 int8
- i16 int16
- i32 int32
- i64 int64
- }
-
- var actual, zero allTypes
-
- successfulDecodeTests := []struct {
- sql string
- scanArg interface{}
- expected allTypes
- }{
- // Check any integer type where value is within Go:int range can be decoded
- {"select 42::int2", &actual.i, allTypes{i: 42}},
- {"select 42::int4", &actual.i, allTypes{i: 42}},
- {"select 42::int8", &actual.i, allTypes{i: 42}},
- {"select -42::int2", &actual.i, allTypes{i: -42}},
- {"select -42::int4", &actual.i, allTypes{i: -42}},
- {"select -42::int8", &actual.i, allTypes{i: -42}},
-
- // Check any integer type where value is within Go:int8 range can be decoded
- {"select 42::int2", &actual.i8, allTypes{i8: 42}},
- {"select 42::int4", &actual.i8, allTypes{i8: 42}},
- {"select 42::int8", &actual.i8, allTypes{i8: 42}},
- {"select -42::int2", &actual.i8, allTypes{i8: -42}},
- {"select -42::int4", &actual.i8, allTypes{i8: -42}},
- {"select -42::int8", &actual.i8, allTypes{i8: -42}},
-
- // Check any integer type where value is within Go:int16 range can be decoded
- {"select 42::int2", &actual.i16, allTypes{i16: 42}},
- {"select 42::int4", &actual.i16, allTypes{i16: 42}},
- {"select 42::int8", &actual.i16, allTypes{i16: 42}},
- {"select -42::int2", &actual.i16, allTypes{i16: -42}},
- {"select -42::int4", &actual.i16, allTypes{i16: -42}},
- {"select -42::int8", &actual.i16, allTypes{i16: -42}},
-
- // Check any integer type where value is within Go:int32 range can be decoded
- {"select 42::int2", &actual.i32, allTypes{i32: 42}},
- {"select 42::int4", &actual.i32, allTypes{i32: 42}},
- {"select 42::int8", &actual.i32, allTypes{i32: 42}},
- {"select -42::int2", &actual.i32, allTypes{i32: -42}},
- {"select -42::int4", &actual.i32, allTypes{i32: -42}},
- {"select -42::int8", &actual.i32, allTypes{i32: -42}},
-
- // Check any integer type where value is within Go:int64 range can be decoded
- {"select 42::int2", &actual.i64, allTypes{i64: 42}},
- {"select 42::int4", &actual.i64, allTypes{i64: 42}},
- {"select 42::int8", &actual.i64, allTypes{i64: 42}},
- {"select -42::int2", &actual.i64, allTypes{i64: -42}},
- {"select -42::int4", &actual.i64, allTypes{i64: -42}},
- {"select -42::int8", &actual.i64, allTypes{i64: -42}},
-
- // Check any integer type where value is within Go:uint range can be decoded
- {"select 128::int2", &actual.ui, allTypes{ui: 128}},
- {"select 128::int4", &actual.ui, allTypes{ui: 128}},
- {"select 128::int8", &actual.ui, allTypes{ui: 128}},
-
- // Check any integer type where value is within Go:uint8 range can be decoded
- {"select 128::int2", &actual.ui8, allTypes{ui8: 128}},
- {"select 128::int4", &actual.ui8, allTypes{ui8: 128}},
- {"select 128::int8", &actual.ui8, allTypes{ui8: 128}},
-
- // Check any integer type where value is within Go:uint16 range can be decoded
- {"select 42::int2", &actual.ui16, allTypes{ui16: 42}},
- {"select 32768::int4", &actual.ui16, allTypes{ui16: 32768}},
- {"select 32768::int8", &actual.ui16, allTypes{ui16: 32768}},
-
- // Check any integer type where value is within Go:uint32 range can be decoded
- {"select 42::int2", &actual.ui32, allTypes{ui32: 42}},
- {"select 42::int4", &actual.ui32, allTypes{ui32: 42}},
- {"select 2147483648::int8", &actual.ui32, allTypes{ui32: 2147483648}},
-
- // Check any integer type where value is within Go:uint64 range can be decoded
- {"select 42::int2", &actual.ui64, allTypes{ui64: 42}},
- {"select 42::int4", &actual.ui64, allTypes{ui64: 42}},
- {"select 42::int8", &actual.ui64, allTypes{ui64: 42}},
- }
-
- for i, tt := range successfulDecodeTests {
- actual = zero
-
- err := conn.QueryRow(tt.sql).Scan(tt.scanArg)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- continue
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failedDecodeTests := []struct {
- sql string
- scanArg interface{}
- expectedErr string
- }{
- // Check any integer type where value is outside Go:int8 range cannot be decoded
- {"select 128::int2", &actual.i8, "is greater than"},
- {"select 128::int4", &actual.i8, "is greater than"},
- {"select 128::int8", &actual.i8, "is greater than"},
- {"select -129::int2", &actual.i8, "is less than"},
- {"select -129::int4", &actual.i8, "is less than"},
- {"select -129::int8", &actual.i8, "is less than"},
-
- // Check any integer type where value is outside Go:int16 range cannot be decoded
- {"select 32768::int4", &actual.i16, "is greater than"},
- {"select 32768::int8", &actual.i16, "is greater than"},
- {"select -32769::int4", &actual.i16, "is less than"},
- {"select -32769::int8", &actual.i16, "is less than"},
-
- // Check any integer type where value is outside Go:int32 range cannot be decoded
- {"select 2147483648::int8", &actual.i32, "is greater than"},
- {"select -2147483649::int8", &actual.i32, "is less than"},
-
- // Check any integer type where value is outside Go:uint range cannot be decoded
- {"select -1::int2", &actual.ui, "is less than"},
- {"select -1::int4", &actual.ui, "is less than"},
- {"select -1::int8", &actual.ui, "is less than"},
-
- // Check any integer type where value is outside Go:uint8 range cannot be decoded
- {"select 256::int2", &actual.ui8, "is greater than"},
- {"select 256::int4", &actual.ui8, "is greater than"},
- {"select 256::int8", &actual.ui8, "is greater than"},
- {"select -1::int2", &actual.ui8, "is less than"},
- {"select -1::int4", &actual.ui8, "is less than"},
- {"select -1::int8", &actual.ui8, "is less than"},
-
- // Check any integer type where value is outside Go:uint16 cannot be decoded
- {"select 65536::int4", &actual.ui16, "is greater than"},
- {"select 65536::int8", &actual.ui16, "is greater than"},
- {"select -1::int2", &actual.ui16, "is less than"},
- {"select -1::int4", &actual.ui16, "is less than"},
- {"select -1::int8", &actual.ui16, "is less than"},
-
- // Check any integer type where value is outside Go:uint32 range cannot be decoded
- {"select 4294967296::int8", &actual.ui32, "is greater than"},
- {"select -1::int2", &actual.ui32, "is less than"},
- {"select -1::int4", &actual.ui32, "is less than"},
- {"select -1::int8", &actual.ui32, "is less than"},
-
- // Check any integer type where value is outside Go:uint64 range cannot be decoded
- {"select -1::int2", &actual.ui64, "is less than"},
- {"select -1::int4", &actual.ui64, "is less than"},
- {"select -1::int8", &actual.ui64, "is less than"},
- }
-
- for i, tt := range failedDecodeTests {
- err := conn.QueryRow(tt.sql).Scan(tt.scanArg)
- if err == nil {
- t.Errorf("%d. Expected failure to decode, but unexpectedly succeeded: %v (sql -> %v)", i, err, tt.sql)
- } else if !strings.Contains(err.Error(), tt.expectedErr) {
- t.Errorf("%d. Expected failure to decode, but got: %v (sql -> %v)", i, err, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowCoreByteSlice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- queryArg interface{}
- expected []byte
- }{
- {"select $1::text", "Jack", []byte("Jack")},
- {"select $1::text", []byte("Jack"), []byte("Jack")},
- {"select $1::int4", int32(239023409), []byte{14, 63, 53, 49}},
- {"select $1::varchar", []byte("Jack"), []byte("Jack")},
- {"select $1::bytea", []byte{0, 15, 255, 17}, []byte{0, 15, 255, 17}},
- }
-
- for i, tt := range tests {
- var actual []byte
-
- err := conn.QueryRow(tt.sql, tt.queryArg).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- }
-
- if !bytes.Equal(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowByteSliceArgument(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::int4"
- queryArg := []byte{14, 63, 53, 49}
- expected := int32(239023409)
-
- var actual int32
-
- err := conn.QueryRow(sql, queryArg).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if expected != actual {
- t.Errorf("Expected %v, got %v (sql -> %v)", expected, actual, sql)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowUnknownType(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::point"
- expected := "(1,0)"
- var actual string
-
- err := conn.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
-
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowErrors(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- i16 int16
- i int
- s string
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- err string
- }{
- {"select $1", []interface{}{"Jack"}, []interface{}{&actual.i16}, "could not determine data type of parameter $1 (SQLSTATE 42P18)"},
- {"select $1::badtype", []interface{}{"Jack"}, []interface{}{&actual.i16}, `type "badtype" does not exist`},
- {"SYNTAX ERROR", []interface{}{}, []interface{}{&actual.i16}, "SQLSTATE 42601"},
- {"select $1::text", []interface{}{"Jack"}, []interface{}{&actual.i16}, "Cannot decode oid 25 into any integer type"},
- {"select $1::point", []interface{}{int(705)}, []interface{}{&actual.s}, "cannot encode int8 into oid 600"},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err == nil {
- t.Errorf("%d. Unexpected success (sql -> %v, queryArgs -> %v)", i, tt.sql, tt.queryArgs)
- }
- if err != nil && !strings.Contains(err.Error(), tt.err) {
- t.Errorf("%d. Expected error to contain %s, but got %v (sql -> %v, queryArgs -> %v)", i, tt.err, err, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestQueryRowNoResults(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var n int32
- err := conn.QueryRow("select 1 where 1=0").Scan(&n)
- if err != pgx.ErrNoRows {
- t.Errorf("Expected pgx.ErrNoRows, got %v", err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt16Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int16
-
- tests := []struct {
- sql string
- expected []int16
- }{
- {"select $1::int2[]", []int16{1, 2, 3, 4, 5}},
- {"select $1::int2[]", []int16{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int2[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt32Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int32
-
- tests := []struct {
- sql string
- expected []int32
- }{
- {"select $1::int4[]", []int32{1, 2, 3, 4, 5}},
- {"select $1::int4[]", []int32{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int4[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreInt64Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []int64
-
- tests := []struct {
- sql string
- expected []int64
- }{
- {"select $1::int8[]", []int64{1, 2, 3, 4, 5}},
- {"select $1::int8[]", []int64{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1, 2, 3, 4, 5, null}'::int8[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreFloat32Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []float32
-
- tests := []struct {
- sql string
- expected []float32
- }{
- {"select $1::float4[]", []float32{1.5, 2.0, 3.5}},
- {"select $1::float4[]", []float32{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1.5, 2.0, 3.5, null}'::float4[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreFloat64Slice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []float64
-
- tests := []struct {
- sql string
- expected []float64
- }{
- {"select $1::float8[]", []float64{1.5, 2.0, 3.5}},
- {"select $1::float8[]", []float64{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{1.5, 2.0, 3.5, null}'::float8[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestQueryRowCoreStringSlice(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var actual []string
-
- tests := []struct {
- sql string
- expected []string
- }{
- {"select $1::text[]", []string{"Adam", "Eve", "UTF-8 Characters Å Æ Ë Ͽ"}},
- {"select $1::text[]", []string{}},
- {"select $1::varchar[]", []string{"Adam", "Eve", "UTF-8 Characters Å Æ Ë Ͽ"}},
- {"select $1::varchar[]", []string{}},
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.expected).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v", i, err)
- }
-
- if len(actual) != len(tt.expected) {
- t.Errorf("%d. Expected %v, got %v", i, tt.expected, actual)
- }
-
- for j := 0; j < len(actual); j++ {
- if actual[j] != tt.expected[j] {
- t.Errorf("%d. Expected actual[%d] to be %v, got %v", i, j, tt.expected[j], actual[j])
- }
- }
-
- ensureConnValid(t, conn)
- }
-
- // Check that Scan errors when an array with a null is scanned into a core slice type
- err := conn.QueryRow("select '{Adam,Eve,NULL}'::text[];").Scan(&actual)
- if err == nil {
- t.Error("Expected null to cause error when scanned into slice, but it didn't")
- }
- if err != nil && !strings.Contains(err.Error(), "Cannot decode null") {
- t.Errorf(`Expected null to cause error "Cannot decode null..." but it was %v`, err)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestReadingValueAfterEmptyArray(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var a []string
- var b int32
- err := conn.QueryRow("select '{}'::text[], 42::integer").Scan(&a, &b)
- if err != nil {
- t.Fatalf("conn.QueryRow failed: %v", err)
- }
-
- if len(a) != 0 {
- t.Errorf("Expected 'a' to have length 0, but it was: %d", len(a))
- }
-
- if b != 42 {
- t.Errorf("Expected 'b' to 42, but it was: %d", b)
- }
-}
-
-func TestReadingNullByteArray(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var a []byte
- err := conn.QueryRow("select null::text").Scan(&a)
- if err != nil {
- t.Fatalf("conn.QueryRow failed: %v", err)
- }
-
- if a != nil {
- t.Errorf("Expected 'a' to be nil, but it was: %v", a)
- }
-}
-
-func TestReadingNullByteArrays(t *testing.T) {
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select null::text union all select null::text")
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
-
- count := 0
- for rows.Next() {
- count++
- var a []byte
- if err := rows.Scan(&a); err != nil {
- t.Fatalf("failed to scan row: %v", err)
- }
- if a != nil {
- t.Errorf("Expected 'a' to be nil, but it was: %v", a)
- }
- }
- if count != 2 {
- t.Errorf("Expected to read 2 rows, read: %d", count)
- }
-}
-
-// Use github.com/shopspring/decimal as real-world database/sql custom type
-// to test against.
-func TestConnQueryDatabaseSQLScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var num decimal.Decimal
-
- err := conn.QueryRow("select '1234.567'::decimal").Scan(&num)
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- expected, err := decimal.NewFromString("1234.567")
- if err != nil {
- t.Fatal(err)
- }
-
- if !num.Equals(expected) {
- t.Errorf("Expected num to be %v, but it was %v", expected, num)
- }
-
- ensureConnValid(t, conn)
-}
-
-// Use github.com/shopspring/decimal as real-world database/sql custom type
-// to test against.
-func TestConnQueryDatabaseSQLDriverValuer(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- expected, err := decimal.NewFromString("1234.567")
- if err != nil {
- t.Fatal(err)
- }
- var num decimal.Decimal
-
- err = conn.QueryRow("select $1::decimal", &expected).Scan(&num)
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- if !num.Equals(expected) {
- t.Errorf("Expected num to be %v, but it was %v", expected, num)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestConnQueryDatabaseSQLNullX(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type row struct {
- boolValid sql.NullBool
- boolNull sql.NullBool
- int64Valid sql.NullInt64
- int64Null sql.NullInt64
- float64Valid sql.NullFloat64
- float64Null sql.NullFloat64
- stringValid sql.NullString
- stringNull sql.NullString
- }
-
- expected := row{
- boolValid: sql.NullBool{Bool: true, Valid: true},
- int64Valid: sql.NullInt64{Int64: 123, Valid: true},
- float64Valid: sql.NullFloat64{Float64: 3.14, Valid: true},
- stringValid: sql.NullString{String: "pgx", Valid: true},
- }
-
- var actual row
-
- err := conn.QueryRow(
- "select $1::bool, $2::bool, $3::int8, $4::int8, $5::float8, $6::float8, $7::text, $8::text",
- expected.boolValid,
- expected.boolNull,
- expected.int64Valid,
- expected.int64Null,
- expected.float64Valid,
- expected.float64Null,
- expected.stringValid,
- expected.stringNull,
- ).Scan(
- &actual.boolValid,
- &actual.boolNull,
- &actual.int64Valid,
- &actual.int64Null,
- &actual.float64Valid,
- &actual.float64Null,
- &actual.stringValid,
- &actual.stringNull,
- )
- if err != nil {
- t.Fatalf("Scan failed: %v", err)
- }
-
- if expected != actual {
- t.Errorf("Expected %v, but got %v", expected, actual)
- }
-
- ensureConnValid(t, conn)
-}
diff --git a/vendor/github.com/jackc/pgx/replication.go b/vendor/github.com/jackc/pgx/replication.go
index 7b28d6b..7dd5efe 100644
--- a/vendor/github.com/jackc/pgx/replication.go
+++ b/vendor/github.com/jackc/pgx/replication.go
@@ -1,10 +1,15 @@
package pgx
import (
- "errors"
+ "context"
+ "encoding/binary"
"fmt"
- "net"
"time"
+
+ "github.com/pkg/errors"
+
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgproto3"
)
const (
@@ -172,17 +177,21 @@ type ReplicationConn struct {
// message to the server, as well as carries the WAL position of the
// client, which then updates the server's replication slot position.
func (rc *ReplicationConn) SendStandbyStatus(k *StandbyStatus) (err error) {
- writeBuf := newWriteBuf(rc.c, copyData)
- writeBuf.WriteByte(standbyStatusUpdate)
- writeBuf.WriteInt64(int64(k.WalWritePosition))
- writeBuf.WriteInt64(int64(k.WalFlushPosition))
- writeBuf.WriteInt64(int64(k.WalApplyPosition))
- writeBuf.WriteInt64(int64(k.ClientTime))
- writeBuf.WriteByte(k.ReplyRequested)
+ buf := rc.c.wbuf
+ buf = append(buf, copyData)
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+
+ buf = append(buf, standbyStatusUpdate)
+ buf = pgio.AppendInt64(buf, int64(k.WalWritePosition))
+ buf = pgio.AppendInt64(buf, int64(k.WalFlushPosition))
+ buf = pgio.AppendInt64(buf, int64(k.WalApplyPosition))
+ buf = pgio.AppendInt64(buf, int64(k.ClientTime))
+ buf = append(buf, k.ReplyRequested)
- writeBuf.closeMsg()
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])))
- _, err = rc.c.conn.Write(writeBuf.buf)
+ _, err = rc.c.conn.Write(buf)
if err != nil {
rc.c.die(err)
}
@@ -203,107 +212,115 @@ func (rc *ReplicationConn) CauseOfDeath() error {
}
func (rc *ReplicationConn) readReplicationMessage() (r *ReplicationMessage, err error) {
- var t byte
- var reader *msgReader
- t, reader, err = rc.c.rxMsg()
+ msg, err := rc.c.rxMsg()
if err != nil {
return
}
- switch t {
- case noticeResponse:
- pgError := rc.c.rxErrorResponse(reader)
+ switch msg := msg.(type) {
+ case *pgproto3.NoticeResponse:
+ pgError := rc.c.rxErrorResponse((*pgproto3.ErrorResponse)(msg))
if rc.c.shouldLog(LogLevelInfo) {
- rc.c.log(LogLevelInfo, pgError.Error())
+ rc.c.log(LogLevelInfo, pgError.Error(), nil)
}
- case errorResponse:
- err = rc.c.rxErrorResponse(reader)
+ case *pgproto3.ErrorResponse:
+ err = rc.c.rxErrorResponse(msg)
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, err.Error())
+ rc.c.log(LogLevelError, err.Error(), nil)
}
return
- case copyBothResponse:
+ case *pgproto3.CopyBothResponse:
// This is the tail end of the replication process start,
// and can be safely ignored
return
- case copyData:
- var msgType byte
- msgType = reader.readByte()
+ case *pgproto3.CopyData:
+ msgType := msg.Data[0]
+ rp := 1
+
switch msgType {
case walData:
- walStart := reader.readInt64()
- serverWalEnd := reader.readInt64()
- serverTime := reader.readInt64()
- walData := reader.readBytes(reader.msgBytesRemaining)
- walMessage := WalMessage{WalStart: uint64(walStart),
- ServerWalEnd: uint64(serverWalEnd),
- ServerTime: uint64(serverTime),
+ walStart := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverWalEnd := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverTime := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ walData := msg.Data[rp:]
+ walMessage := WalMessage{WalStart: walStart,
+ ServerWalEnd: serverWalEnd,
+ ServerTime: serverTime,
WalData: walData,
}
return &ReplicationMessage{WalMessage: &walMessage}, nil
case senderKeepalive:
- serverWalEnd := reader.readInt64()
- serverTime := reader.readInt64()
- replyNow := reader.readByte()
- h := &ServerHeartbeat{ServerWalEnd: uint64(serverWalEnd), ServerTime: uint64(serverTime), ReplyRequested: replyNow}
+ serverWalEnd := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ serverTime := binary.BigEndian.Uint64(msg.Data[rp:])
+ rp += 8
+ replyNow := msg.Data[rp]
+ rp += 1
+ h := &ServerHeartbeat{ServerWalEnd: serverWalEnd, ServerTime: serverTime, ReplyRequested: replyNow}
return &ReplicationMessage{ServerHeartbeat: h}, nil
default:
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unexpected data playload message type %v", t)
+ rc.c.log(LogLevelError, "Unexpected data payload message type", map[string]interface{}{"type": msgType})
}
}
default:
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unexpected replication message type %v", t)
+ rc.c.log(LogLevelError, "Unexpected replication message type", map[string]interface{}{"type": msg})
}
}
return
}
-// Wait for a single replication message up to timeout time.
+// Wait for a single replication message.
//
// Properly using this requires some knowledge of the postgres replication mechanisms,
// as the client can receive both WAL data (the ultimate payload) and server heartbeat
// updates. The caller also must send standby status updates in order to keep the connection
// alive and working.
//
-// This returns pgx.ErrNotificationTimeout when there is no replication message by the specified
-// duration.
-func (rc *ReplicationConn) WaitForReplicationMessage(timeout time.Duration) (r *ReplicationMessage, err error) {
- var zeroTime time.Time
-
- deadline := time.Now().Add(timeout)
-
- // Use SetReadDeadline to implement the timeout. SetReadDeadline will
- // cause operations to fail with a *net.OpError that has a Timeout()
- // of true. Because the normal pgx rxMsg path considers any error to
- // have potentially corrupted the state of the connection, it dies
- // on any errors. So to avoid timeout errors in rxMsg we set the
- // deadline and peek into the reader. If a timeout error occurs there
- // we don't break the pgx connection. If the Peek returns that data
- // is available then we turn off the read deadline before the rxMsg.
- err = rc.c.conn.SetReadDeadline(deadline)
- if err != nil {
- return nil, err
+// This returns the context error when there is no replication message before
+// the context is canceled.
+func (rc *ReplicationConn) WaitForReplicationMessage(ctx context.Context) (*ReplicationMessage, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
}
- // Wait until there is a byte available before continuing onto the normal msg reading path
- _, err = rc.c.reader.Peek(1)
- if err != nil {
- rc.c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possiple error from SetReadDeadline
- if err, ok := err.(*net.OpError); ok && err.Timeout() {
- return nil, ErrNotificationTimeout
+ go func() {
+ select {
+ case <-ctx.Done():
+ if err := rc.c.conn.SetDeadline(time.Now()); err != nil {
+ rc.Close() // Close connection if unable to set deadline
+ return
+ }
+ rc.c.closedChan <- ctx.Err()
+ case <-rc.c.doneChan:
}
- return nil, err
- }
+ }()
- err = rc.c.conn.SetReadDeadline(zeroTime)
- if err != nil {
- return nil, err
+ r, opErr := rc.readReplicationMessage()
+
+ var err error
+ select {
+ case err = <-rc.c.closedChan:
+ if err := rc.c.conn.SetDeadline(time.Time{}); err != nil {
+ rc.Close() // Close connection if unable to disable deadline
+ return nil, err
+ }
+
+ if opErr == nil {
+ err = nil
+ }
+ case rc.c.doneChan <- struct{}{}:
+ err = opErr
}
- return rc.readReplicationMessage()
+ return r, err
}
func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
@@ -312,32 +329,30 @@ func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
rows := rc.c.getRows(sql, nil)
if err := rc.c.lock(); err != nil {
- rows.abort(err)
+ rows.fatal(err)
return rows, err
}
rows.unlockConn = true
err := rc.c.sendSimpleQuery(sql)
if err != nil {
- rows.abort(err)
+ rows.fatal(err)
}
- var t byte
- var r *msgReader
- t, r, err = rc.c.rxMsg()
+ msg, err := rc.c.rxMsg()
if err != nil {
return nil, err
}
- switch t {
- case rowDescription:
- rows.fields = rc.c.rxRowDescription(r)
+ switch msg := msg.(type) {
+ case *pgproto3.RowDescription:
+ rows.fields = rc.c.rxRowDescription(msg)
// We don't have c.PgTypes here because we're a replication
// connection. This means the field descriptions will have
- // only Oids. Not much we can do about this.
+ // only OIDs. Not much we can do about this.
default:
- if e := rc.c.processContextFreeMsg(t, r); e != nil {
- rows.abort(e)
+ if e := rc.c.processContextFreeMsg(msg); e != nil {
+ rows.fatal(e)
return rows, e
}
}
@@ -354,7 +369,7 @@ func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) {
//
// NOTE: Because this is a replication mode connection, we don't have
// type names, so the field descriptions in the result will have only
-// Oids and no DataTypeName values
+// OIDs and no DataTypeName values
func (rc *ReplicationConn) IdentifySystem() (r *Rows, err error) {
return rc.sendReplicationModeQuery("IDENTIFY_SYSTEM")
}
@@ -369,7 +384,7 @@ func (rc *ReplicationConn) IdentifySystem() (r *Rows, err error) {
//
// NOTE: Because this is a replication mode connection, we don't have
// type names, so the field descriptions in the result will have only
-// Oids and no DataTypeName values
+// OIDs and no DataTypeName values
func (rc *ReplicationConn) TimelineHistory(timeline int) (r *Rows, err error) {
return rc.sendReplicationModeQuery(fmt.Sprintf("TIMELINE_HISTORY %d", timeline))
}
@@ -401,15 +416,18 @@ func (rc *ReplicationConn) StartReplication(slotName string, startLsn uint64, ti
return
}
+ ctx, cancelFn := context.WithTimeout(context.Background(), initialReplicationResponseTimeout)
+ defer cancelFn()
+
// The first replication message that comes back here will be (in a success case)
// an empty CopyBoth that is (apparently) sent as the confirmation that the replication has
// started. This call will either return nil, nil or if it returns an error
// that indicates the start replication command failed
var r *ReplicationMessage
- r, err = rc.WaitForReplicationMessage(initialReplicationResponseTimeout)
+ r, err = rc.WaitForReplicationMessage(ctx)
if err != nil && r != nil {
if rc.c.shouldLog(LogLevelError) {
- rc.c.log(LogLevelError, "Unxpected replication message %v", r)
+ rc.c.log(LogLevelError, "Unexpected replication message", map[string]interface{}{"msg": r, "err": err})
}
}
@@ -422,6 +440,18 @@ func (rc *ReplicationConn) CreateReplicationSlot(slotName, outputPlugin string)
return
}
+// Create the replication slot, using the given name and output plugin, and return the consistent_point and snapshot_name values.
+func (rc *ReplicationConn) CreateReplicationSlotEx(slotName, outputPlugin string) (consistentPoint string, snapshotName string, err error) {
+ var dummy string
+ var rows *Rows
+ rows, err = rc.sendReplicationModeQuery(fmt.Sprintf("CREATE_REPLICATION_SLOT %s LOGICAL %s", slotName, outputPlugin))
+ defer rows.Close()
+ for rows.Next() {
+ rows.Scan(&dummy, &consistentPoint, &snapshotName, &dummy)
+ }
+ return
+}
+
// Drop the replication slot for the given name
func (rc *ReplicationConn) DropReplicationSlot(slotName string) (err error) {
_, err = rc.c.Exec(fmt.Sprintf("DROP_REPLICATION_SLOT %s", slotName))
diff --git a/vendor/github.com/jackc/pgx/replication_test.go b/vendor/github.com/jackc/pgx/replication_test.go
deleted file mode 100644
index 4f810c7..0000000
--- a/vendor/github.com/jackc/pgx/replication_test.go
+++ /dev/null
@@ -1,329 +0,0 @@
-package pgx_test
-
-import (
- "fmt"
- "github.com/jackc/pgx"
- "reflect"
- "strconv"
- "strings"
- "testing"
- "time"
-)
-
-// This function uses a postgresql 9.6 specific column
-func getConfirmedFlushLsnFor(t *testing.T, conn *pgx.Conn, slot string) string {
- // Fetch the restart LSN of the slot, to establish a starting point
- rows, err := conn.Query(fmt.Sprintf("select confirmed_flush_lsn from pg_replication_slots where slot_name='%s'", slot))
- if err != nil {
- t.Fatalf("conn.Query failed: %v", err)
- }
- defer rows.Close()
-
- var restartLsn string
- for rows.Next() {
- rows.Scan(&restartLsn)
- }
- return restartLsn
-}
-
-// This battleship test (at least somewhat by necessity) does
-// several things all at once in a single run. It:
-// - Establishes a replication connection & slot
-// - Does a series of operations to create some known WAL entries
-// - Replicates the entries down, and checks that the rows it
-// created come down in order
-// - Sends a standby status message to update the server with the
-// wal position of the slot
-// - Checks the wal position of the slot on the server to make sure
-// the update succeeded
-func TestSimpleReplicationConnection(t *testing.T) {
- t.Parallel()
-
- var err error
-
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- conn := mustConnect(t, *replicationConnConfig)
- defer closeConn(t, conn)
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- err = replicationConn.CreateReplicationSlot("pgx_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
-
- // Do a simple change so we can get some wal data
- _, err = conn.Exec("create table if not exists replication_test (a integer)")
- if err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- err = replicationConn.StartReplication("pgx_test", 0, -1)
- if err != nil {
- t.Fatalf("Failed to start replication: %v", err)
- }
-
- var i int32
- var insertedTimes []int64
- for i < 5 {
- var ct pgx.CommandTag
- currentTime := time.Now().Unix()
- insertedTimes = append(insertedTimes, currentTime)
- ct, err = conn.Exec("insert into replication_test(a) values($1)", currentTime)
- if err != nil {
- t.Fatalf("Insert failed: %v", err)
- }
- t.Logf("Inserted %d rows", ct.RowsAffected())
- i++
- }
-
- i = 0
- var foundTimes []int64
- var foundCount int
- var maxWal uint64
- for {
- var message *pgx.ReplicationMessage
-
- message, err = replicationConn.WaitForReplicationMessage(time.Duration(1 * time.Second))
- if err != nil {
- if err != pgx.ErrNotificationTimeout {
- t.Fatalf("Replication failed: %v %s", err, reflect.TypeOf(err))
- }
- }
- if message != nil {
- if message.WalMessage != nil {
- // The waldata payload with the test_decoding plugin looks like:
- // public.replication_test: INSERT: a[integer]:2
- // What we wanna do here is check that once we find one of our inserted times,
- // that they occur in the wal stream in the order we executed them.
- walString := string(message.WalMessage.WalData)
- if strings.Contains(walString, "public.replication_test: INSERT") {
- stringParts := strings.Split(walString, ":")
- offset, err := strconv.ParseInt(stringParts[len(stringParts)-1], 10, 64)
- if err != nil {
- t.Fatalf("Failed to parse walString %s", walString)
- }
- if foundCount > 0 || offset == insertedTimes[0] {
- foundTimes = append(foundTimes, offset)
- foundCount++
- }
- }
- if message.WalMessage.WalStart > maxWal {
- maxWal = message.WalMessage.WalStart
- }
-
- }
- if message.ServerHeartbeat != nil {
- t.Logf("Got heartbeat: %s", message.ServerHeartbeat)
- }
- } else {
- t.Log("Timed out waiting for wal message")
- i++
- }
- if i > 3 {
- t.Log("Actual timeout")
- break
- }
- }
-
- if foundCount != len(insertedTimes) {
- t.Fatalf("Failed to find all inserted time values in WAL stream (found %d expected %d)", foundCount, len(insertedTimes))
- }
-
- for i := range insertedTimes {
- if foundTimes[i] != insertedTimes[i] {
- t.Fatalf("Found %d expected %d", foundTimes[i], insertedTimes[i])
- }
- }
-
- t.Logf("Found %d times, as expected", len(foundTimes))
-
- // Before closing our connection, let's send a standby status to update our wal
- // position, which should then be reflected if we fetch out our current wal position
- // for the slot
- status, err := pgx.NewStandbyStatus(maxWal)
- if err != nil {
- t.Errorf("Failed to create standby status %v", err)
- }
- replicationConn.SendStandbyStatus(status)
-
- restartLsn := getConfirmedFlushLsnFor(t, conn, "pgx_test")
- integerRestartLsn, _ := pgx.ParseLSN(restartLsn)
- if integerRestartLsn != maxWal {
- t.Fatalf("Wal offset update failed, expected %s found %s", pgx.FormatLSN(maxWal), restartLsn)
- }
-
- closeReplicationConn(t, replicationConn)
-
- replicationConn2 := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn2)
-
- err = replicationConn2.DropReplicationSlot("pgx_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-
- droppedLsn := getConfirmedFlushLsnFor(t, conn, "pgx_test")
- if droppedLsn != "" {
- t.Errorf("Got odd flush lsn %s for supposedly dropped slot", droppedLsn)
- }
-}
-
-func TestReplicationConn_DropReplicationSlot(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- err := replicationConn.CreateReplicationSlot("pgx_slot_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
- err = replicationConn.DropReplicationSlot("pgx_slot_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-
- // We re-create to ensure the drop worked.
- err = replicationConn.CreateReplicationSlot("pgx_slot_test", "test_decoding")
- if err != nil {
- t.Logf("replication slot create failed: %v", err)
- }
-
- // And finally we drop to ensure we don't leave dirty state
- err = replicationConn.DropReplicationSlot("pgx_slot_test")
- if err != nil {
- t.Fatalf("Failed to drop replication slot: %v", err)
- }
-}
-
-func TestIdentifySystem(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn2 := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn2)
-
- r, err := replicationConn2.IdentifySystem()
- if err != nil {
- t.Error(err)
- }
- defer r.Close()
- for _, fd := range r.FieldDescriptions() {
- t.Logf("Field: %s of type %v", fd.Name, fd.DataType)
- }
-
- var rowCount int
- for r.Next() {
- rowCount++
- values, err := r.Values()
- if err != nil {
- t.Error(err)
- }
- t.Logf("Row values: %v", values)
- }
- if r.Err() != nil {
- t.Error(r.Err())
- }
-
- if rowCount == 0 {
- t.Errorf("Failed to find any rows: %d", rowCount)
- }
-}
-
-func getCurrentTimeline(t *testing.T, rc *pgx.ReplicationConn) int {
- r, err := rc.IdentifySystem()
- if err != nil {
- t.Error(err)
- }
- defer r.Close()
- for r.Next() {
- values, e := r.Values()
- if e != nil {
- t.Error(e)
- }
- timeline, e := strconv.Atoi(values[1].(string))
- if e != nil {
- t.Error(e)
- }
- return timeline
- }
- t.Fatal("Failed to read timeline")
- return -1
-}
-
-func TestGetTimelineHistory(t *testing.T) {
- if replicationConnConfig == nil {
- t.Skip("Skipping due to undefined replicationConnConfig")
- }
-
- replicationConn := mustReplicationConnect(t, *replicationConnConfig)
- defer closeReplicationConn(t, replicationConn)
-
- timeline := getCurrentTimeline(t, replicationConn)
-
- r, err := replicationConn.TimelineHistory(timeline)
- if err != nil {
- t.Errorf("%#v", err)
- }
- defer r.Close()
-
- for _, fd := range r.FieldDescriptions() {
- t.Logf("Field: %s of type %v", fd.Name, fd.DataType)
- }
-
- var rowCount int
- for r.Next() {
- rowCount++
- values, err := r.Values()
- if err != nil {
- t.Error(err)
- }
- t.Logf("Row values: %v", values)
- }
- if r.Err() != nil {
- if strings.Contains(r.Err().Error(), "No such file or directory") {
- // This is normal, this means the timeline we're on has no
- // history, which is the common case in a test db that
- // has only one timeline
- return
- }
- t.Error(r.Err())
- }
-
- // If we have a timeline history (see above) there should have been
- // rows emitted
- if rowCount == 0 {
- t.Errorf("Failed to find any rows: %d", rowCount)
- }
-}
-
-func TestStandbyStatusParsing(t *testing.T) {
- // Let's push the boundary conditions of the standby status and ensure it errors correctly
- status, err := pgx.NewStandbyStatus(0, 1, 2, 3, 4)
- if err == nil {
- t.Errorf("Expected error from new standby status, got %v", status)
- }
-
- // And if you provide 3 args, ensure the right fields are set
- status, err = pgx.NewStandbyStatus(1, 2, 3)
- if err != nil {
- t.Errorf("Failed to create test status: %v", err)
- }
- if status.WalFlushPosition != 1 {
- t.Errorf("Unexpected flush position %d", status.WalFlushPosition)
- }
- if status.WalApplyPosition != 2 {
- t.Errorf("Unexpected apply position %d", status.WalApplyPosition)
- }
- if status.WalWritePosition != 3 {
- t.Errorf("Unexpected write position %d", status.WalWritePosition)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/sql_test.go b/vendor/github.com/jackc/pgx/sql_test.go
deleted file mode 100644
index dd03603..0000000
--- a/vendor/github.com/jackc/pgx/sql_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package pgx_test
-
-import (
- "strconv"
- "testing"
-
- "github.com/jackc/pgx"
-)
-
-func TestQueryArgs(t *testing.T) {
- var qa pgx.QueryArgs
-
- for i := 1; i < 512; i++ {
- expectedPlaceholder := "$" + strconv.Itoa(i)
- placeholder := qa.Append(i)
- if placeholder != expectedPlaceholder {
- t.Errorf(`Expected qa.Append to return "%s", but it returned "%s"`, expectedPlaceholder, placeholder)
- }
- }
-}
-
-func BenchmarkQueryArgs(b *testing.B) {
- for i := 0; i < b.N; i++ {
- qa := pgx.QueryArgs(make([]interface{}, 0, 16))
- qa.Append("foo1")
- qa.Append("foo2")
- qa.Append("foo3")
- qa.Append("foo4")
- qa.Append("foo5")
- qa.Append("foo6")
- qa.Append("foo7")
- qa.Append("foo8")
- qa.Append("foo9")
- qa.Append("foo10")
- }
-}
diff --git a/vendor/github.com/jackc/pgx/stdlib/sql.go b/vendor/github.com/jackc/pgx/stdlib/sql.go
index 8c78cd3..2d4930e 100644
--- a/vendor/github.com/jackc/pgx/stdlib/sql.go
+++ b/vendor/github.com/jackc/pgx/stdlib/sql.go
@@ -14,154 +14,213 @@
// return err
// }
//
-// Or a normal pgx connection pool can be established and the database/sql
-// connection can be created through stdlib.OpenFromConnPool(). This allows
-// more control over the connection process (such as TLS), more control
-// over the connection pool, setting an AfterConnect hook, and using both
-// database/sql and pgx interfaces as needed.
+// A DriverConfig can be used to further configure the connection process. This
+// allows configuring TLS configuration, setting a custom dialer, logging, and
+// setting an AfterConnect hook.
//
-// connConfig := pgx.ConnConfig{
-// Host: "localhost",
-// User: "pgx_md5",
-// Password: "secret",
-// Database: "pgx_test",
-// }
+// driverConfig := stdlib.DriverConfig{
+// ConnConfig: pgx.ConnConfig{
+// Logger: logger,
+// },
+// AfterConnect: func(c *pgx.Conn) error {
+// // Ensure all connections have this temp table available
+// _, err := c.Exec("create temporary table foo(...)")
+// return err
+// },
+// }
//
-// config := pgx.ConnPoolConfig{ConnConfig: connConfig}
-// pool, err := pgx.NewConnPool(config)
-// if err != nil {
-// return err
-// }
+// stdlib.RegisterDriverConfig(&driverConfig)
//
-// db, err := stdlib.OpenFromConnPool(pool)
+// db, err := sql.Open("pgx", driverConfig.ConnectionString("postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test"))
// if err != nil {
-// t.Fatalf("Unable to create connection pool: %v", err)
+// return err
+// }
+//
+// pgx uses standard PostgreSQL positional parameters in queries. e.g. $1, $2.
+// It does not support named parameters.
+//
+// db.QueryRow("select * from users where id=$1", userID)
+//
+// AcquireConn and ReleaseConn acquire and release a *pgx.Conn from the standard
+// database/sql.DB connection pool. This allows operations that must be
+// performed on a single connection, but should not be run in a transaction or
+// to use pgx specific functionality.
+//
+// conn, err := stdlib.AcquireConn(db)
+// if err != nil {
+// return err
// }
+// defer stdlib.ReleaseConn(db, conn)
//
-// If the database/sql connection is established through
-// stdlib.OpenFromConnPool then access to a pgx *ConnPool can be regained
-// through db.Driver(). This allows writing a fast path for pgx while
-// preserving compatibility with other drivers and database
+// // do stuff with pgx.Conn
//
-// if driver, ok := db.Driver().(*stdlib.Driver); ok && driver.Pool != nil {
+// It also can be used to enable a fast path for pgx while preserving
+// compatibility with other drivers and databases.
+//
+// conn, err := stdlib.AcquireConn(db)
+// if err == nil {
// // fast path with pgx
+// // ...
+// // release conn when done
+// stdlib.ReleaseConn(db, conn)
// } else {
// // normal path for other drivers and databases
// }
package stdlib
import (
+ "context"
"database/sql"
"database/sql/driver"
- "errors"
+ "encoding/binary"
"fmt"
"io"
+ "reflect"
+ "strings"
"sync"
- "github.com/jackc/pgx"
-)
+ "github.com/pkg/errors"
-var (
- openFromConnPoolCountMu sync.Mutex
- openFromConnPoolCount int
+ "github.com/jackc/pgx"
+ "github.com/jackc/pgx/pgtype"
)
// oids that map to intrinsic database/sql types. These will be allowed to be
// binary, anything else will be forced to text format
-var databaseSqlOids map[pgx.Oid]bool
+var databaseSqlOIDs map[pgtype.OID]bool
+
+var pgxDriver *Driver
+
+type ctxKey int
+
+var ctxKeyFakeTx ctxKey = 0
+
+var ErrNotPgx = errors.New("not pgx *sql.DB")
func init() {
- d := &Driver{}
- sql.Register("pgx", d)
-
- databaseSqlOids = make(map[pgx.Oid]bool)
- databaseSqlOids[pgx.BoolOid] = true
- databaseSqlOids[pgx.ByteaOid] = true
- databaseSqlOids[pgx.Int2Oid] = true
- databaseSqlOids[pgx.Int4Oid] = true
- databaseSqlOids[pgx.Int8Oid] = true
- databaseSqlOids[pgx.Float4Oid] = true
- databaseSqlOids[pgx.Float8Oid] = true
- databaseSqlOids[pgx.DateOid] = true
- databaseSqlOids[pgx.TimestampTzOid] = true
- databaseSqlOids[pgx.TimestampOid] = true
+ pgxDriver = &Driver{
+ configs: make(map[int64]*DriverConfig),
+ fakeTxConns: make(map[*pgx.Conn]*sql.Tx),
+ }
+ sql.Register("pgx", pgxDriver)
+
+ databaseSqlOIDs = make(map[pgtype.OID]bool)
+ databaseSqlOIDs[pgtype.BoolOID] = true
+ databaseSqlOIDs[pgtype.ByteaOID] = true
+ databaseSqlOIDs[pgtype.CIDOID] = true
+ databaseSqlOIDs[pgtype.DateOID] = true
+ databaseSqlOIDs[pgtype.Float4OID] = true
+ databaseSqlOIDs[pgtype.Float8OID] = true
+ databaseSqlOIDs[pgtype.Int2OID] = true
+ databaseSqlOIDs[pgtype.Int4OID] = true
+ databaseSqlOIDs[pgtype.Int8OID] = true
+ databaseSqlOIDs[pgtype.OIDOID] = true
+ databaseSqlOIDs[pgtype.TimestampOID] = true
+ databaseSqlOIDs[pgtype.TimestamptzOID] = true
+ databaseSqlOIDs[pgtype.XIDOID] = true
}
type Driver struct {
- Pool *pgx.ConnPool
+ configMutex sync.Mutex
+ configCount int64
+ configs map[int64]*DriverConfig
+
+ fakeTxMutex sync.Mutex
+ fakeTxConns map[*pgx.Conn]*sql.Tx
}
func (d *Driver) Open(name string) (driver.Conn, error) {
- if d.Pool != nil {
- conn, err := d.Pool.Acquire()
- if err != nil {
- return nil, err
- }
-
- return &Conn{conn: conn, pool: d.Pool}, nil
+ var connConfig pgx.ConnConfig
+ var afterConnect func(*pgx.Conn) error
+ if len(name) >= 9 && name[0] == 0 {
+ idBuf := []byte(name)[1:9]
+ id := int64(binary.BigEndian.Uint64(idBuf))
+ connConfig = d.configs[id].ConnConfig
+ afterConnect = d.configs[id].AfterConnect
+ name = name[9:]
}
- connConfig, err := pgx.ParseConnectionString(name)
+ parsedConfig, err := pgx.ParseConnectionString(name)
if err != nil {
return nil, err
}
+ connConfig = connConfig.Merge(parsedConfig)
conn, err := pgx.Connect(connConfig)
if err != nil {
return nil, err
}
- c := &Conn{conn: conn}
+ if afterConnect != nil {
+ err = afterConnect(conn)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ c := &Conn{conn: conn, driver: d, connConfig: connConfig}
return c, nil
}
-// OpenFromConnPool takes the existing *pgx.ConnPool pool and returns a *sql.DB
-// with pool as the backend. This enables full control over the connection
-// process and configuration while maintaining compatibility with the
-// database/sql interface. In addition, by calling Driver() on the returned
-// *sql.DB and typecasting to *stdlib.Driver a reference to the pgx.ConnPool can
-// be reaquired later. This allows fast paths targeting pgx to be used while
-// still maintaining compatibility with other databases and drivers.
-//
-// pool connection size must be at least 2.
-func OpenFromConnPool(pool *pgx.ConnPool) (*sql.DB, error) {
- d := &Driver{Pool: pool}
-
- openFromConnPoolCountMu.Lock()
- name := fmt.Sprintf("pgx-%d", openFromConnPoolCount)
- openFromConnPoolCount++
- openFromConnPoolCountMu.Unlock()
+type DriverConfig struct {
+ pgx.ConnConfig
+ AfterConnect func(*pgx.Conn) error // function to call on every new connection
+ driver *Driver
+ id int64
+}
- sql.Register(name, d)
- db, err := sql.Open(name, "")
- if err != nil {
- return nil, err
+// ConnectionString encodes the DriverConfig into the original connection
+// string. DriverConfig must be registered before calling ConnectionString.
+func (c *DriverConfig) ConnectionString(original string) string {
+ if c.driver == nil {
+ panic("DriverConfig must be registered before calling ConnectionString")
}
- // Presumably OpenFromConnPool is being used because the user wants to use
- // database/sql most of the time, but fast path with pgx some of the time.
- // Allow database/sql to use all the connections, but release 2 idle ones.
- // Don't have database/sql immediately release all idle connections because
- // that would mean that prepared statements would be lost (which kills
- // performance if the prepared statements constantly have to be reprepared)
- stat := pool.Stat()
+ buf := make([]byte, 9)
+ binary.BigEndian.PutUint64(buf[1:], uint64(c.id))
+ buf = append(buf, original...)
+ return string(buf)
+}
+
+func (d *Driver) registerDriverConfig(c *DriverConfig) {
+ d.configMutex.Lock()
- if stat.MaxConnections <= 2 {
- return nil, errors.New("pool connection size must be at least 3")
- }
- db.SetMaxIdleConns(stat.MaxConnections - 2)
- db.SetMaxOpenConns(stat.MaxConnections)
+ c.driver = d
+ c.id = d.configCount
+ d.configs[d.configCount] = c
+ d.configCount++
- return db, nil
+ d.configMutex.Unlock()
+}
+
+func (d *Driver) unregisterDriverConfig(c *DriverConfig) {
+ d.configMutex.Lock()
+ delete(d.configs, c.id)
+ d.configMutex.Unlock()
+}
+
+// RegisterDriverConfig registers a DriverConfig for use with Open.
+func RegisterDriverConfig(c *DriverConfig) {
+ pgxDriver.registerDriverConfig(c)
+}
+
+// UnregisterDriverConfig removes a DriverConfig registration.
+func UnregisterDriverConfig(c *DriverConfig) {
+ pgxDriver.unregisterDriverConfig(c)
}
type Conn struct {
- conn *pgx.Conn
- pool *pgx.ConnPool
- psCount int64 // Counter used for creating unique prepared statement names
+ conn *pgx.Conn
+ psCount int64 // Counter used for creating unique prepared statement names
+ driver *Driver
+ connConfig pgx.ConnConfig
}
func (c *Conn) Prepare(query string) (driver.Stmt, error) {
+ return c.PrepareContext(context.Background(), query)
+}
+
+func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
@@ -169,7 +228,7 @@ func (c *Conn) Prepare(query string) (driver.Stmt, error) {
name := fmt.Sprintf("pgx_%d", c.psCount)
c.psCount++
- ps, err := c.conn.Prepare(name, query)
+ ps, err := c.conn.PrepareEx(ctx, name, query, nil)
if err != nil {
return nil, err
}
@@ -180,25 +239,43 @@ func (c *Conn) Prepare(query string) (driver.Stmt, error) {
}
func (c *Conn) Close() error {
- err := c.conn.Close()
- if c.pool != nil {
- c.pool.Release(c.conn)
- }
-
- return err
+ return c.conn.Close()
}
func (c *Conn) Begin() (driver.Tx, error) {
+ return c.BeginTx(context.Background(), driver.TxOptions{})
+}
+
+func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
- _, err := c.conn.Exec("begin")
- if err != nil {
- return nil, err
+ if pconn, ok := ctx.Value(ctxKeyFakeTx).(**pgx.Conn); ok {
+ *pconn = c.conn
+ return fakeTx{}, nil
+ }
+
+ var pgxOpts pgx.TxOptions
+ switch sql.IsolationLevel(opts.Isolation) {
+ case sql.LevelDefault:
+ case sql.LevelReadUncommitted:
+ pgxOpts.IsoLevel = pgx.ReadUncommitted
+ case sql.LevelReadCommitted:
+ pgxOpts.IsoLevel = pgx.ReadCommitted
+ case sql.LevelSnapshot:
+ pgxOpts.IsoLevel = pgx.RepeatableRead
+ case sql.LevelSerializable:
+ pgxOpts.IsoLevel = pgx.Serializable
+ default:
+ return nil, errors.Errorf("unsupported isolation: %v", opts.Isolation)
+ }
+
+ if opts.ReadOnly {
+ pgxOpts.AccessMode = pgx.ReadOnly
}
- return &Tx{conn: c.conn}, nil
+ return c.conn.BeginEx(ctx, &pgxOpts)
}
func (c *Conn) Exec(query string, argsV []driver.Value) (driver.Result, error) {
@@ -211,19 +288,65 @@ func (c *Conn) Exec(query string, argsV []driver.Value) (driver.Result, error) {
return driver.RowsAffected(commandTag.RowsAffected()), err
}
+func (c *Conn) ExecContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Result, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := namedValueToInterface(argsV)
+
+ commandTag, err := c.conn.ExecEx(ctx, query, nil, args...)
+ return driver.RowsAffected(commandTag.RowsAffected()), err
+}
+
func (c *Conn) Query(query string, argsV []driver.Value) (driver.Rows, error) {
if !c.conn.IsAlive() {
return nil, driver.ErrBadConn
}
- ps, err := c.conn.Prepare("", query)
+ if !c.connConfig.PreferSimpleProtocol {
+ ps, err := c.conn.Prepare("", query)
+ if err != nil {
+ return nil, err
+ }
+
+ restrictBinaryToDatabaseSqlTypes(ps)
+ return c.queryPrepared("", argsV)
+ }
+
+ rows, err := c.conn.Query(query, valueToInterface(argsV)...)
if err != nil {
return nil, err
}
- restrictBinaryToDatabaseSqlTypes(ps)
+ // Preload first row because otherwise we won't know what columns are available when database/sql asks.
+ more := rows.Next()
+ return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
+}
- return c.queryPrepared("", argsV)
+func (c *Conn) QueryContext(ctx context.Context, query string, argsV []driver.NamedValue) (driver.Rows, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ if !c.connConfig.PreferSimpleProtocol {
+ ps, err := c.conn.PrepareEx(ctx, "", query, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ restrictBinaryToDatabaseSqlTypes(ps)
+ return c.queryPreparedContext(ctx, "", argsV)
+ }
+
+ rows, err := c.conn.QueryEx(ctx, query, nil, namedValueToInterface(argsV)...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Preload first row because otherwise we won't know what columns are available when database/sql asks.
+ more := rows.Next()
+ return &Rows{rows: rows, skipNext: true, skipNextMore: more}, nil
}
func (c *Conn) queryPrepared(name string, argsV []driver.Value) (driver.Rows, error) {
@@ -241,12 +364,35 @@ func (c *Conn) queryPrepared(name string, argsV []driver.Value) (driver.Rows, er
return &Rows{rows: rows}, nil
}
+func (c *Conn) queryPreparedContext(ctx context.Context, name string, argsV []driver.NamedValue) (driver.Rows, error) {
+ if !c.conn.IsAlive() {
+ return nil, driver.ErrBadConn
+ }
+
+ args := namedValueToInterface(argsV)
+
+ rows, err := c.conn.QueryEx(ctx, name, nil, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Rows{rows: rows}, nil
+}
+
+func (c *Conn) Ping(ctx context.Context) error {
+ if !c.conn.IsAlive() {
+ return driver.ErrBadConn
+ }
+
+ return c.conn.Ping(ctx)
+}
+
// Anything that isn't a database/sql compatible type needs to be forced to
// text format so that pgx.Rows.Values doesn't decode it into a native type
// (e.g. []int32)
func restrictBinaryToDatabaseSqlTypes(ps *pgx.PreparedStatement) {
for i := range ps.FieldDescriptions {
- intrinsic, _ := databaseSqlOids[ps.FieldDescriptions[i].DataType]
+ intrinsic, _ := databaseSqlOIDs[ps.FieldDescriptions[i].DataType]
if !intrinsic {
ps.FieldDescriptions[i].FormatCode = pgx.TextFormatCode
}
@@ -263,20 +409,30 @@ func (s *Stmt) Close() error {
}
func (s *Stmt) NumInput() int {
- return len(s.ps.ParameterOids)
+ return len(s.ps.ParameterOIDs)
}
func (s *Stmt) Exec(argsV []driver.Value) (driver.Result, error) {
return s.conn.Exec(s.ps.Name, argsV)
}
+func (s *Stmt) ExecContext(ctx context.Context, argsV []driver.NamedValue) (driver.Result, error) {
+ return s.conn.ExecContext(ctx, s.ps.Name, argsV)
+}
+
func (s *Stmt) Query(argsV []driver.Value) (driver.Rows, error) {
return s.conn.queryPrepared(s.ps.Name, argsV)
}
-// TODO - rename to avoid alloc
+func (s *Stmt) QueryContext(ctx context.Context, argsV []driver.NamedValue) (driver.Rows, error) {
+ return s.conn.queryPreparedContext(ctx, s.ps.Name, argsV)
+}
+
type Rows struct {
- rows *pgx.Rows
+ rows *pgx.Rows
+ values []interface{}
+ skipNext bool
+ skipNextMore bool
}
func (r *Rows) Columns() []string {
@@ -288,13 +444,79 @@ func (r *Rows) Columns() []string {
return names
}
+// ColumnTypeDatabaseTypeName return the database system type name.
+func (r *Rows) ColumnTypeDatabaseTypeName(index int) string {
+ return strings.ToUpper(r.rows.FieldDescriptions()[index].DataTypeName)
+}
+
+// ColumnTypeLength returns the length of the column type if the column is a
+// variable length type. If the column is not a variable length type ok
+// should return false.
+func (r *Rows) ColumnTypeLength(index int) (int64, bool) {
+ return r.rows.FieldDescriptions()[index].Length()
+}
+
+// ColumnTypePrecisionScale should return the precision and scale for decimal
+// types. If not applicable, ok should be false.
+func (r *Rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) {
+ return r.rows.FieldDescriptions()[index].PrecisionScale()
+}
+
+// ColumnTypeScanType returns the value type that can be used to scan types into.
+func (r *Rows) ColumnTypeScanType(index int) reflect.Type {
+ return r.rows.FieldDescriptions()[index].Type()
+}
+
func (r *Rows) Close() error {
r.rows.Close()
return nil
}
func (r *Rows) Next(dest []driver.Value) error {
- more := r.rows.Next()
+ if r.values == nil {
+ r.values = make([]interface{}, len(r.rows.FieldDescriptions()))
+ for i, fd := range r.rows.FieldDescriptions() {
+ switch fd.DataType {
+ case pgtype.BoolOID:
+ r.values[i] = &pgtype.Bool{}
+ case pgtype.ByteaOID:
+ r.values[i] = &pgtype.Bytea{}
+ case pgtype.CIDOID:
+ r.values[i] = &pgtype.CID{}
+ case pgtype.DateOID:
+ r.values[i] = &pgtype.Date{}
+ case pgtype.Float4OID:
+ r.values[i] = &pgtype.Float4{}
+ case pgtype.Float8OID:
+ r.values[i] = &pgtype.Float8{}
+ case pgtype.Int2OID:
+ r.values[i] = &pgtype.Int2{}
+ case pgtype.Int4OID:
+ r.values[i] = &pgtype.Int4{}
+ case pgtype.Int8OID:
+ r.values[i] = &pgtype.Int8{}
+ case pgtype.OIDOID:
+ r.values[i] = &pgtype.OIDValue{}
+ case pgtype.TimestampOID:
+ r.values[i] = &pgtype.Timestamp{}
+ case pgtype.TimestamptzOID:
+ r.values[i] = &pgtype.Timestamptz{}
+ case pgtype.XIDOID:
+ r.values[i] = &pgtype.XID{}
+ default:
+ r.values[i] = &pgtype.GenericText{}
+ }
+ }
+ }
+
+ var more bool
+ if r.skipNext {
+ more = r.skipNextMore
+ r.skipNext = false
+ } else {
+ more = r.rows.Next()
+ }
+
if !more {
if r.rows.Err() == nil {
return io.EOF
@@ -303,19 +525,16 @@ func (r *Rows) Next(dest []driver.Value) error {
}
}
- values, err := r.rows.Values()
+ err := r.rows.Scan(r.values...)
if err != nil {
return err
}
- if len(dest) < len(values) {
- fmt.Printf("%d: %#v\n", len(dest), dest)
- fmt.Printf("%d: %#v\n", len(values), values)
- return errors.New("expected more values than were received")
- }
-
- for i, v := range values {
- dest[i] = driver.Value(v)
+ for i, v := range r.values {
+ dest[i], err = v.(driver.Valuer).Value()
+ if err != nil {
+ return err
+ }
}
return nil
@@ -333,16 +552,58 @@ func valueToInterface(argsV []driver.Value) []interface{} {
return args
}
-type Tx struct {
- conn *pgx.Conn
+func namedValueToInterface(argsV []driver.NamedValue) []interface{} {
+ args := make([]interface{}, 0, len(argsV))
+ for _, v := range argsV {
+ if v.Value != nil {
+ args = append(args, v.Value.(interface{}))
+ } else {
+ args = append(args, nil)
+ }
+ }
+ return args
}
-func (t *Tx) Commit() error {
- _, err := t.conn.Exec("commit")
- return err
+type fakeTx struct{}
+
+func (fakeTx) Commit() error { return nil }
+
+func (fakeTx) Rollback() error { return nil }
+
+func AcquireConn(db *sql.DB) (*pgx.Conn, error) {
+ driver, ok := db.Driver().(*Driver)
+ if !ok {
+ return nil, ErrNotPgx
+ }
+
+ var conn *pgx.Conn
+ ctx := context.WithValue(context.Background(), ctxKeyFakeTx, &conn)
+ tx, err := db.BeginTx(ctx, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ driver.fakeTxMutex.Lock()
+ driver.fakeTxConns[conn] = tx
+ driver.fakeTxMutex.Unlock()
+
+ return conn, nil
}
-func (t *Tx) Rollback() error {
- _, err := t.conn.Exec("rollback")
- return err
+func ReleaseConn(db *sql.DB, conn *pgx.Conn) error {
+ var tx *sql.Tx
+ var ok bool
+
+ driver := db.Driver().(*Driver)
+ driver.fakeTxMutex.Lock()
+ tx, ok = driver.fakeTxConns[conn]
+ if ok {
+ delete(driver.fakeTxConns, conn)
+ driver.fakeTxMutex.Unlock()
+ } else {
+ driver.fakeTxMutex.Unlock()
+ return errors.Errorf("can't release conn that is not acquired")
+ }
+
+ return tx.Rollback()
}
diff --git a/vendor/github.com/jackc/pgx/stdlib/sql_test.go b/vendor/github.com/jackc/pgx/stdlib/sql_test.go
deleted file mode 100644
index 1455ca1..0000000
--- a/vendor/github.com/jackc/pgx/stdlib/sql_test.go
+++ /dev/null
@@ -1,691 +0,0 @@
-package stdlib_test
-
-import (
- "bytes"
- "database/sql"
- "github.com/jackc/pgx"
- "github.com/jackc/pgx/stdlib"
- "sync"
- "testing"
-)
-
-func openDB(t *testing.T) *sql.DB {
- db, err := sql.Open("pgx", "postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test")
- if err != nil {
- t.Fatalf("sql.Open failed: %v", err)
- }
-
- return db
-}
-
-func closeDB(t *testing.T, db *sql.DB) {
- err := db.Close()
- if err != nil {
- t.Fatalf("db.Close unexpectedly failed: %v", err)
- }
-}
-
-// Do a simple query to ensure the connection is still usable
-func ensureConnValid(t *testing.T, db *sql.DB) {
- var sum, rowCount int32
-
- rows, err := db.Query("select generate_series(1,$1)", 10)
- if err != nil {
- t.Fatalf("db.Query failed: %v", err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- sum += n
- rowCount++
- }
-
- if rows.Err() != nil {
- t.Fatalf("db.Query failed: %v", err)
- }
-
- if rowCount != 10 {
- t.Error("Select called onDataRow wrong number of times")
- }
- if sum != 55 {
- t.Error("Wrong values returned")
- }
-}
-
-type preparer interface {
- Prepare(query string) (*sql.Stmt, error)
-}
-
-func prepareStmt(t *testing.T, p preparer, sql string) *sql.Stmt {
- stmt, err := p.Prepare(sql)
- if err != nil {
- t.Fatalf("%v Prepare unexpectedly failed: %v", p, err)
- }
-
- return stmt
-}
-
-func closeStmt(t *testing.T, stmt *sql.Stmt) {
- err := stmt.Close()
- if err != nil {
- t.Fatalf("stmt.Close unexpectedly failed: %v", err)
- }
-}
-
-func TestNormalLifeCycle(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- stmt := prepareStmt(t, db, "select 'foo', n from generate_series($1::int, $2::int) n")
- defer closeStmt(t, stmt)
-
- rows, err := stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestSqlOpenDoesNotHavePool(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool != nil {
- t.Fatal("Did not expect driver opened through database/sql to have Pool, but it did")
- }
-}
-
-func TestOpenFromConnPool(t *testing.T) {
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // Can get pgx.ConnPool from driver
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool == nil {
- t.Fatal("Expected driver opened through OpenFromConnPool to have Pool, but it did not")
- }
-
- // Normal sql/database still works
- var n int64
- err = db.QueryRow("select 1").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-}
-
-func TestOpenFromConnPoolRace(t *testing.T) {
- wg := &sync.WaitGroup{}
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- wg.Add(10)
- for i := 0; i < 10; i++ {
- go func() {
- defer wg.Done()
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // Can get pgx.ConnPool from driver
- driver := db.Driver().(*stdlib.Driver)
- if driver.Pool == nil {
- t.Fatal("Expected driver opened through OpenFromConnPool to have Pool, but it did not")
- }
- }()
- }
-
- wg.Wait()
-}
-
-func TestStmtExec(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- tx, err := db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- createStmt := prepareStmt(t, tx, "create temporary table t(a varchar not null)")
- _, err = createStmt.Exec()
- if err != nil {
- t.Fatalf("stmt.Exec unexpectedly failed: %v", err)
- }
- closeStmt(t, createStmt)
-
- insertStmt := prepareStmt(t, tx, "insert into t values($1::text)")
- result, err := insertStmt.Exec("foo")
- if err != nil {
- t.Fatalf("stmt.Exec unexpectedly failed: %v", err)
- }
-
- n, err := result.RowsAffected()
- if err != nil {
- t.Fatalf("result.RowsAffected unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1, received %d", n)
- }
- closeStmt(t, insertStmt)
-
- if err != nil {
- t.Fatalf("tx.Commit unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestQueryCloseRowsEarly(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- stmt := prepareStmt(t, db, "select 'foo', n from generate_series($1::int, $2::int) n")
- defer closeStmt(t, stmt)
-
- rows, err := stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- // Close rows immediately without having read them
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- // Run the query again to ensure the connection and statement are still ok
- rows, err = stmt.Query(int32(1), int32(10))
- if err != nil {
- t.Fatalf("stmt.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnExec(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Exec("create temporary table t(a varchar not null)")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- result, err := db.Exec("insert into t values('hey')")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- n, err := result.RowsAffected()
- if err != nil {
- t.Fatalf("result.RowsAffected unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1, received %d", n)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQuery(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- rows, err := db.Query("select 'foo', n from generate_series($1::int, $2::int) n", int32(1), int32(10))
- if err != nil {
- t.Fatalf("db.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var s string
- var n int64
- if err := rows.Scan(&s, &n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if s != "foo" {
- t.Errorf(`Expected "foo", received "%v"`, s)
- }
- if n != rowCount {
- t.Errorf("Expected %d, received %d", rowCount, n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 10 {
- t.Fatalf("Expected to receive 10 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-type testLog struct {
- lvl int
- msg string
- ctx []interface{}
-}
-
-type testLogger struct {
- logs []testLog
-}
-
-func (l *testLogger) Debug(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelDebug, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Info(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelInfo, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Warn(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelWarn, msg: msg, ctx: ctx})
-}
-func (l *testLogger) Error(msg string, ctx ...interface{}) {
- l.logs = append(l.logs, testLog{lvl: pgx.LogLevelError, msg: msg, ctx: ctx})
-}
-
-func TestConnQueryLog(t *testing.T) {
- logger := &testLogger{}
-
- connConfig := pgx.ConnConfig{
- Host: "127.0.0.1",
- User: "pgx_md5",
- Password: "secret",
- Database: "pgx_test",
- Logger: logger,
- }
-
- config := pgx.ConnPoolConfig{ConnConfig: connConfig}
- pool, err := pgx.NewConnPool(config)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer pool.Close()
-
- db, err := stdlib.OpenFromConnPool(pool)
- if err != nil {
- t.Fatalf("Unable to create connection pool: %v", err)
- }
- defer closeDB(t, db)
-
- // clear logs from initial connection
- logger.logs = []testLog{}
-
- var n int64
- err = db.QueryRow("select 1").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- l := logger.logs[0]
- if l.msg != "Query" {
- t.Errorf("Expected to log Query, but got %v", l)
- }
-
- if !(l.ctx[0] == "sql" && l.ctx[1] == "select 1") {
- t.Errorf("Expected to log Query with sql 'select 1', but got %v", l)
- }
-}
-
-func TestConnQueryNull(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- rows, err := db.Query("select $1::int", nil)
- if err != nil {
- t.Fatalf("db.Query unexpectedly failed: %v", err)
- }
-
- rowCount := int64(0)
-
- for rows.Next() {
- rowCount++
-
- var n sql.NullInt64
- if err := rows.Scan(&n); err != nil {
- t.Fatalf("rows.Scan unexpectedly failed: %v", err)
- }
- if n.Valid != false {
- t.Errorf("Expected n to be null, but it was %v", n)
- }
- }
- err = rows.Err()
- if err != nil {
- t.Fatalf("rows.Err unexpectedly is: %v", err)
- }
- if rowCount != 1 {
- t.Fatalf("Expected to receive 11 rows, instead received %d", rowCount)
- }
-
- err = rows.Close()
- if err != nil {
- t.Fatalf("rows.Close unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryRowByteSlice(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- expected := []byte{222, 173, 190, 239}
- var actual []byte
-
- err := db.QueryRow(`select E'\\xdeadbeef'::bytea`).Scan(&actual)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Fatalf("Expected %v, but got %v", expected, actual)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryFailure(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Query("select 'foo")
- if _, ok := err.(pgx.PgError); !ok {
- t.Fatalf("Expected db.Query to return pgx.PgError, but instead received: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-// Test type that pgx would handle natively in binary, but since it is not a
-// database/sql native type should be passed through as a string
-func TestConnQueryRowPgxBinary(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- sql := "select $1::int4[]"
- expected := "{1,2,3}"
- var actual string
-
- err := db.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryRowUnknownType(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- sql := "select $1::point"
- expected := "(1,2)"
- var actual string
-
- err := db.QueryRow(sql, expected).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if actual != expected {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, expected, actual, sql)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnQueryJSONIntoByteSlice(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- if !serverHasJSON(t, db) {
- t.Skip("Skipping due to server's lack of JSON type")
- }
-
- _, err := db.Exec(`
- create temporary table docs(
- body json not null
- );
-
- insert into docs(body) values('{"foo":"bar"}');
-`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- sql := `select * from docs`
- expected := []byte(`{"foo":"bar"}`)
- var actual []byte
-
- err = db.QueryRow(sql).Scan(&actual)
- if err != nil {
- t.Errorf("Unexpected failure: %v (sql -> %v)", err, sql)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Errorf(`Expected "%v", got "%v" (sql -> %v)`, string(expected), string(actual), sql)
- }
-
- _, err = db.Exec(`drop table docs`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func TestConnExecInsertByteSliceIntoJSON(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- if !serverHasJSON(t, db) {
- t.Skip("Skipping due to server's lack of JSON type")
- }
-
- _, err := db.Exec(`
- create temporary table docs(
- body json not null
- );
-`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- expected := []byte(`{"foo":"bar"}`)
-
- _, err = db.Exec(`insert into docs(body) values($1)`, expected)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- var actual []byte
- err = db.QueryRow(`select body from docs`).Scan(&actual)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
-
- if bytes.Compare(actual, expected) != 0 {
- t.Errorf(`Expected "%v", got "%v"`, string(expected), string(actual))
- }
-
- _, err = db.Exec(`drop table docs`)
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- ensureConnValid(t, db)
-}
-
-func serverHasJSON(t *testing.T, db *sql.DB) bool {
- var hasJSON bool
- err := db.QueryRow(`select exists(select 1 from pg_type where typname='json')`).Scan(&hasJSON)
- if err != nil {
- t.Fatalf("db.QueryRow unexpectedly failed: %v", err)
- }
- return hasJSON
-}
-
-func TestTransactionLifeCycle(t *testing.T) {
- db := openDB(t)
- defer closeDB(t, db)
-
- _, err := db.Exec("create temporary table t(a varchar not null)")
- if err != nil {
- t.Fatalf("db.Exec unexpectedly failed: %v", err)
- }
-
- tx, err := db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- _, err = tx.Exec("insert into t values('hi')")
- if err != nil {
- t.Fatalf("tx.Exec unexpectedly failed: %v", err)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback unexpectedly failed: %v", err)
- }
-
- var n int64
- err = db.QueryRow("select count(*) from t").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow.Scan unexpectedly failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Expected 0 rows due to rollback, instead found %d", n)
- }
-
- tx, err = db.Begin()
- if err != nil {
- t.Fatalf("db.Begin unexpectedly failed: %v", err)
- }
-
- _, err = tx.Exec("insert into t values('hi')")
- if err != nil {
- t.Fatalf("tx.Exec unexpectedly failed: %v", err)
- }
-
- err = tx.Commit()
- if err != nil {
- t.Fatalf("tx.Commit unexpectedly failed: %v", err)
- }
-
- err = db.QueryRow("select count(*) from t").Scan(&n)
- if err != nil {
- t.Fatalf("db.QueryRow.Scan unexpectedly failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Expected 1 rows due to rollback, instead found %d", n)
- }
-
- ensureConnValid(t, db)
-}
diff --git a/vendor/github.com/jackc/pgx/stress_test.go b/vendor/github.com/jackc/pgx/stress_test.go
deleted file mode 100644
index 150d13c..0000000
--- a/vendor/github.com/jackc/pgx/stress_test.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package pgx_test
-
-import (
- "errors"
- "fmt"
- "math/rand"
- "testing"
- "time"
-
- "github.com/jackc/fake"
- "github.com/jackc/pgx"
-)
-
-type execer interface {
- Exec(sql string, arguments ...interface{}) (commandTag pgx.CommandTag, err error)
-}
-type queryer interface {
- Query(sql string, args ...interface{}) (*pgx.Rows, error)
-}
-type queryRower interface {
- QueryRow(sql string, args ...interface{}) *pgx.Row
-}
-
-func TestStressConnPool(t *testing.T) {
- maxConnections := 8
- pool := createConnPool(t, maxConnections)
- defer pool.Close()
-
- setupStressDB(t, pool)
-
- actions := []struct {
- name string
- fn func(*pgx.ConnPool, int) error
- }{
- {"insertUnprepared", func(p *pgx.ConnPool, n int) error { return insertUnprepared(p, n) }},
- {"queryRowWithoutParams", func(p *pgx.ConnPool, n int) error { return queryRowWithoutParams(p, n) }},
- {"query", func(p *pgx.ConnPool, n int) error { return queryCloseEarly(p, n) }},
- {"queryCloseEarly", func(p *pgx.ConnPool, n int) error { return query(p, n) }},
- {"queryErrorWhileReturningRows", func(p *pgx.ConnPool, n int) error { return queryErrorWhileReturningRows(p, n) }},
- {"txInsertRollback", txInsertRollback},
- {"txInsertCommit", txInsertCommit},
- {"txMultipleQueries", txMultipleQueries},
- {"notify", notify},
- {"listenAndPoolUnlistens", listenAndPoolUnlistens},
- {"reset", func(p *pgx.ConnPool, n int) error { p.Reset(); return nil }},
- {"poolPrepareUseAndDeallocate", poolPrepareUseAndDeallocate},
- }
-
- var timer *time.Timer
- if testing.Short() {
- timer = time.NewTimer(5 * time.Second)
- } else {
- timer = time.NewTimer(60 * time.Second)
- }
- workerCount := 16
-
- workChan := make(chan int)
- doneChan := make(chan struct{})
- errChan := make(chan error)
-
- work := func() {
- for n := range workChan {
- action := actions[rand.Intn(len(actions))]
- err := action.fn(pool, n)
- if err != nil {
- errChan <- err
- break
- }
- }
- doneChan <- struct{}{}
- }
-
- for i := 0; i < workerCount; i++ {
- go work()
- }
-
- var stop bool
- for i := 0; !stop; i++ {
- select {
- case <-timer.C:
- stop = true
- case workChan <- i:
- case err := <-errChan:
- close(workChan)
- t.Fatal(err)
- }
- }
- close(workChan)
-
- for i := 0; i < workerCount; i++ {
- <-doneChan
- }
-}
-
-func TestStressTLSConnection(t *testing.T) {
- t.Parallel()
-
- if tlsConnConfig == nil {
- t.Skip("Skipping due to undefined tlsConnConfig")
- }
-
- if testing.Short() {
- t.Skip("Skipping due to testing -short")
- }
-
- conn, err := pgx.Connect(*tlsConnConfig)
- if err != nil {
- t.Fatalf("Unable to establish connection: %v", err)
- }
- defer conn.Close()
-
- for i := 0; i < 50; i++ {
- sql := `select * from generate_series(1, $1)`
-
- rows, err := conn.Query(sql, 2000000)
- if err != nil {
- t.Fatal(err)
- }
-
- var n int32
- for rows.Next() {
- rows.Scan(&n)
- }
-
- if rows.Err() != nil {
- t.Fatalf("queryCount: %d, Row number: %d. %v", i, n, rows.Err())
- }
- }
-}
-
-func setupStressDB(t *testing.T, pool *pgx.ConnPool) {
- _, err := pool.Exec(`
- drop table if exists widgets;
- create table widgets(
- id serial primary key,
- name varchar not null,
- description text,
- creation_time timestamptz
- );
-`)
- if err != nil {
- t.Fatal(err)
- }
-}
-
-func insertUnprepared(e execer, actionNum int) error {
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err := e.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- return err
-}
-
-func queryRowWithoutParams(qr queryRower, actionNum int) error {
- var id int32
- var name, description string
- var creationTime time.Time
-
- sql := `select * from widgets order by random() limit 1`
-
- err := qr.QueryRow(sql).Scan(&id, &name, &description, &creationTime)
- if err == pgx.ErrNoRows {
- return nil
- }
- return err
-}
-
-func query(q queryer, actionNum int) error {
- sql := `select * from widgets order by random() limit $1`
-
- rows, err := q.Query(sql, 10)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- for rows.Next() {
- var id int32
- var name, description string
- var creationTime time.Time
- rows.Scan(&id, &name, &description, &creationTime)
- }
-
- return rows.Err()
-}
-
-func queryCloseEarly(q queryer, actionNum int) error {
- sql := `select * from generate_series(1,$1)`
-
- rows, err := q.Query(sql, 100)
- if err != nil {
- return err
- }
- defer rows.Close()
-
- for i := 0; i < 10 && rows.Next(); i++ {
- var n int32
- rows.Scan(&n)
- }
- rows.Close()
-
- return rows.Err()
-}
-
-func queryErrorWhileReturningRows(q queryer, actionNum int) error {
- // This query should divide by 0 within the first number of rows
- sql := `select 42 / (random() * 20)::integer from generate_series(1,100000)`
-
- rows, err := q.Query(sql)
- if err != nil {
- return nil
- }
- defer rows.Close()
-
- for rows.Next() {
- var n int32
- rows.Scan(&n)
- }
-
- if _, ok := rows.Err().(pgx.PgError); ok {
- return nil
- }
- return rows.Err()
-}
-
-func notify(pool *pgx.ConnPool, actionNum int) error {
- _, err := pool.Exec("notify stress")
- return err
-}
-
-func listenAndPoolUnlistens(pool *pgx.ConnPool, actionNum int) error {
- conn, err := pool.Acquire()
- if err != nil {
- return err
- }
- defer pool.Release(conn)
-
- err = conn.Listen("stress")
- if err != nil {
- return err
- }
-
- _, err = conn.WaitForNotification(100 * time.Millisecond)
- if err == pgx.ErrNotificationTimeout {
- return nil
- }
- return err
-}
-
-func poolPrepareUseAndDeallocate(pool *pgx.ConnPool, actionNum int) error {
- psName := fmt.Sprintf("poolPreparedStatement%d", actionNum)
-
- _, err := pool.Prepare(psName, "select $1::text")
- if err != nil {
- return err
- }
-
- var s string
- err = pool.QueryRow(psName, "hello").Scan(&s)
- if err != nil {
- return err
- }
-
- if s != "hello" {
- return fmt.Errorf("Prepared statement did not return expected value: %v", s)
- }
-
- return pool.Deallocate(psName)
-}
-
-func txInsertRollback(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
-
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err = tx.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- if err != nil {
- return err
- }
-
- return tx.Rollback()
-}
-
-func txInsertCommit(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
-
- sql := `
- insert into widgets(name, description, creation_time)
- values($1, $2, $3)`
-
- _, err = tx.Exec(sql, fake.ProductName(), fake.Sentences(), time.Now())
- if err != nil {
- tx.Rollback()
- return err
- }
-
- return tx.Commit()
-}
-
-func txMultipleQueries(pool *pgx.ConnPool, actionNum int) error {
- tx, err := pool.Begin()
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- errExpectedTxDeath := errors.New("Expected tx death")
-
- actions := []struct {
- name string
- fn func() error
- }{
- {"insertUnprepared", func() error { return insertUnprepared(tx, actionNum) }},
- {"queryRowWithoutParams", func() error { return queryRowWithoutParams(tx, actionNum) }},
- {"query", func() error { return query(tx, actionNum) }},
- {"queryCloseEarly", func() error { return queryCloseEarly(tx, actionNum) }},
- {"queryErrorWhileReturningRows", func() error {
- err := queryErrorWhileReturningRows(tx, actionNum)
- if err != nil {
- return err
- }
- return errExpectedTxDeath
- }},
- }
-
- for i := 0; i < 20; i++ {
- action := actions[rand.Intn(len(actions))]
- err := action.fn()
- if err == errExpectedTxDeath {
- return nil
- } else if err != nil {
- return err
- }
- }
-
- return tx.Commit()
-}
diff --git a/vendor/github.com/jackc/pgx/tx.go b/vendor/github.com/jackc/pgx/tx.go
index deb6c01..81fcfa2 100644
--- a/vendor/github.com/jackc/pgx/tx.go
+++ b/vendor/github.com/jackc/pgx/tx.go
@@ -1,16 +1,38 @@
package pgx
import (
- "errors"
+ "bytes"
+ "context"
"fmt"
+ "time"
+
+ "github.com/pkg/errors"
)
+type TxIsoLevel string
+
// Transaction isolation levels
const (
- Serializable = "serializable"
- RepeatableRead = "repeatable read"
- ReadCommitted = "read committed"
- ReadUncommitted = "read uncommitted"
+ Serializable = TxIsoLevel("serializable")
+ RepeatableRead = TxIsoLevel("repeatable read")
+ ReadCommitted = TxIsoLevel("read committed")
+ ReadUncommitted = TxIsoLevel("read uncommitted")
+)
+
+type TxAccessMode string
+
+// Transaction access modes
+const (
+ ReadWrite = TxAccessMode("read write")
+ ReadOnly = TxAccessMode("read only")
+)
+
+type TxDeferrableMode string
+
+// Transaction deferrable modes
+const (
+ Deferrable = TxDeferrableMode("deferrable")
+ NotDeferrable = TxDeferrableMode("not deferrable")
)
const (
@@ -21,6 +43,32 @@ const (
TxStatusRollbackSuccess = 2
)
+type TxOptions struct {
+ IsoLevel TxIsoLevel
+ AccessMode TxAccessMode
+ DeferrableMode TxDeferrableMode
+}
+
+func (txOptions *TxOptions) beginSQL() string {
+ if txOptions == nil {
+ return "begin"
+ }
+
+ buf := &bytes.Buffer{}
+ buf.WriteString("begin")
+ if txOptions.IsoLevel != "" {
+ fmt.Fprintf(buf, " isolation level %s", txOptions.IsoLevel)
+ }
+ if txOptions.AccessMode != "" {
+ fmt.Fprintf(buf, " %s", txOptions.AccessMode)
+ }
+ if txOptions.DeferrableMode != "" {
+ fmt.Fprintf(buf, " %s", txOptions.DeferrableMode)
+ }
+
+ return buf.String()
+}
+
var ErrTxClosed = errors.New("tx is closed")
// ErrTxCommitRollback occurs when an error has occurred in a transaction and
@@ -28,34 +76,21 @@ var ErrTxClosed = errors.New("tx is closed")
// it is treated as ROLLBACK.
var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback")
-// Begin starts a transaction with the default isolation level for the current
-// connection. To use a specific isolation level see BeginIso.
+// Begin starts a transaction with the default transaction mode for the
+// current connection. To use a specific transaction mode see BeginEx.
func (c *Conn) Begin() (*Tx, error) {
- return c.begin("")
+ return c.BeginEx(context.Background(), nil)
}
-// BeginIso starts a transaction with isoLevel as the transaction isolation
-// level.
-//
-// Valid isolation levels (and their constants) are:
-// serializable (pgx.Serializable)
-// repeatable read (pgx.RepeatableRead)
-// read committed (pgx.ReadCommitted)
-// read uncommitted (pgx.ReadUncommitted)
-func (c *Conn) BeginIso(isoLevel string) (*Tx, error) {
- return c.begin(isoLevel)
-}
-
-func (c *Conn) begin(isoLevel string) (*Tx, error) {
- var beginSQL string
- if isoLevel == "" {
- beginSQL = "begin"
- } else {
- beginSQL = fmt.Sprintf("begin isolation level %s", isoLevel)
- }
-
- _, err := c.Exec(beginSQL)
+// BeginEx starts a transaction with txOptions determining the transaction
+// mode. Unlike database/sql, the context only affects the begin command. i.e.
+// there is no auto-rollback on context cancelation.
+func (c *Conn) BeginEx(ctx context.Context, txOptions *TxOptions) (*Tx, error) {
+ _, err := c.ExecEx(ctx, txOptions.beginSQL(), nil)
if err != nil {
+ // begin should never fail unless there is an underlying connection issue or
+ // a context timeout. In either case, the connection is possibly broken.
+ c.die(errors.New("failed to begin transaction"))
return nil, err
}
@@ -67,19 +102,24 @@ func (c *Conn) begin(isoLevel string) (*Tx, error) {
// All Tx methods return ErrTxClosed if Commit or Rollback has already been
// called on the Tx.
type Tx struct {
- conn *Conn
- afterClose func(*Tx)
- err error
- status int8
+ conn *Conn
+ connPool *ConnPool
+ err error
+ status int8
}
// Commit commits the transaction
func (tx *Tx) Commit() error {
+ return tx.CommitEx(context.Background())
+}
+
+// CommitEx commits the transaction with a context.
+func (tx *Tx) CommitEx(ctx context.Context) error {
if tx.status != TxStatusInProgress {
return ErrTxClosed
}
- commandTag, err := tx.conn.Exec("commit")
+ commandTag, err := tx.conn.ExecEx(ctx, "commit", nil)
if err == nil && commandTag == "COMMIT" {
tx.status = TxStatusCommitSuccess
} else if err == nil && commandTag == "ROLLBACK" {
@@ -88,11 +128,14 @@ func (tx *Tx) Commit() error {
} else {
tx.status = TxStatusCommitFailure
tx.err = err
+ // A commit failure leaves the connection in an undefined state
+ tx.conn.die(errors.New("commit failed"))
}
- if tx.afterClose != nil {
- tx.afterClose(tx)
+ if tx.connPool != nil {
+ tx.connPool.Release(tx.conn)
}
+
return tx.err
}
@@ -101,55 +144,74 @@ func (tx *Tx) Commit() error {
// defer tx.Rollback() is safe even if tx.Commit() will be called first in a
// non-error condition.
func (tx *Tx) Rollback() error {
+ ctx, _ := context.WithTimeout(context.Background(), 15*time.Second)
+ return tx.RollbackEx(ctx)
+}
+
+// RollbackEx is the context version of Rollback
+func (tx *Tx) RollbackEx(ctx context.Context) error {
if tx.status != TxStatusInProgress {
return ErrTxClosed
}
- _, tx.err = tx.conn.Exec("rollback")
+ _, tx.err = tx.conn.ExecEx(ctx, "rollback", nil)
if tx.err == nil {
tx.status = TxStatusRollbackSuccess
} else {
tx.status = TxStatusRollbackFailure
+ // A rollback failure leaves the connection in an undefined state
+ tx.conn.die(errors.New("rollback failed"))
}
- if tx.afterClose != nil {
- tx.afterClose(tx)
+ if tx.connPool != nil {
+ tx.connPool.Release(tx.conn)
}
+
return tx.err
}
// Exec delegates to the underlying *Conn
func (tx *Tx) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) {
+ return tx.ExecEx(context.Background(), sql, nil, arguments...)
+}
+
+// ExecEx delegates to the underlying *Conn
+func (tx *Tx) ExecEx(ctx context.Context, sql string, options *QueryExOptions, arguments ...interface{}) (commandTag CommandTag, err error) {
if tx.status != TxStatusInProgress {
return CommandTag(""), ErrTxClosed
}
- return tx.conn.Exec(sql, arguments...)
+ return tx.conn.ExecEx(ctx, sql, options, arguments...)
}
// Prepare delegates to the underlying *Conn
func (tx *Tx) Prepare(name, sql string) (*PreparedStatement, error) {
- return tx.PrepareEx(name, sql, nil)
+ return tx.PrepareEx(context.Background(), name, sql, nil)
}
// PrepareEx delegates to the underlying *Conn
-func (tx *Tx) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
+func (tx *Tx) PrepareEx(ctx context.Context, name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) {
if tx.status != TxStatusInProgress {
return nil, ErrTxClosed
}
- return tx.conn.PrepareEx(name, sql, opts)
+ return tx.conn.PrepareEx(ctx, name, sql, opts)
}
// Query delegates to the underlying *Conn
func (tx *Tx) Query(sql string, args ...interface{}) (*Rows, error) {
+ return tx.QueryEx(context.Background(), sql, nil, args...)
+}
+
+// QueryEx delegates to the underlying *Conn
+func (tx *Tx) QueryEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) (*Rows, error) {
if tx.status != TxStatusInProgress {
// Because checking for errors can be deferred to the *Rows, build one with the error
err := ErrTxClosed
return &Rows{closed: true, err: err}, err
}
- return tx.conn.Query(sql, args...)
+ return tx.conn.QueryEx(ctx, sql, options, args...)
}
// QueryRow delegates to the underlying *Conn
@@ -158,13 +220,10 @@ func (tx *Tx) QueryRow(sql string, args ...interface{}) *Row {
return (*Row)(rows)
}
-// Deprecated. Use CopyFrom instead. CopyTo delegates to the underlying *Conn
-func (tx *Tx) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
- if tx.status != TxStatusInProgress {
- return 0, ErrTxClosed
- }
-
- return tx.conn.CopyTo(tableName, columnNames, rowSrc)
+// QueryRowEx delegates to the underlying *Conn
+func (tx *Tx) QueryRowEx(ctx context.Context, sql string, options *QueryExOptions, args ...interface{}) *Row {
+ rows, _ := tx.QueryEx(ctx, sql, options, args...)
+ return (*Row)(rows)
}
// CopyFrom delegates to the underlying *Conn
@@ -176,11 +235,6 @@ func (tx *Tx) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFr
return tx.conn.CopyFrom(tableName, columnNames, rowSrc)
}
-// Conn returns the *Conn this transaction is using.
-func (tx *Tx) Conn() *Conn {
- return tx.conn
-}
-
// Status returns the status of the transaction from the set of
// pgx.TxStatus* constants.
func (tx *Tx) Status() int8 {
@@ -191,17 +245,3 @@ func (tx *Tx) Status() int8 {
func (tx *Tx) Err() error {
return tx.err
}
-
-// AfterClose adds f to a LILO queue of functions that will be called when
-// the transaction is closed (either Commit or Rollback).
-func (tx *Tx) AfterClose(f func(*Tx)) {
- if tx.afterClose == nil {
- tx.afterClose = f
- } else {
- prevFn := tx.afterClose
- tx.afterClose = func(tx *Tx) {
- f(tx)
- prevFn(tx)
- }
- }
-}
diff --git a/vendor/github.com/jackc/pgx/tx_test.go b/vendor/github.com/jackc/pgx/tx_test.go
deleted file mode 100644
index 435521a..0000000
--- a/vendor/github.com/jackc/pgx/tx_test.go
+++ /dev/null
@@ -1,297 +0,0 @@
-package pgx_test
-
-import (
- "github.com/jackc/pgx"
- "testing"
- "time"
-)
-
-func TestTransactionSuccessfulCommit(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- _, err = tx.Exec("insert into foo(id) values (1)")
- if err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- err = tx.Commit()
- if err != nil {
- t.Fatalf("tx.Commit failed: %v", err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 1 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestTxCommitWhenTxBroken(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- if _, err := tx.Exec("insert into foo(id) values (1)"); err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- // Purposely break transaction
- if _, err := tx.Exec("syntax error"); err == nil {
- t.Fatal("Unexpected success")
- }
-
- err = tx.Commit()
- if err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestTxCommitSerializationFailure(t *testing.T) {
- t.Parallel()
-
- pool := createConnPool(t, 5)
- defer pool.Close()
-
- pool.Exec(`drop table if exists tx_serializable_sums`)
- _, err := pool.Exec(`create table tx_serializable_sums(num integer);`)
- if err != nil {
- t.Fatalf("Unable to create temporary table: %v", err)
- }
- defer pool.Exec(`drop table tx_serializable_sums`)
-
- tx1, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("BeginIso failed: %v", err)
- }
- defer tx1.Rollback()
-
- tx2, err := pool.BeginIso(pgx.Serializable)
- if err != nil {
- t.Fatalf("BeginIso failed: %v", err)
- }
- defer tx2.Rollback()
-
- _, err = tx1.Exec(`insert into tx_serializable_sums(num) select sum(num) from tx_serializable_sums`)
- if err != nil {
- t.Fatalf("Exec failed: %v", err)
- }
-
- _, err = tx2.Exec(`insert into tx_serializable_sums(num) select sum(num) from tx_serializable_sums`)
- if err != nil {
- t.Fatalf("Exec failed: %v", err)
- }
-
- err = tx1.Commit()
- if err != nil {
- t.Fatalf("Commit failed: %v", err)
- }
-
- err = tx2.Commit()
- if pgErr, ok := err.(pgx.PgError); !ok || pgErr.Code != "40001" {
- t.Fatalf("Expected serialization error 40001, got %#v", err)
- }
-}
-
-func TestTransactionSuccessfulRollback(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- createSql := `
- create temporary table foo(
- id integer,
- unique (id) initially deferred
- );
- `
-
- if _, err := conn.Exec(createSql); err != nil {
- t.Fatalf("Failed to create table: %v", err)
- }
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatalf("conn.Begin failed: %v", err)
- }
-
- _, err = tx.Exec("insert into foo(id) values (1)")
- if err != nil {
- t.Fatalf("tx.Exec failed: %v", err)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
-
- var n int64
- err = conn.QueryRow("select count(*) from foo").Scan(&n)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if n != 0 {
- t.Fatalf("Did not receive correct number of rows: %v", n)
- }
-}
-
-func TestBeginIso(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- isoLevels := []string{pgx.Serializable, pgx.RepeatableRead, pgx.ReadCommitted, pgx.ReadUncommitted}
- for _, iso := range isoLevels {
- tx, err := conn.BeginIso(iso)
- if err != nil {
- t.Fatalf("conn.BeginIso failed: %v", err)
- }
-
- var level string
- conn.QueryRow("select current_setting('transaction_isolation')").Scan(&level)
- if level != iso {
- t.Errorf("Expected to be in isolation level %v but was %v", iso, level)
- }
-
- err = tx.Rollback()
- if err != nil {
- t.Fatalf("tx.Rollback failed: %v", err)
- }
- }
-}
-
-func TestTxAfterClose(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- var zeroTime, t1, t2 time.Time
- tx.AfterClose(func(tx *pgx.Tx) {
- t1 = time.Now()
- })
-
- tx.AfterClose(func(tx *pgx.Tx) {
- t2 = time.Now()
- })
-
- tx.Rollback()
-
- if t1 == zeroTime {
- t.Error("First Tx.AfterClose callback not called")
- }
-
- if t2 == zeroTime {
- t.Error("Second Tx.AfterClose callback not called")
- }
-
- if t1.Before(t2) {
- t.Errorf("AfterClose callbacks called out of order: %v, %v", t1, t2)
- }
-}
-
-func TestTxStatus(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusInProgress {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusInProgress, status)
- }
-
- if err := tx.Rollback(); err != nil {
- t.Fatal(err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusRollbackSuccess {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusRollbackSuccess, status)
- }
-}
-
-func TestTxErr(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tx, err := conn.Begin()
- if err != nil {
- t.Fatal(err)
- }
-
- // Purposely break transaction
- if _, err := tx.Exec("syntax error"); err == nil {
- t.Fatal("Unexpected success")
- }
-
- if err := tx.Commit(); err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-
- if status := tx.Status(); status != pgx.TxStatusCommitFailure {
- t.Fatalf("Expected status to be %v, but it was %v", pgx.TxStatusRollbackSuccess, status)
- }
-
- if err := tx.Err(); err != pgx.ErrTxCommitRollback {
- t.Fatalf("Expected error %v, got %v", pgx.ErrTxCommitRollback, err)
- }
-}
diff --git a/vendor/github.com/jackc/pgx/value_reader.go b/vendor/github.com/jackc/pgx/value_reader.go
deleted file mode 100644
index a489754..0000000
--- a/vendor/github.com/jackc/pgx/value_reader.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package pgx
-
-import (
- "errors"
-)
-
-// ValueReader is used by the Scanner interface to decode values.
-type ValueReader struct {
- mr *msgReader
- fd *FieldDescription
- valueBytesRemaining int32
- err error
-}
-
-// Err returns any error that the ValueReader has experienced
-func (r *ValueReader) Err() error {
- return r.err
-}
-
-// Fatal tells r that a Fatal error has occurred
-func (r *ValueReader) Fatal(err error) {
- r.err = err
-}
-
-// Len returns the number of unread bytes
-func (r *ValueReader) Len() int32 {
- return r.valueBytesRemaining
-}
-
-// Type returns the *FieldDescription of the value
-func (r *ValueReader) Type() *FieldDescription {
- return r.fd
-}
-
-func (r *ValueReader) ReadByte() byte {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining--
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readByte()
-}
-
-func (r *ValueReader) ReadInt16() int16 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 2
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt16()
-}
-
-func (r *ValueReader) ReadUint16() uint16 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 2
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readUint16()
-}
-
-func (r *ValueReader) ReadInt32() int32 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 4
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt32()
-}
-
-func (r *ValueReader) ReadUint32() uint32 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 4
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readUint32()
-}
-
-func (r *ValueReader) ReadInt64() int64 {
- if r.err != nil {
- return 0
- }
-
- r.valueBytesRemaining -= 8
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return 0
- }
-
- return r.mr.readInt64()
-}
-
-func (r *ValueReader) ReadOid() Oid {
- return Oid(r.ReadUint32())
-}
-
-// ReadString reads count bytes and returns as string
-func (r *ValueReader) ReadString(count int32) string {
- if r.err != nil {
- return ""
- }
-
- r.valueBytesRemaining -= count
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return ""
- }
-
- return r.mr.readString(count)
-}
-
-// ReadBytes reads count bytes and returns as []byte
-func (r *ValueReader) ReadBytes(count int32) []byte {
- if r.err != nil {
- return nil
- }
-
- if count < 0 {
- r.Fatal(errors.New("count must not be negative"))
- return nil
- }
-
- r.valueBytesRemaining -= count
- if r.valueBytesRemaining < 0 {
- r.Fatal(errors.New("read past end of value"))
- return nil
- }
-
- return r.mr.readBytes(count)
-}
diff --git a/vendor/github.com/jackc/pgx/values.go b/vendor/github.com/jackc/pgx/values.go
index a189e18..6a1c4f0 100644
--- a/vendor/github.com/jackc/pgx/values.go
+++ b/vendor/github.com/jackc/pgx/values.go
@@ -1,62 +1,15 @@
package pgx
import (
- "bytes"
"database/sql/driver"
- "encoding/json"
"fmt"
- "io"
"math"
- "net"
"reflect"
- "regexp"
- "strconv"
- "strings"
"time"
-)
-// PostgreSQL oids for common types
-const (
- BoolOid = 16
- ByteaOid = 17
- CharOid = 18
- NameOid = 19
- Int8Oid = 20
- Int2Oid = 21
- Int4Oid = 23
- TextOid = 25
- OidOid = 26
- TidOid = 27
- XidOid = 28
- CidOid = 29
- JsonOid = 114
- CidrOid = 650
- CidrArrayOid = 651
- Float4Oid = 700
- Float8Oid = 701
- UnknownOid = 705
- InetOid = 869
- BoolArrayOid = 1000
- Int2ArrayOid = 1005
- Int4ArrayOid = 1007
- TextArrayOid = 1009
- ByteaArrayOid = 1001
- VarcharArrayOid = 1015
- Int8ArrayOid = 1016
- Float4ArrayOid = 1021
- Float8ArrayOid = 1022
- AclItemOid = 1033
- AclItemArrayOid = 1034
- InetArrayOid = 1041
- VarcharOid = 1043
- DateOid = 1082
- TimestampOid = 1114
- TimestampArrayOid = 1115
- TimestampTzOid = 1184
- TimestampTzArrayOid = 1185
- RecordOid = 2249
- UuidOid = 2950
- JsonbOid = 3802
+ "github.com/jackc/pgx/pgio"
+ "github.com/jackc/pgx/pgtype"
+ "github.com/pkg/errors"
)
// PostgreSQL format codes
@@ -65,61 +18,6 @@ const (
BinaryFormatCode = 1
)
-const maxUint = ^uint(0)
-const maxInt = int(maxUint >> 1)
-const minInt = -maxInt - 1
-
-// DefaultTypeFormats maps type names to their default requested format (text
-// or binary). In theory the Scanner interface should be the one to determine
-// the format of the returned values. However, the query has already been
-// executed by the time Scan is called so it has no chance to set the format.
-// So for types that should always be returned in binary the format should be
-// set here.
-var DefaultTypeFormats map[string]int16
-
-func init() {
- DefaultTypeFormats = map[string]int16{
- "_aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin)
- "_bool": BinaryFormatCode,
- "_bytea": BinaryFormatCode,
- "_cidr": BinaryFormatCode,
- "_float4": BinaryFormatCode,
- "_float8": BinaryFormatCode,
- "_inet": BinaryFormatCode,
- "_int2": BinaryFormatCode,
- "_int4": BinaryFormatCode,
- "_int8": BinaryFormatCode,
- "_text": BinaryFormatCode,
- "_timestamp": BinaryFormatCode,
- "_timestamptz": BinaryFormatCode,
- "_varchar": BinaryFormatCode,
- "aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin)
- "bool": BinaryFormatCode,
- "bytea": BinaryFormatCode,
- "char": BinaryFormatCode,
- "cid": BinaryFormatCode,
- "cidr": BinaryFormatCode,
- "date": BinaryFormatCode,
- "float4": BinaryFormatCode,
- "float8": BinaryFormatCode,
- "json": BinaryFormatCode,
- "jsonb": BinaryFormatCode,
- "inet": BinaryFormatCode,
- "int2": BinaryFormatCode,
- "int4": BinaryFormatCode,
- "int8": BinaryFormatCode,
- "name": BinaryFormatCode,
- "oid": BinaryFormatCode,
- "record": BinaryFormatCode,
- "text": BinaryFormatCode,
- "tid": BinaryFormatCode,
- "timestamp": BinaryFormatCode,
- "timestamptz": BinaryFormatCode,
- "varchar": BinaryFormatCode,
- "xid": BinaryFormatCode,
- }
-}
-
// SerializationError occurs on failure to encode or decode a value
type SerializationError string
@@ -127,3313 +25,235 @@ func (e SerializationError) Error() string {
return string(e)
}
-// Deprecated: Scanner is an interface used to decode values from the PostgreSQL
-// server. To allow types to support pgx and database/sql.Scan this interface
-// has been deprecated in favor of PgxScanner.
-type Scanner interface {
- // Scan MUST check r.Type().DataType (to check by OID) or
- // r.Type().DataTypeName (to check by name) to ensure that it is scanning an
- // expected column type. It also MUST check r.Type().FormatCode before
- // decoding. It should not assume that it was called on a data type or format
- // that it understands.
- Scan(r *ValueReader) error
-}
-
-// PgxScanner is an interface used to decode values from the PostgreSQL server.
-// It is used exactly the same as the Scanner interface. It simply has renamed
-// the method.
-type PgxScanner interface {
- // ScanPgx MUST check r.Type().DataType (to check by OID) or
- // r.Type().DataTypeName (to check by name) to ensure that it is scanning an
- // expected column type. It also MUST check r.Type().FormatCode before
- // decoding. It should not assume that it was called on a data type or format
- // that it understands.
- ScanPgx(r *ValueReader) error
-}
-
-// Encoder is an interface used to encode values for transmission to the
-// PostgreSQL server.
-type Encoder interface {
- // Encode writes the value to w.
- //
- // If the value is NULL an int32(-1) should be written.
- //
- // Encode MUST check oid to see if the parameter data type is compatible. If
- // this is not done, the PostgreSQL server may detect the error if the
- // expected data size or format of the encoded data does not match. But if
- // the encoded data is a valid representation of the data type PostgreSQL
- // expects such as date and int4, incorrect data may be stored.
- Encode(w *WriteBuf, oid Oid) error
-
- // FormatCode returns the format that the encoder writes the value. It must be
- // either pgx.TextFormatCode or pgx.BinaryFormatCode.
- FormatCode() int16
-}
-
-// NullFloat32 represents an float4 that may be null. NullFloat32 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullFloat32 struct {
- Float32 float32
- Valid bool // Valid is true if Float32 is not NULL
-}
-
-func (n *NullFloat32) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Float4Oid {
- return SerializationError(fmt.Sprintf("NullFloat32.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Float32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Float32 = decodeFloat4(vr)
- return vr.Err()
-}
-
-func (n NullFloat32) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullFloat32) Encode(w *WriteBuf, oid Oid) error {
- if oid != Float4Oid {
- return SerializationError(fmt.Sprintf("NullFloat32.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeFloat32(w, oid, n.Float32)
-}
-
-// NullFloat64 represents an float8 that may be null. NullFloat64 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullFloat64 struct {
- Float64 float64
- Valid bool // Valid is true if Float64 is not NULL
-}
-
-func (n *NullFloat64) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Float8Oid {
- return SerializationError(fmt.Sprintf("NullFloat64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Float64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Float64 = decodeFloat8(vr)
- return vr.Err()
-}
-
-func (n NullFloat64) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullFloat64) Encode(w *WriteBuf, oid Oid) error {
- if oid != Float8Oid {
- return SerializationError(fmt.Sprintf("NullFloat64.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeFloat64(w, oid, n.Float64)
-}
-
-// NullString represents an string that may be null. NullString implements the
-// Scanner Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullString struct {
- String string
- Valid bool // Valid is true if String is not NULL
-}
-
-func (n *NullString) Scan(vr *ValueReader) error {
- // Not checking oid as so we can scan anything into into a NullString - may revisit this decision later
-
- if vr.Len() == -1 {
- n.String, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.String = decodeText(vr)
- return vr.Err()
-}
-
-func (n NullString) FormatCode() int16 { return TextFormatCode }
-
-func (n NullString) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, n.String)
-}
-
-// AclItem is used for PostgreSQL's aclitem data type. A sample aclitem
-// might look like this:
-//
-// postgres=arwdDxt/postgres
-//
-// Note, however, that because the user/role name part of an aclitem is
-// an identifier, it follows all the usual formatting rules for SQL
-// identifiers: if it contains spaces and other special characters,
-// it should appear in double-quotes:
-//
-// postgres=arwdDxt/"role with spaces"
-//
-type AclItem string
-
-// NullAclItem represents a pgx.AclItem that may be null. NullAclItem implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullAclItem struct {
- AclItem AclItem
- Valid bool // Valid is true if AclItem is not NULL
-}
-
-func (n *NullAclItem) Scan(vr *ValueReader) error {
- if vr.Type().DataType != AclItemOid {
- return SerializationError(fmt.Sprintf("NullAclItem.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.AclItem, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.AclItem = AclItem(decodeText(vr))
- return vr.Err()
-}
-
-// Particularly important to return TextFormatCode, seeing as Postgres
-// only ever sends aclitem as text, not binary.
-func (n NullAclItem) FormatCode() int16 { return TextFormatCode }
-
-func (n NullAclItem) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, string(n.AclItem))
-}
-
-// Name is a type used for PostgreSQL's special 63-byte
-// name data type, used for identifiers like table names.
-// The pg_class.relname column is a good example of where the
-// name data type is used.
-//
-// Note that the underlying Go data type of pgx.Name is string,
-// so there is no way to enforce the 63-byte length. Inputting
-// a longer name into PostgreSQL will result in silent truncation
-// to 63 bytes.
-//
-// Also, if you have custom-compiled PostgreSQL and set
-// NAMEDATALEN to a different value, obviously that number of
-// bytes applies, rather than the default 63.
-type Name string
-
-// NullName represents a pgx.Name that may be null. NullName implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullName struct {
- Name Name
- Valid bool // Valid is true if Name is not NULL
-}
-
-func (n *NullName) Scan(vr *ValueReader) error {
- if vr.Type().DataType != NameOid {
- return SerializationError(fmt.Sprintf("NullName.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Name, n.Valid = "", false
- return nil
- }
-
- n.Valid = true
- n.Name = Name(decodeText(vr))
- return vr.Err()
-}
-
-func (n NullName) FormatCode() int16 { return TextFormatCode }
-
-func (n NullName) Encode(w *WriteBuf, oid Oid) error {
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeString(w, oid, string(n.Name))
-}
-
-// The pgx.Char type is for PostgreSQL's special 8-bit-only
-// "char" type more akin to the C language's char type, or Go's byte type.
-// (Note that the name in PostgreSQL itself is "char", in double-quotes,
-// and not char.) It gets used a lot in PostgreSQL's system tables to hold
-// a single ASCII character value (eg pg_class.relkind).
-type Char byte
-
-// NullChar represents a pgx.Char that may be null. NullChar implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullChar struct {
- Char Char
- Valid bool // Valid is true if Char is not NULL
-}
-
-func (n *NullChar) Scan(vr *ValueReader) error {
- if vr.Type().DataType != CharOid {
- return SerializationError(fmt.Sprintf("NullChar.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Char, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Char = decodeChar(vr)
- return vr.Err()
-}
-
-func (n NullChar) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullChar) Encode(w *WriteBuf, oid Oid) error {
- if oid != CharOid {
- return SerializationError(fmt.Sprintf("NullChar.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeChar(w, oid, n.Char)
-}
-
-// NullInt16 represents a smallint that may be null. NullInt16 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan for prepared and unprepared queries.
-//
-// If Valid is false then the value is NULL.
-type NullInt16 struct {
- Int16 int16
- Valid bool // Valid is true if Int16 is not NULL
-}
-
-func (n *NullInt16) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int2Oid {
- return SerializationError(fmt.Sprintf("NullInt16.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int16, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int16 = decodeInt2(vr)
- return vr.Err()
-}
-
-func (n NullInt16) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt16) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int2Oid {
- return SerializationError(fmt.Sprintf("NullInt16.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt16(w, oid, n.Int16)
-}
-
-// NullInt32 represents an integer that may be null. NullInt32 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullInt32 struct {
- Int32 int32
- Valid bool // Valid is true if Int32 is not NULL
-}
-
-func (n *NullInt32) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int4Oid {
- return SerializationError(fmt.Sprintf("NullInt32.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int32, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int32 = decodeInt4(vr)
- return vr.Err()
-}
-
-func (n NullInt32) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt32) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int4Oid {
- return SerializationError(fmt.Sprintf("NullInt32.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt32(w, oid, n.Int32)
-}
-
-// Oid (Object Identifier Type) is, according to https://www.postgresql.org/docs/current/static/datatype-oid.html,
-// used internally by PostgreSQL as a primary key for various system tables. It is currently implemented
-// as an unsigned four-byte integer. Its definition can be found in src/include/postgres_ext.h
-// in the PostgreSQL sources.
-type Oid uint32
-
-// NullOid represents a Command Identifier (Oid) that may be null. NullOid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullOid struct {
- Oid Oid
- Valid bool // Valid is true if Oid is not NULL
-}
-
-func (n *NullOid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != OidOid {
- return SerializationError(fmt.Sprintf("NullOid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Oid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Oid = decodeOid(vr)
- return vr.Err()
-}
-
-func (n NullOid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullOid) Encode(w *WriteBuf, oid Oid) error {
- if oid != OidOid {
- return SerializationError(fmt.Sprintf("NullOid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeOid(w, oid, n.Oid)
-}
-
-// Xid is PostgreSQL's Transaction ID type.
-//
-// In later versions of PostgreSQL, it is the type used for the backend_xid
-// and backend_xmin columns of the pg_stat_activity system view.
-//
-// Also, when one does
-//
-// select xmin, xmax, * from some_table;
-//
-// it is the data type of the xmin and xmax hidden system columns.
-//
-// It is currently implemented as an unsigned four byte integer.
-// Its definition can be found in src/include/postgres_ext.h as TransactionId
-// in the PostgreSQL sources.
-type Xid uint32
-
-// NullXid represents a Transaction ID (Xid) that may be null. NullXid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullXid struct {
- Xid Xid
- Valid bool // Valid is true if Xid is not NULL
-}
-
-func (n *NullXid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != XidOid {
- return SerializationError(fmt.Sprintf("NullXid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Xid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Xid = decodeXid(vr)
- return vr.Err()
-}
-
-func (n NullXid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullXid) Encode(w *WriteBuf, oid Oid) error {
- if oid != XidOid {
- return SerializationError(fmt.Sprintf("NullXid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeXid(w, oid, n.Xid)
-}
-
-// Cid is PostgreSQL's Command Identifier type.
-//
-// When one does
-//
-// select cmin, cmax, * from some_table;
-//
-// it is the data type of the cmin and cmax hidden system columns.
-//
-// It is currently implemented as an unsigned four byte integer.
-// Its definition can be found in src/include/c.h as CommandId
-// in the PostgreSQL sources.
-type Cid uint32
-
-// NullCid represents a Command Identifier (Cid) that may be null. NullCid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullCid struct {
- Cid Cid
- Valid bool // Valid is true if Cid is not NULL
-}
-
-func (n *NullCid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != CidOid {
- return SerializationError(fmt.Sprintf("NullCid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Cid, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Cid = decodeCid(vr)
- return vr.Err()
-}
-
-func (n NullCid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullCid) Encode(w *WriteBuf, oid Oid) error {
- if oid != CidOid {
- return SerializationError(fmt.Sprintf("NullCid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeCid(w, oid, n.Cid)
-}
-
-// Tid is PostgreSQL's Tuple Identifier type.
-//
-// When one does
-//
-// select ctid, * from some_table;
-//
-// it is the data type of the ctid hidden system column.
-//
-// It is currently implemented as a pair unsigned two byte integers.
-// Its conversion functions can be found in src/backend/utils/adt/tid.c
-// in the PostgreSQL sources.
-type Tid struct {
- BlockNumber uint32
- OffsetNumber uint16
-}
-
-// NullTid represents a Tuple Identifier (Tid) that may be null. NullTid implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullTid struct {
- Tid Tid
- Valid bool // Valid is true if Tid is not NULL
-}
-
-func (n *NullTid) Scan(vr *ValueReader) error {
- if vr.Type().DataType != TidOid {
- return SerializationError(fmt.Sprintf("NullTid.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Tid, n.Valid = Tid{BlockNumber: 0, OffsetNumber: 0}, false
- return nil
- }
- n.Valid = true
- n.Tid = decodeTid(vr)
- return vr.Err()
-}
-
-func (n NullTid) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullTid) Encode(w *WriteBuf, oid Oid) error {
- if oid != TidOid {
- return SerializationError(fmt.Sprintf("NullTid.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeTid(w, oid, n.Tid)
-}
-
-// NullInt64 represents an bigint that may be null. NullInt64 implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullInt64 struct {
- Int64 int64
- Valid bool // Valid is true if Int64 is not NULL
-}
-
-func (n *NullInt64) Scan(vr *ValueReader) error {
- if vr.Type().DataType != Int8Oid {
- return SerializationError(fmt.Sprintf("NullInt64.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Int64, n.Valid = 0, false
- return nil
- }
- n.Valid = true
- n.Int64 = decodeInt8(vr)
- return vr.Err()
-}
-
-func (n NullInt64) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullInt64) Encode(w *WriteBuf, oid Oid) error {
- if oid != Int8Oid {
- return SerializationError(fmt.Sprintf("NullInt64.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeInt64(w, oid, n.Int64)
-}
-
-// NullBool represents an bool that may be null. NullBool implements the Scanner
-// and Encoder interfaces so it may be used both as an argument to Query[Row]
-// and a destination for Scan.
-//
-// If Valid is false then the value is NULL.
-type NullBool struct {
- Bool bool
- Valid bool // Valid is true if Bool is not NULL
-}
-
-func (n *NullBool) Scan(vr *ValueReader) error {
- if vr.Type().DataType != BoolOid {
- return SerializationError(fmt.Sprintf("NullBool.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Bool, n.Valid = false, false
- return nil
- }
- n.Valid = true
- n.Bool = decodeBool(vr)
- return vr.Err()
-}
-
-func (n NullBool) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullBool) Encode(w *WriteBuf, oid Oid) error {
- if oid != BoolOid {
- return SerializationError(fmt.Sprintf("NullBool.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeBool(w, oid, n.Bool)
-}
-
-// NullTime represents an time.Time that may be null. NullTime implements the
-// Scanner and Encoder interfaces so it may be used both as an argument to
-// Query[Row] and a destination for Scan. It corresponds with the PostgreSQL
-// types timestamptz, timestamp, and date.
-//
-// If Valid is false then the value is NULL.
-type NullTime struct {
- Time time.Time
- Valid bool // Valid is true if Time is not NULL
-}
-
-func (n *NullTime) Scan(vr *ValueReader) error {
- oid := vr.Type().DataType
- if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid {
- return SerializationError(fmt.Sprintf("NullTime.Scan cannot decode OID %d", vr.Type().DataType))
- }
-
- if vr.Len() == -1 {
- n.Time, n.Valid = time.Time{}, false
- return nil
- }
-
- n.Valid = true
- switch oid {
- case TimestampTzOid:
- n.Time = decodeTimestampTz(vr)
- case TimestampOid:
- n.Time = decodeTimestamp(vr)
- case DateOid:
- n.Time = decodeDate(vr)
- }
-
- return vr.Err()
-}
-
-func (n NullTime) FormatCode() int16 { return BinaryFormatCode }
-
-func (n NullTime) Encode(w *WriteBuf, oid Oid) error {
- if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid {
- return SerializationError(fmt.Sprintf("NullTime.Encode cannot encode into OID %d", oid))
- }
-
- if !n.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- return encodeTime(w, oid, n.Time)
-}
-
-// Hstore represents an hstore column. It does not support a null column or null
-// key values (use NullHstore for this). Hstore implements the Scanner and
-// Encoder interfaces so it may be used both as an argument to Query[Row] and a
-// destination for Scan.
-type Hstore map[string]string
-
-func (h *Hstore) Scan(vr *ValueReader) error {
- //oid for hstore not standardized, so we check its type name
- if vr.Type().DataTypeName != "hstore" {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into Hstore", vr.Type().DataTypeName)))
- return nil
- }
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null column into Hstore"))
- return nil
- }
-
- switch vr.Type().FormatCode {
- case TextFormatCode:
- m, err := parseHstoreToMap(vr.ReadString(vr.Len()))
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err)))
- return nil
- }
- hm := Hstore(m)
- *h = hm
- return nil
- case BinaryFormatCode:
- vr.Fatal(ProtocolError("Can't decode binary hstore"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-}
-
-func (h Hstore) FormatCode() int16 { return TextFormatCode }
-
-func (h Hstore) Encode(w *WriteBuf, oid Oid) error {
- var buf bytes.Buffer
-
- i := 0
- for k, v := range h {
- i++
- ks := strings.Replace(k, `\`, `\\`, -1)
- ks = strings.Replace(ks, `"`, `\"`, -1)
- vs := strings.Replace(v, `\`, `\\`, -1)
- vs = strings.Replace(vs, `"`, `\"`, -1)
- buf.WriteString(`"`)
- buf.WriteString(ks)
- buf.WriteString(`"=>"`)
- buf.WriteString(vs)
- buf.WriteString(`"`)
- if i < len(h) {
- buf.WriteString(", ")
- }
- }
- w.WriteInt32(int32(buf.Len()))
- w.WriteBytes(buf.Bytes())
- return nil
-}
-
-// NullHstore represents an hstore column that can be null or have null values
-// associated with its keys. NullHstore implements the Scanner and Encoder
-// interfaces so it may be used both as an argument to Query[Row] and a
-// destination for Scan.
-//
-// If Valid is false, then the value of the entire hstore column is NULL
-// If any of the NullString values in Store has Valid set to false, the key
-// appears in the hstore column, but its value is explicitly set to NULL.
-type NullHstore struct {
- Hstore map[string]NullString
- Valid bool
-}
-
-func (h *NullHstore) Scan(vr *ValueReader) error {
- //oid for hstore not standardized, so we check its type name
- if vr.Type().DataTypeName != "hstore" {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into NullHstore", vr.Type().DataTypeName)))
- return nil
- }
-
- if vr.Len() == -1 {
- h.Valid = false
- return nil
- }
-
- switch vr.Type().FormatCode {
- case TextFormatCode:
- store, err := parseHstoreToNullHstore(vr.ReadString(vr.Len()))
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err)))
- return nil
- }
- h.Valid = true
- h.Hstore = store
- return nil
- case BinaryFormatCode:
- vr.Fatal(ProtocolError("Can't decode binary hstore"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-}
-
-func (h NullHstore) FormatCode() int16 { return TextFormatCode }
-
-func (h NullHstore) Encode(w *WriteBuf, oid Oid) error {
- var buf bytes.Buffer
-
- if !h.Valid {
- w.WriteInt32(-1)
- return nil
- }
-
- i := 0
- for k, v := range h.Hstore {
- i++
- ks := strings.Replace(k, `\`, `\\`, -1)
- ks = strings.Replace(ks, `"`, `\"`, -1)
- if v.Valid {
- vs := strings.Replace(v.String, `\`, `\\`, -1)
- vs = strings.Replace(vs, `"`, `\"`, -1)
- buf.WriteString(fmt.Sprintf(`"%s"=>"%s"`, ks, vs))
- } else {
- buf.WriteString(fmt.Sprintf(`"%s"=>NULL`, ks))
- }
- if i < len(h.Hstore) {
- buf.WriteString(", ")
- }
- }
- w.WriteInt32(int32(buf.Len()))
- w.WriteBytes(buf.Bytes())
- return nil
-}
-
-// Encode encodes arg into wbuf as the type oid. This allows implementations
-// of the Encoder interface to delegate the actual work of encoding to the
-// built-in functionality.
-func Encode(wbuf *WriteBuf, oid Oid, arg interface{}) error {
+func convertSimpleArgument(ci *pgtype.ConnInfo, arg interface{}) (interface{}, error) {
if arg == nil {
- wbuf.WriteInt32(-1)
- return nil
+ return nil, nil
}
switch arg := arg.(type) {
- case Encoder:
- return arg.Encode(wbuf, oid)
case driver.Valuer:
- v, err := arg.Value()
+ return callValuerValue(arg)
+ case pgtype.TextEncoder:
+ buf, err := arg.EncodeText(ci, nil)
if err != nil {
- return err
+ return nil, err
}
- return Encode(wbuf, oid, v)
- case string:
- return encodeString(wbuf, oid, arg)
- case []AclItem:
- return encodeAclItemSlice(wbuf, oid, arg)
- case []byte:
- return encodeByteSlice(wbuf, oid, arg)
- case [][]byte:
- return encodeByteSliceSlice(wbuf, oid, arg)
- }
-
- refVal := reflect.ValueOf(arg)
-
- if refVal.Kind() == reflect.Ptr {
- if refVal.IsNil() {
- wbuf.WriteInt32(-1)
- return nil
+ if buf == nil {
+ return nil, nil
}
- arg = refVal.Elem().Interface()
- return Encode(wbuf, oid, arg)
- }
-
- if oid == JsonOid {
- return encodeJSON(wbuf, oid, arg)
- }
- if oid == JsonbOid {
- return encodeJSONB(wbuf, oid, arg)
- }
-
- switch arg := arg.(type) {
- case []string:
- return encodeStringSlice(wbuf, oid, arg)
+ return string(buf), nil
+ case int64:
+ return arg, nil
+ case float64:
+ return arg, nil
case bool:
- return encodeBool(wbuf, oid, arg)
- case []bool:
- return encodeBoolSlice(wbuf, oid, arg)
- case int:
- return encodeInt(wbuf, oid, arg)
- case uint:
- return encodeUInt(wbuf, oid, arg)
- case Char:
- return encodeChar(wbuf, oid, arg)
- case AclItem:
- // The aclitem data type goes over the wire using the same format as string,
- // so just cast to string and use encodeString
- return encodeString(wbuf, oid, string(arg))
- case Name:
- // The name data type goes over the wire using the same format as string,
- // so just cast to string and use encodeString
- return encodeString(wbuf, oid, string(arg))
+ return arg, nil
+ case time.Time:
+ return arg, nil
+ case string:
+ return arg, nil
+ case []byte:
+ return arg, nil
case int8:
- return encodeInt8(wbuf, oid, arg)
- case uint8:
- return encodeUInt8(wbuf, oid, arg)
+ return int64(arg), nil
case int16:
- return encodeInt16(wbuf, oid, arg)
- case []int16:
- return encodeInt16Slice(wbuf, oid, arg)
- case uint16:
- return encodeUInt16(wbuf, oid, arg)
- case []uint16:
- return encodeUInt16Slice(wbuf, oid, arg)
+ return int64(arg), nil
case int32:
- return encodeInt32(wbuf, oid, arg)
- case []int32:
- return encodeInt32Slice(wbuf, oid, arg)
+ return int64(arg), nil
+ case int:
+ return int64(arg), nil
+ case uint8:
+ return int64(arg), nil
+ case uint16:
+ return int64(arg), nil
case uint32:
- return encodeUInt32(wbuf, oid, arg)
- case []uint32:
- return encodeUInt32Slice(wbuf, oid, arg)
- case int64:
- return encodeInt64(wbuf, oid, arg)
- case []int64:
- return encodeInt64Slice(wbuf, oid, arg)
+ return int64(arg), nil
case uint64:
- return encodeUInt64(wbuf, oid, arg)
- case []uint64:
- return encodeUInt64Slice(wbuf, oid, arg)
- case float32:
- return encodeFloat32(wbuf, oid, arg)
- case []float32:
- return encodeFloat32Slice(wbuf, oid, arg)
- case float64:
- return encodeFloat64(wbuf, oid, arg)
- case []float64:
- return encodeFloat64Slice(wbuf, oid, arg)
- case time.Time:
- return encodeTime(wbuf, oid, arg)
- case []time.Time:
- return encodeTimeSlice(wbuf, oid, arg)
- case net.IP:
- return encodeIP(wbuf, oid, arg)
- case []net.IP:
- return encodeIPSlice(wbuf, oid, arg)
- case net.IPNet:
- return encodeIPNet(wbuf, oid, arg)
- case []net.IPNet:
- return encodeIPNetSlice(wbuf, oid, arg)
- case Oid:
- return encodeOid(wbuf, oid, arg)
- case Xid:
- return encodeXid(wbuf, oid, arg)
- case Cid:
- return encodeCid(wbuf, oid, arg)
- default:
- if strippedArg, ok := stripNamedType(&refVal); ok {
- return Encode(wbuf, oid, strippedArg)
- }
- return SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
- }
-}
-
-func stripNamedType(val *reflect.Value) (interface{}, bool) {
- switch val.Kind() {
- case reflect.Int:
- return int(val.Int()), true
- case reflect.Int8:
- return int8(val.Int()), true
- case reflect.Int16:
- return int16(val.Int()), true
- case reflect.Int32:
- return int32(val.Int()), true
- case reflect.Int64:
- return int64(val.Int()), true
- case reflect.Uint:
- return uint(val.Uint()), true
- case reflect.Uint8:
- return uint8(val.Uint()), true
- case reflect.Uint16:
- return uint16(val.Uint()), true
- case reflect.Uint32:
- return uint32(val.Uint()), true
- case reflect.Uint64:
- return uint64(val.Uint()), true
- case reflect.String:
- return val.String(), true
- }
-
- return nil, false
-}
-
-// Decode decodes from vr into d. d must be a pointer. This allows
-// implementations of the Decoder interface to delegate the actual work of
-// decoding to the built-in functionality.
-func Decode(vr *ValueReader, d interface{}) error {
- switch v := d.(type) {
- case *bool:
- *v = decodeBool(vr)
- case *int:
- n := decodeInt(vr)
- if n < int64(minInt) {
- return fmt.Errorf("%d is less than minimum value for int", n)
- } else if n > int64(maxInt) {
- return fmt.Errorf("%d is greater than maximum value for int", n)
- }
- *v = int(n)
- case *int8:
- n := decodeInt(vr)
- if n < math.MinInt8 {
- return fmt.Errorf("%d is less than minimum value for int8", n)
- } else if n > math.MaxInt8 {
- return fmt.Errorf("%d is greater than maximum value for int8", n)
- }
- *v = int8(n)
- case *int16:
- n := decodeInt(vr)
- if n < math.MinInt16 {
- return fmt.Errorf("%d is less than minimum value for int16", n)
- } else if n > math.MaxInt16 {
- return fmt.Errorf("%d is greater than maximum value for int16", n)
- }
- *v = int16(n)
- case *int32:
- n := decodeInt(vr)
- if n < math.MinInt32 {
- return fmt.Errorf("%d is less than minimum value for int32", n)
- } else if n > math.MaxInt32 {
- return fmt.Errorf("%d is greater than maximum value for int32", n)
- }
- *v = int32(n)
- case *int64:
- n := decodeInt(vr)
- if n < math.MinInt64 {
- return fmt.Errorf("%d is less than minimum value for int64", n)
- } else if n > math.MaxInt64 {
- return fmt.Errorf("%d is greater than maximum value for int64", n)
- }
- *v = int64(n)
- case *uint:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint8", n)
- } else if maxInt == math.MaxInt32 && n > math.MaxUint32 {
- return fmt.Errorf("%d is greater than maximum value for uint", n)
- }
- *v = uint(n)
- case *uint8:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint8", n)
- } else if n > math.MaxUint8 {
- return fmt.Errorf("%d is greater than maximum value for uint8", n)
- }
- *v = uint8(n)
- case *uint16:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint16", n)
- } else if n > math.MaxUint16 {
- return fmt.Errorf("%d is greater than maximum value for uint16", n)
- }
- *v = uint16(n)
- case *uint32:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint32", n)
- } else if n > math.MaxUint32 {
- return fmt.Errorf("%d is greater than maximum value for uint32", n)
- }
- *v = uint32(n)
- case *uint64:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for uint64", n)
- }
- *v = uint64(n)
- case *Char:
- *v = decodeChar(vr)
- case *AclItem:
- // aclitem goes over the wire just like text
- *v = AclItem(decodeText(vr))
- case *Name:
- // name goes over the wire just like text
- *v = Name(decodeText(vr))
- case *Oid:
- *v = decodeOid(vr)
- case *Xid:
- *v = decodeXid(vr)
- case *Tid:
- *v = decodeTid(vr)
- case *Cid:
- *v = decodeCid(vr)
- case *string:
- *v = decodeText(vr)
- case *float32:
- *v = decodeFloat4(vr)
- case *float64:
- *v = decodeFloat8(vr)
- case *[]AclItem:
- *v = decodeAclItemArray(vr)
- case *[]bool:
- *v = decodeBoolArray(vr)
- case *[]int16:
- *v = decodeInt2Array(vr)
- case *[]uint16:
- *v = decodeInt2ArrayToUInt(vr)
- case *[]int32:
- *v = decodeInt4Array(vr)
- case *[]uint32:
- *v = decodeInt4ArrayToUInt(vr)
- case *[]int64:
- *v = decodeInt8Array(vr)
- case *[]uint64:
- *v = decodeInt8ArrayToUInt(vr)
- case *[]float32:
- *v = decodeFloat4Array(vr)
- case *[]float64:
- *v = decodeFloat8Array(vr)
- case *[]string:
- *v = decodeTextArray(vr)
- case *[]time.Time:
- *v = decodeTimestampArray(vr)
- case *[][]byte:
- *v = decodeByteaArray(vr)
- case *[]interface{}:
- *v = decodeRecord(vr)
- case *time.Time:
- switch vr.Type().DataType {
- case DateOid:
- *v = decodeDate(vr)
- case TimestampTzOid:
- *v = decodeTimestampTz(vr)
- case TimestampOid:
- *v = decodeTimestamp(vr)
- default:
- return fmt.Errorf("Can't convert OID %v to time.Time", vr.Type().DataType)
- }
- case *net.IP:
- ipnet := decodeInet(vr)
- if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount {
- return fmt.Errorf("Cannot decode netmask into *net.IP")
- }
- *v = ipnet.IP
- case *[]net.IP:
- ipnets := decodeInetArray(vr)
- ips := make([]net.IP, len(ipnets))
- for i, ipnet := range ipnets {
- if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount {
- return fmt.Errorf("Cannot decode netmask into *net.IP")
- }
- ips[i] = ipnet.IP
- }
- *v = ips
- case *net.IPNet:
- *v = decodeInet(vr)
- case *[]net.IPNet:
- *v = decodeInetArray(vr)
- default:
- if v := reflect.ValueOf(d); v.Kind() == reflect.Ptr {
- el := v.Elem()
- switch el.Kind() {
- // if d is a pointer to pointer, strip the pointer and try again
- case reflect.Ptr:
- // -1 is a null value
- if vr.Len() == -1 {
- if !el.IsNil() {
- // if the destination pointer is not nil, nil it out
- el.Set(reflect.Zero(el.Type()))
- }
- return nil
- }
- if el.IsNil() {
- // allocate destination
- el.Set(reflect.New(el.Type().Elem()))
- }
- d = el.Interface()
- return Decode(vr, d)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- n := decodeInt(vr)
- if el.OverflowInt(n) {
- return fmt.Errorf("Scan cannot decode %d into %T", n, d)
- }
- el.SetInt(n)
- return nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- n := decodeInt(vr)
- if n < 0 {
- return fmt.Errorf("%d is less than zero for %T", n, d)
- }
- if el.OverflowUint(uint64(n)) {
- return fmt.Errorf("Scan cannot decode %d into %T", n, d)
- }
- el.SetUint(uint64(n))
- return nil
- case reflect.String:
- el.SetString(decodeText(vr))
- return nil
- }
+ if arg > math.MaxInt64 {
+ return nil, errors.Errorf("arg too big for int64: %v", arg)
}
- return fmt.Errorf("Scan cannot decode into %T", d)
- }
-
- return nil
-}
-
-func decodeBool(vr *ValueReader) bool {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into bool"))
- return false
- }
-
- if vr.Type().DataType != BoolOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into bool", vr.Type().DataType)))
- return false
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return false
- }
-
- if vr.Len() != 1 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an bool: %d", vr.Len())))
- return false
- }
-
- b := vr.ReadByte()
- return b != 0
-}
-
-func encodeBool(w *WriteBuf, oid Oid, value bool) error {
- if oid != BoolOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "bool", oid)
- }
-
- w.WriteInt32(1)
-
- var n byte
- if value {
- n = 1
- }
-
- w.WriteByte(n)
-
- return nil
-}
-
-func decodeInt(vr *ValueReader) int64 {
- switch vr.Type().DataType {
- case Int2Oid:
- return int64(decodeInt2(vr))
- case Int4Oid:
- return int64(decodeInt4(vr))
- case Int8Oid:
- return int64(decodeInt8(vr))
- }
-
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into any integer type", vr.Type().DataType)))
- return 0
-}
-
-func decodeInt8(vr *ValueReader) int64 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int64"))
- return 0
- }
-
- if vr.Type().DataType != Int8Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int8", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt64()
-}
-
-func decodeChar(vr *ValueReader) Char {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into char"))
- return Char(0)
- }
-
- if vr.Type().DataType != CharOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into char", vr.Type().DataType)))
- return Char(0)
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Char(0)
- }
-
- if vr.Len() != 1 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a char: %d", vr.Len())))
- return Char(0)
- }
-
- return Char(vr.ReadByte())
-}
-
-func decodeInt2(vr *ValueReader) int16 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int16"))
- return 0
- }
-
- if vr.Type().DataType != Int2Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int16", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 2 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt16()
-}
-
-func encodeInt(w *WriteBuf, oid Oid, value int) error {
- switch oid {
- case Int2Oid:
- if value < math.MinInt16 {
- return fmt.Errorf("%d is less than min pg:int2", value)
- } else if value > math.MaxInt16 {
- return fmt.Errorf("%d is greater than max pg:int2", value)
- }
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- if value < math.MinInt32 {
- return fmt.Errorf("%d is less than min pg:int4", value)
- } else if value > math.MaxInt32 {
- return fmt.Errorf("%d is greater than max pg:int4", value)
- }
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- if int64(value) <= int64(math.MaxInt64) {
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- } else {
- return fmt.Errorf("%d is larger than max int64 %d", value, int64(math.MaxInt64))
- }
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int8", oid)
- }
-
- return nil
-}
-
-func encodeUInt(w *WriteBuf, oid Oid, value uint) error {
- switch oid {
- case Int2Oid:
- if value > math.MaxInt16 {
- return fmt.Errorf("%d is greater than max pg:int2", value)
- }
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- if value > math.MaxInt32 {
- return fmt.Errorf("%d is greater than max pg:int4", value)
- }
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- //****** Changed value to int64(value) and math.MaxInt64 to int64(math.MaxInt64)
- if int64(value) > int64(math.MaxInt64) {
- return fmt.Errorf("%d is greater than max pg:int8", value)
- }
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
-
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint8", oid)
- }
-
- return nil
-}
-
-func encodeChar(w *WriteBuf, oid Oid, value Char) error {
- w.WriteInt32(1)
- w.WriteByte(byte(value))
- return nil
-}
-
-func encodeInt8(w *WriteBuf, oid Oid, value int8) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int8", oid)
- }
-
- return nil
-}
-
-func encodeUInt8(w *WriteBuf, oid Oid, value uint8) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint8", oid)
- }
-
- return nil
-}
-
-func encodeInt16(w *WriteBuf, oid Oid, value int16) error {
- switch oid {
- case Int2Oid:
- w.WriteInt32(2)
- w.WriteInt16(value)
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int16", oid)
- }
-
- return nil
-}
-
-func encodeUInt16(w *WriteBuf, oid Oid, value uint16) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int16", oid)
- }
-
- return nil
-}
-
-func encodeInt32(w *WriteBuf, oid Oid, value int32) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- w.WriteInt32(4)
- w.WriteInt32(value)
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int32", oid)
- }
-
- return nil
-}
-
-func encodeUInt32(w *WriteBuf, oid Oid, value uint32) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint32", oid)
- }
-
- return nil
-}
-
-func encodeInt64(w *WriteBuf, oid Oid, value int64) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
- w.WriteInt32(8)
- w.WriteInt64(value)
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "int64", oid)
- }
-
- return nil
-}
-
-func encodeUInt64(w *WriteBuf, oid Oid, value uint64) error {
- switch oid {
- case Int2Oid:
- if value <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(value))
- } else {
- return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
- }
- case Int4Oid:
- if value <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(value))
- } else {
- return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
- }
- case Int8Oid:
-
- if value <= math.MaxInt64 {
- w.WriteInt32(8)
- w.WriteInt64(int64(value))
- } else {
- return fmt.Errorf("%d is greater than max int64 %d", value, int64(math.MaxInt64))
- }
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "uint64", oid)
- }
-
- return nil
-}
-
-func decodeInt4(vr *ValueReader) int32 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into int32"))
- return 0
- }
-
- if vr.Type().DataType != Int4Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int32", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4: %d", vr.Len())))
- return 0
- }
-
- return vr.ReadInt32()
-}
-
-func decodeOid(vr *ValueReader) Oid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Oid"))
- return Oid(0)
- }
-
- if vr.Type().DataType != OidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Oid", vr.Type().DataType)))
- return Oid(0)
- }
-
- // Oid needs to decode text format because it is used in loadPgTypes
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Oid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Oid(0)
+ return int64(arg), nil
+ case uint:
+ if uint64(arg) > math.MaxInt64 {
+ return nil, errors.Errorf("arg too big for int64: %v", arg)
}
- return Oid(vr.ReadInt32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Oid(0)
- }
-}
-
-func encodeOid(w *WriteBuf, oid Oid, value Oid) error {
- if oid != OidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Oid", oid)
- }
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
-}
-
-func decodeXid(vr *ValueReader) Xid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Xid"))
- return Xid(0)
+ return int64(arg), nil
+ case float32:
+ return float64(arg), nil
}
- if vr.Type().DataType != XidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Xid", vr.Type().DataType)))
- return Xid(0)
- }
+ refVal := reflect.ValueOf(arg)
- // Unlikely Xid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Xid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Xid(0)
+ if refVal.Kind() == reflect.Ptr {
+ if refVal.IsNil() {
+ return nil, nil
}
- return Xid(vr.ReadUint32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Xid(0)
+ arg = refVal.Elem().Interface()
+ return convertSimpleArgument(ci, arg)
}
-}
-func encodeXid(w *WriteBuf, oid Oid, value Xid) error {
- if oid != XidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Xid", oid)
+ if strippedArg, ok := stripNamedType(&refVal); ok {
+ return convertSimpleArgument(ci, strippedArg)
}
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
+ return nil, SerializationError(fmt.Sprintf("Cannot encode %T in simple protocol - %T must implement driver.Valuer, pgtype.TextEncoder, or be a native type", arg, arg))
}
-func decodeCid(vr *ValueReader) Cid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Cid"))
- return Cid(0)
- }
-
- if vr.Type().DataType != CidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Cid", vr.Type().DataType)))
- return Cid(0)
+func encodePreparedStatementArgument(ci *pgtype.ConnInfo, buf []byte, oid pgtype.OID, arg interface{}) ([]byte, error) {
+ if arg == nil {
+ return pgio.AppendInt32(buf, -1), nil
}
- // Unlikely Cid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
- n, err := strconv.ParseUint(s, 10, 32)
+ switch arg := arg.(type) {
+ case pgtype.BinaryEncoder:
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := arg.EncodeBinary(ci, buf)
if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- }
- return Cid(n)
- case BinaryFormatCode:
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Cid(0)
- }
- return Cid(vr.ReadUint32())
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Cid(0)
- }
-}
-
-func encodeCid(w *WriteBuf, oid Oid, value Cid) error {
- if oid != CidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Cid", oid)
- }
-
- w.WriteInt32(4)
- w.WriteUint32(uint32(value))
-
- return nil
-}
-
-// Note that we do not match negative numbers, because neither the
-// BlockNumber nor OffsetNumber of a Tid can be negative.
-var tidRegexp *regexp.Regexp = regexp.MustCompile(`^\((\d*),(\d*)\)$`)
-
-func decodeTid(vr *ValueReader) Tid {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into Tid"))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-
- if vr.Type().DataType != TidOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Tid", vr.Type().DataType)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-
- // Unlikely Tid will ever go over the wire as text format, but who knows?
- switch vr.Type().FormatCode {
- case TextFormatCode:
- s := vr.ReadString(vr.Len())
-
- match := tidRegexp.FindStringSubmatch(s)
- if match == nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
+ return nil, err
}
-
- blockNumber, err := strconv.ParseUint(s, 10, 16)
- if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid BlockNumber part of a Tid: %v", s)))
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
-
- offsetNumber, err := strconv.ParseUint(s, 10, 16)
+ return buf, nil
+ case pgtype.TextEncoder:
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := arg.EncodeText(ci, buf)
if err != nil {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid offsetNumber part of a Tid: %v", s)))
- }
- return Tid{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber)}
- case BinaryFormatCode:
- if vr.Len() != 6 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len())))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
- return Tid{BlockNumber: vr.ReadUint32(), OffsetNumber: vr.ReadUint16()}
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return Tid{BlockNumber: 0, OffsetNumber: 0}
- }
-}
-
-func encodeTid(w *WriteBuf, oid Oid, value Tid) error {
- if oid != TidOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Tid", oid)
- }
-
- w.WriteInt32(6)
- w.WriteUint32(value.BlockNumber)
- w.WriteUint16(value.OffsetNumber)
-
- return nil
-}
-
-func decodeFloat4(vr *ValueReader) float32 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into float32"))
- return 0
- }
-
- if vr.Type().DataType != Float4Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float32", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4: %d", vr.Len())))
- return 0
- }
-
- i := vr.ReadInt32()
- return math.Float32frombits(uint32(i))
-}
-
-func encodeFloat32(w *WriteBuf, oid Oid, value float32) error {
- switch oid {
- case Float4Oid:
- w.WriteInt32(4)
- w.WriteInt32(int32(math.Float32bits(value)))
- case Float8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(float64(value))))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "float32", oid)
- }
-
- return nil
-}
-
-func decodeFloat8(vr *ValueReader) float64 {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into float64"))
- return 0
- }
-
- if vr.Type().DataType != Float8Oid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float64", vr.Type().DataType)))
- return 0
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return 0
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float8: %d", vr.Len())))
- return 0
- }
-
- i := vr.ReadInt64()
- return math.Float64frombits(uint64(i))
-}
-
-func encodeFloat64(w *WriteBuf, oid Oid, value float64) error {
- switch oid {
- case Float8Oid:
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(value)))
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "float64", oid)
- }
-
- return nil
-}
-
-func decodeText(vr *ValueReader) string {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into string"))
- return ""
- }
-
- return vr.ReadString(vr.Len())
-}
-
-func encodeString(w *WriteBuf, oid Oid, value string) error {
- w.WriteInt32(int32(len(value)))
- w.WriteBytes([]byte(value))
- return nil
-}
-
-func decodeBytea(vr *ValueReader) []byte {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != ByteaOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []byte", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- return vr.ReadBytes(vr.Len())
-}
-
-func encodeByteSlice(w *WriteBuf, oid Oid, value []byte) error {
- w.WriteInt32(int32(len(value)))
- w.WriteBytes(value)
-
- return nil
-}
-
-func decodeJSON(vr *ValueReader, d interface{}) error {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != JsonOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into json", vr.Type().DataType)))
- }
-
- bytes := vr.ReadBytes(vr.Len())
- err := json.Unmarshal(bytes, d)
- if err != nil {
- vr.Fatal(err)
- }
- return err
-}
-
-func encodeJSON(w *WriteBuf, oid Oid, value interface{}) error {
- if oid != JsonOid {
- return fmt.Errorf("cannot encode JSON into oid %v", oid)
- }
-
- s, err := json.Marshal(value)
- if err != nil {
- return fmt.Errorf("Failed to encode json from type: %T", value)
- }
-
- w.WriteInt32(int32(len(s)))
- w.WriteBytes(s)
-
- return nil
-}
-
-func decodeJSONB(vr *ValueReader, d interface{}) error {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != JsonbOid {
- err := ProtocolError(fmt.Sprintf("Cannot decode oid %v into jsonb", vr.Type().DataType))
- vr.Fatal(err)
- return err
- }
-
- bytes := vr.ReadBytes(vr.Len())
- if vr.Type().FormatCode == BinaryFormatCode {
- if bytes[0] != 1 {
- err := ProtocolError(fmt.Sprintf("Unknown jsonb format byte: %x", bytes[0]))
- vr.Fatal(err)
- return err
- }
- bytes = bytes[1:]
- }
-
- err := json.Unmarshal(bytes, d)
- if err != nil {
- vr.Fatal(err)
- }
- return err
-}
-
-func encodeJSONB(w *WriteBuf, oid Oid, value interface{}) error {
- if oid != JsonbOid {
- return fmt.Errorf("cannot encode JSON into oid %v", oid)
- }
-
- s, err := json.Marshal(value)
- if err != nil {
- return fmt.Errorf("Failed to encode json from type: %T", value)
- }
-
- w.WriteInt32(int32(len(s) + 1))
- w.WriteByte(1) // JSONB format header
- w.WriteBytes(s)
-
- return nil
-}
-
-func decodeDate(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into time.Time"))
- return zeroTime
- }
-
- if vr.Type().DataType != DateOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 4 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an date: %d", vr.Len())))
- }
- dayOffset := vr.ReadInt32()
- return time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.Local)
-}
-
-func encodeTime(w *WriteBuf, oid Oid, value time.Time) error {
- switch oid {
- case DateOid:
- tUnix := time.Date(value.Year(), value.Month(), value.Day(), 0, 0, 0, 0, time.UTC).Unix()
- dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix()
-
- secSinceDateEpoch := tUnix - dateEpoch
- daysSinceDateEpoch := secSinceDateEpoch / 86400
-
- w.WriteInt32(4)
- w.WriteInt32(int32(daysSinceDateEpoch))
-
- return nil
- case TimestampTzOid, TimestampOid:
- microsecSinceUnixEpoch := value.Unix()*1000000 + int64(value.Nanosecond())/1000
- microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
-
- w.WriteInt32(8)
- w.WriteInt64(microsecSinceY2K)
-
- return nil
- default:
- return fmt.Errorf("cannot encode %s into oid %v", "time.Time", oid)
- }
-}
-
-const microsecFromUnixEpochToY2K = 946684800 * 1000000
-
-func decodeTimestampTz(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into time.Time"))
- return zeroTime
- }
-
- if vr.Type().DataType != TimestampTzOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamptz: %d", vr.Len())))
- return zeroTime
- }
-
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
-}
-
-func decodeTimestamp(vr *ValueReader) time.Time {
- var zeroTime time.Time
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into timestamp"))
- return zeroTime
- }
-
- if vr.Type().DataType != TimestampOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType)))
- return zeroTime
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zeroTime
- }
-
- if vr.Len() != 8 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamp: %d", vr.Len())))
- return zeroTime
- }
-
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
-}
-
-func decodeInet(vr *ValueReader) net.IPNet {
- var zero net.IPNet
-
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into net.IPNet"))
- return zero
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return zero
- }
-
- pgType := vr.Type()
- if pgType.DataType != InetOid && pgType.DataType != CidrOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into %s", pgType.DataType, pgType.Name)))
- return zero
- }
- if vr.Len() != 8 && vr.Len() != 20 {
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a %s: %d", pgType.Name, vr.Len())))
- return zero
- }
-
- vr.ReadByte() // ignore family
- bits := vr.ReadByte()
- vr.ReadByte() // ignore is_cidr
- addressLength := vr.ReadByte()
-
- var ipnet net.IPNet
- ipnet.IP = vr.ReadBytes(int32(addressLength))
- ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
-
- return ipnet
-}
-
-func encodeIPNet(w *WriteBuf, oid Oid, value net.IPNet) error {
- if oid != InetOid && oid != CidrOid {
- return fmt.Errorf("cannot encode %s into oid %v", "net.IPNet", oid)
- }
-
- var size int32
- var family byte
- switch len(value.IP) {
- case net.IPv4len:
- size = 8
- family = *w.conn.pgsqlAfInet
- case net.IPv6len:
- size = 20
- family = *w.conn.pgsqlAfInet6
- default:
- return fmt.Errorf("Unexpected IP length: %v", len(value.IP))
- }
-
- w.WriteInt32(size)
- w.WriteByte(family)
- ones, _ := value.Mask.Size()
- w.WriteByte(byte(ones))
- w.WriteByte(0) // is_cidr is ignored on server
- w.WriteByte(byte(len(value.IP)))
- w.WriteBytes(value.IP)
-
- return nil
-}
-
-func encodeIP(w *WriteBuf, oid Oid, value net.IP) error {
- if oid != InetOid && oid != CidrOid {
- return fmt.Errorf("cannot encode %s into oid %v", "net.IP", oid)
- }
-
- var ipnet net.IPNet
- ipnet.IP = value
- bitCount := len(value) * 8
- ipnet.Mask = net.CIDRMask(bitCount, bitCount)
- return encodeIPNet(w, oid, ipnet)
-}
-
-func decodeRecord(vr *ValueReader) []interface{} {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- if vr.Type().DataType != RecordOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []interface{}", vr.Type().DataType)))
- return nil
- }
-
- valueCount := vr.ReadInt32()
- record := make([]interface{}, 0, int(valueCount))
-
- for i := int32(0); i < valueCount; i++ {
- fd := FieldDescription{FormatCode: BinaryFormatCode}
- fieldVR := ValueReader{mr: vr.mr, fd: &fd}
- fd.DataType = vr.ReadOid()
- fieldVR.valueBytesRemaining = vr.ReadInt32()
- vr.valueBytesRemaining -= fieldVR.valueBytesRemaining
-
- switch fd.DataType {
- case BoolOid:
- record = append(record, decodeBool(&fieldVR))
- case ByteaOid:
- record = append(record, decodeBytea(&fieldVR))
- case Int8Oid:
- record = append(record, decodeInt8(&fieldVR))
- case Int2Oid:
- record = append(record, decodeInt2(&fieldVR))
- case Int4Oid:
- record = append(record, decodeInt4(&fieldVR))
- case OidOid:
- record = append(record, decodeOid(&fieldVR))
- case Float4Oid:
- record = append(record, decodeFloat4(&fieldVR))
- case Float8Oid:
- record = append(record, decodeFloat8(&fieldVR))
- case DateOid:
- record = append(record, decodeDate(&fieldVR))
- case TimestampTzOid:
- record = append(record, decodeTimestampTz(&fieldVR))
- case TimestampOid:
- record = append(record, decodeTimestamp(&fieldVR))
- case InetOid, CidrOid:
- record = append(record, decodeInet(&fieldVR))
- case TextOid, VarcharOid, UnknownOid:
- record = append(record, decodeText(&fieldVR))
- default:
- vr.Fatal(fmt.Errorf("decodeRecord cannot decode oid %d", fd.DataType))
- return nil
- }
-
- // Consume any remaining data
- if fieldVR.Len() > 0 {
- fieldVR.ReadBytes(fieldVR.Len())
- }
-
- if fieldVR.Err() != nil {
- vr.Fatal(fieldVR.Err())
- return nil
- }
- }
-
- return record
-}
-
-func decode1dArrayHeader(vr *ValueReader) (length int32, err error) {
- numDims := vr.ReadInt32()
- if numDims > 1 {
- return 0, ProtocolError(fmt.Sprintf("Expected array to have 0 or 1 dimension, but it had %v", numDims))
- }
-
- vr.ReadInt32() // 0 if no nulls / 1 if there is one or more nulls -- but we don't care
- vr.ReadInt32() // element oid
-
- if numDims == 0 {
- return 0, nil
- }
-
- length = vr.ReadInt32()
-
- idxFirstElem := vr.ReadInt32()
- if idxFirstElem != 1 {
- return 0, ProtocolError(fmt.Sprintf("Expected array's first element to start a index 1, but it is %d", idxFirstElem))
- }
-
- return length, nil
-}
-
-func decodeBoolArray(vr *ValueReader) []bool {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != BoolArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []bool", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]bool, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 1:
- if vr.ReadByte() == 1 {
- a[i] = true
- }
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an bool element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeBoolSlice(w *WriteBuf, oid Oid, slice []bool) error {
- if oid != BoolArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]bool", oid)
- }
-
- encodeArrayHeader(w, BoolOid, len(slice), 5)
- for _, v := range slice {
- w.WriteInt32(1)
- var b byte
- if v {
- b = 1
- }
- w.WriteByte(b)
- }
-
- return nil
-}
-
-func decodeByteaArray(vr *ValueReader) [][]byte {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != ByteaArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into [][]byte", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([][]byte, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- a[i] = vr.ReadBytes(elSize)
- }
- }
-
- return a
-}
-
-func encodeByteSliceSlice(w *WriteBuf, oid Oid, value [][]byte) error {
- if oid != ByteaArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[][]byte", oid)
- }
-
- size := 20 // array header size
- for _, el := range value {
- size += 4 + len(el)
- }
-
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(ByteaOid)) // type of elements
- w.WriteInt32(int32(len(value))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, el := range value {
- encodeByteSlice(w, ByteaOid, el)
- }
-
- return nil
-}
-
-func decodeInt2Array(vr *ValueReader) []int16 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int2ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int16", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int16, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 2:
- a[i] = vr.ReadInt16()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt2ArrayToUInt(vr *ValueReader) []uint16 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int2ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint16", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint16, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 2:
- tmp := vr.ReadInt16()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint16", tmp)))
- return nil
- }
- a[i] = uint16(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt16Slice(w *WriteBuf, oid Oid, slice []int16) error {
- if oid != Int2ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int16", oid)
- }
-
- encodeArrayHeader(w, Int2Oid, len(slice), 6)
- for _, v := range slice {
- w.WriteInt32(2)
- w.WriteInt16(v)
- }
-
- return nil
-}
-
-func encodeUInt16Slice(w *WriteBuf, oid Oid, slice []uint16) error {
- if oid != Int2ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint16", oid)
- }
-
- encodeArrayHeader(w, Int2Oid, len(slice), 6)
- for _, v := range slice {
- if v <= math.MaxInt16 {
- w.WriteInt32(2)
- w.WriteInt16(int16(v))
- } else {
- return fmt.Errorf("%d is greater than max smallint %d", v, math.MaxInt16)
- }
- }
-
- return nil
-}
-
-func decodeInt4Array(vr *ValueReader) []int32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- a[i] = vr.ReadInt32()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt4ArrayToUInt(vr *ValueReader) []uint32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- tmp := vr.ReadInt32()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint32", tmp)))
- return nil
- }
- a[i] = uint32(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt32Slice(w *WriteBuf, oid Oid, slice []int32) error {
- if oid != Int4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int32", oid)
- }
-
- encodeArrayHeader(w, Int4Oid, len(slice), 8)
- for _, v := range slice {
- w.WriteInt32(4)
- w.WriteInt32(v)
- }
-
- return nil
-}
-
-func encodeUInt32Slice(w *WriteBuf, oid Oid, slice []uint32) error {
- if oid != Int4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint32", oid)
- }
-
- encodeArrayHeader(w, Int4Oid, len(slice), 8)
- for _, v := range slice {
- if v <= math.MaxInt32 {
- w.WriteInt32(4)
- w.WriteInt32(int32(v))
- } else {
- return fmt.Errorf("%d is greater than max integer %d", v, math.MaxInt32)
- }
- }
-
- return nil
-}
-
-func decodeInt8Array(vr *ValueReader) []int64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]int64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- a[i] = vr.ReadInt64()
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func decodeInt8ArrayToUInt(vr *ValueReader) []uint64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Int8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]uint64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- tmp := vr.ReadInt64()
- if tmp < 0 {
- vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint64", tmp)))
- return nil
- }
- a[i] = uint64(tmp)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize)))
- return nil
- }
- }
-
- return a
-}
-
-func encodeInt64Slice(w *WriteBuf, oid Oid, slice []int64) error {
- if oid != Int8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]int64", oid)
- }
-
- encodeArrayHeader(w, Int8Oid, len(slice), 12)
- for _, v := range slice {
- w.WriteInt32(8)
- w.WriteInt64(v)
- }
-
- return nil
-}
-
-func encodeUInt64Slice(w *WriteBuf, oid Oid, slice []uint64) error {
- if oid != Int8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint64", oid)
- }
-
- encodeArrayHeader(w, Int8Oid, len(slice), 12)
- for _, v := range slice {
- if v <= math.MaxInt64 {
- w.WriteInt32(8)
- w.WriteInt64(int64(v))
- } else {
- return fmt.Errorf("%d is greater than max bigint %d", v, int64(math.MaxInt64))
- }
- }
-
- return nil
-}
-
-func decodeFloat4Array(vr *ValueReader) []float32 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Float4ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float32", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]float32, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 4:
- n := vr.ReadInt32()
- a[i] = math.Float32frombits(uint32(n))
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4 element: %d", elSize)))
- return nil
+ return nil, err
}
- }
-
- return a
-}
-
-func encodeFloat32Slice(w *WriteBuf, oid Oid, slice []float32) error {
- if oid != Float4ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]float32", oid)
- }
-
- encodeArrayHeader(w, Float4Oid, len(slice), 8)
- for _, v := range slice {
- w.WriteInt32(4)
- w.WriteInt32(int32(math.Float32bits(v)))
- }
-
- return nil
-}
-
-func decodeFloat8Array(vr *ValueReader) []float64 {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != Float8ArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float64", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]float64, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- n := vr.ReadInt64()
- a[i] = math.Float64frombits(uint64(n))
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an float4 element: %d", elSize)))
- return nil
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
+ return buf, nil
+ case string:
+ buf = pgio.AppendInt32(buf, int32(len(arg)))
+ buf = append(buf, arg...)
+ return buf, nil
}
- return a
-}
-
-func encodeFloat64Slice(w *WriteBuf, oid Oid, slice []float64) error {
- if oid != Float8ArrayOid {
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]float64", oid)
- }
-
- encodeArrayHeader(w, Float8Oid, len(slice), 12)
- for _, v := range slice {
- w.WriteInt32(8)
- w.WriteInt64(int64(math.Float64bits(v)))
- }
-
- return nil
-}
-
-func decodeTextArray(vr *ValueReader) []string {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != TextArrayOid && vr.Type().DataType != VarcharArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []string", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
+ refVal := reflect.ValueOf(arg)
- a := make([]string, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- if elSize == -1 {
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
+ if refVal.Kind() == reflect.Ptr {
+ if refVal.IsNil() {
+ return pgio.AppendInt32(buf, -1), nil
}
-
- a[i] = vr.ReadString(elSize)
+ arg = refVal.Elem().Interface()
+ return encodePreparedStatementArgument(ci, buf, oid, arg)
}
- return a
-}
-
-// escapeAclItem escapes an AclItem before it is added to
-// its aclitem[] string representation. The PostgreSQL aclitem
-// datatype itself can need escapes because it follows the
-// formatting rules of SQL identifiers. Think of this function
-// as escaping the escapes, so that PostgreSQL's array parser
-// will do the right thing.
-func escapeAclItem(acl string) (string, error) {
- var escapedAclItem bytes.Buffer
- reader := strings.NewReader(acl)
- for {
- rn, _, err := reader.ReadRune()
+ if dt, ok := ci.DataTypeForOID(oid); ok {
+ value := dt.Value
+ err := value.Set(arg)
if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error.
- return escapedAclItem.String(), nil
+ {
+ if arg, ok := arg.(driver.Valuer); ok {
+ v, err := callValuerValue(arg)
+ if err != nil {
+ return nil, err
+ }
+ return encodePreparedStatementArgument(ci, buf, oid, v)
+ }
}
- // This error was not expected
- return "", err
- }
- if needsEscape(rn) {
- escapedAclItem.WriteRune('\\')
- }
- escapedAclItem.WriteRune(rn)
- }
-}
-
-// needsEscape determines whether or not a rune needs escaping
-// before being placed in the textual representation of an
-// aclitem[] array.
-func needsEscape(rn rune) bool {
- return rn == '\\' || rn == ',' || rn == '"' || rn == '}'
-}
-
-// encodeAclItemSlice encodes a slice of AclItems in
-// their textual represention for PostgreSQL.
-func encodeAclItemSlice(w *WriteBuf, oid Oid, aclitems []AclItem) error {
- strs := make([]string, len(aclitems))
- var escapedAclItem string
- var err error
- for i := range strs {
- escapedAclItem, err = escapeAclItem(string(aclitems[i]))
- if err != nil {
- return err
- }
- strs[i] = string(escapedAclItem)
- }
-
- var buf bytes.Buffer
- buf.WriteRune('{')
- buf.WriteString(strings.Join(strs, ","))
- buf.WriteRune('}')
- str := buf.String()
- w.WriteInt32(int32(len(str)))
- w.WriteBytes([]byte(str))
- return nil
-}
-// parseAclItemArray parses the textual representation
-// of the aclitem[] type. The textual representation is chosen because
-// Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin).
-// See https://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO
-// for formatting notes.
-func parseAclItemArray(arr string) ([]AclItem, error) {
- reader := strings.NewReader(arr)
- // Difficult to guess a performant initial capacity for a slice of
- // aclitems, but let's go with 5.
- aclItems := make([]AclItem, 0, 5)
- // A single value
- aclItem := AclItem("")
- for {
- // Grab the first/next/last rune to see if we are dealing with a
- // quoted value, an unquoted value, or the end of the string.
- rn, _, err := reader.ReadRune()
- if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error.
- return aclItems, nil
- }
- // This error was not expected
return nil, err
}
- if rn == '"' {
- // Discard the opening quote of the quoted value.
- aclItem, err = parseQuotedAclItem(reader)
- } else {
- // We have just read the first rune of an unquoted (bare) value;
- // put it back so that ParseBareValue can read it.
- err := reader.UnreadRune()
- if err != nil {
- return nil, err
- }
- aclItem, err = parseBareAclItem(reader)
- }
-
+ sp := len(buf)
+ buf = pgio.AppendInt32(buf, -1)
+ argBuf, err := value.(pgtype.BinaryEncoder).EncodeBinary(ci, buf)
if err != nil {
- if err == io.EOF {
- // Here, EOF is an expected end state, not an error..
- aclItems = append(aclItems, aclItem)
- return aclItems, nil
- }
- // This error was not expected.
return nil, err
}
- aclItems = append(aclItems, aclItem)
- }
-}
-
-// parseBareAclItem parses a bare (unquoted) aclitem from reader
-func parseBareAclItem(reader *strings.Reader) (AclItem, error) {
- var aclItem bytes.Buffer
- for {
- rn, _, err := reader.ReadRune()
- if err != nil {
- // Return the read value in case the error is a harmless io.EOF.
- // (io.EOF marks the end of a bare aclitem at the end of a string)
- return AclItem(aclItem.String()), err
- }
- if rn == ',' {
- // A comma marks the end of a bare aclitem.
- return AclItem(aclItem.String()), nil
- } else {
- aclItem.WriteRune(rn)
- }
- }
-}
-
-// parseQuotedAclItem parses an aclitem which is in double quotes from reader
-func parseQuotedAclItem(reader *strings.Reader) (AclItem, error) {
- var aclItem bytes.Buffer
- for {
- rn, escaped, err := readPossiblyEscapedRune(reader)
- if err != nil {
- if err == io.EOF {
- // Even when it is the last value, the final rune of
- // a quoted aclitem should be the final closing quote, not io.EOF.
- return AclItem(""), fmt.Errorf("unexpected end of quoted value")
- }
- // Return the read aclitem in case the error is a harmless io.EOF,
- // which will be determined by the caller.
- return AclItem(aclItem.String()), err
- }
- if !escaped && rn == '"' {
- // An unescaped double quote marks the end of a quoted value.
- // The next rune should either be a comma or the end of the string.
- rn, _, err := reader.ReadRune()
- if err != nil {
- // Return the read value in case the error is a harmless io.EOF,
- // which will be determined by the caller.
- return AclItem(aclItem.String()), err
- }
- if rn != ',' {
- return AclItem(""), fmt.Errorf("unexpected rune after quoted value")
- }
- return AclItem(aclItem.String()), nil
+ if argBuf != nil {
+ buf = argBuf
+ pgio.SetInt32(buf[sp:], int32(len(buf[sp:])-4))
}
- aclItem.WriteRune(rn)
+ return buf, nil
}
-}
-// Returns the next rune from r, unless it is a backslash;
-// in that case, it returns the rune after the backslash. The second
-// return value tells us whether or not the rune was
-// preceeded by a backslash (escaped).
-func readPossiblyEscapedRune(reader *strings.Reader) (rune, bool, error) {
- rn, _, err := reader.ReadRune()
- if err != nil {
- return 0, false, err
- }
- if rn == '\\' {
- // Discard the backslash and read the next rune.
- rn, _, err = reader.ReadRune()
+ if arg, ok := arg.(driver.Valuer); ok {
+ v, err := callValuerValue(arg)
if err != nil {
- return 0, false, err
- }
- return rn, true, nil
- }
- return rn, false, nil
-}
-
-func decodeAclItemArray(vr *ValueReader) []AclItem {
- if vr.Len() == -1 {
- vr.Fatal(ProtocolError("Cannot decode null into []AclItem"))
- return nil
- }
-
- str := vr.ReadString(vr.Len())
-
- // Short-circuit empty array.
- if str == "{}" {
- return []AclItem{}
- }
-
- // Remove the '{' at the front and the '}' at the end,
- // so that parseAclItemArray doesn't have to deal with them.
- str = str[1 : len(str)-1]
- aclItems, err := parseAclItemArray(str)
- if err != nil {
- vr.Fatal(ProtocolError(err.Error()))
- return nil
- }
- return aclItems
-}
-
-func encodeStringSlice(w *WriteBuf, oid Oid, slice []string) error {
- var elOid Oid
- switch oid {
- case VarcharArrayOid:
- elOid = VarcharOid
- case TextArrayOid:
- elOid = TextOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]string", oid)
- }
-
- var totalStringSize int
- for _, v := range slice {
- totalStringSize += len(v)
- }
-
- size := 20 + len(slice)*4 + totalStringSize
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, v := range slice {
- w.WriteInt32(int32(len(v)))
- w.WriteBytes([]byte(v))
- }
-
- return nil
-}
-
-func decodeTimestampArray(vr *ValueReader) []time.Time {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != TimestampArrayOid && vr.Type().DataType != TimestampTzArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []time.Time", vr.Type().DataType)))
- return nil
- }
-
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
-
- a := make([]time.Time, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- switch elSize {
- case 8:
- microsecSinceY2K := vr.ReadInt64()
- microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K
- a[i] = time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000)
- case -1:
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
- default:
- vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an time.Time element: %d", elSize)))
- return nil
+ return nil, err
}
+ return encodePreparedStatementArgument(ci, buf, oid, v)
}
- return a
-}
-
-func encodeTimeSlice(w *WriteBuf, oid Oid, slice []time.Time) error {
- var elOid Oid
- switch oid {
- case TimestampArrayOid:
- elOid = TimestampOid
- case TimestampTzArrayOid:
- elOid = TimestampTzOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]time.Time", oid)
- }
-
- encodeArrayHeader(w, int(elOid), len(slice), 12)
- for _, t := range slice {
- w.WriteInt32(8)
- microsecSinceUnixEpoch := t.Unix()*1000000 + int64(t.Nanosecond())/1000
- microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K
- w.WriteInt64(microsecSinceY2K)
+ if strippedArg, ok := stripNamedType(&refVal); ok {
+ return encodePreparedStatementArgument(ci, buf, oid, strippedArg)
}
-
- return nil
+ return nil, SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg))
}
-func decodeInetArray(vr *ValueReader) []net.IPNet {
- if vr.Len() == -1 {
- return nil
- }
-
- if vr.Type().DataType != InetArrayOid && vr.Type().DataType != CidrArrayOid {
- vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []net.IP", vr.Type().DataType)))
- return nil
+// chooseParameterFormatCode determines the correct format code for an
+// argument to a prepared statement. It defaults to TextFormatCode if no
+// determination can be made.
+func chooseParameterFormatCode(ci *pgtype.ConnInfo, oid pgtype.OID, arg interface{}) int16 {
+ switch arg.(type) {
+ case pgtype.BinaryEncoder:
+ return BinaryFormatCode
+ case string, *string, pgtype.TextEncoder:
+ return TextFormatCode
}
- if vr.Type().FormatCode != BinaryFormatCode {
- vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
- return nil
- }
-
- numElems, err := decode1dArrayHeader(vr)
- if err != nil {
- vr.Fatal(err)
- return nil
- }
+ if dt, ok := ci.DataTypeForOID(oid); ok {
+ if _, ok := dt.Value.(pgtype.BinaryEncoder); ok {
+ if arg, ok := arg.(driver.Valuer); ok {
+ if err := dt.Value.Set(arg); err != nil {
+ if value, err := callValuerValue(arg); err == nil {
+ if _, ok := value.(string); ok {
+ return TextFormatCode
+ }
+ }
+ }
+ }
- a := make([]net.IPNet, int(numElems))
- for i := 0; i < len(a); i++ {
- elSize := vr.ReadInt32()
- if elSize == -1 {
- vr.Fatal(ProtocolError("Cannot decode null element"))
- return nil
+ return BinaryFormatCode
}
-
- vr.ReadByte() // ignore family
- bits := vr.ReadByte()
- vr.ReadByte() // ignore is_cidr
- addressLength := vr.ReadByte()
-
- var ipnet net.IPNet
- ipnet.IP = vr.ReadBytes(int32(addressLength))
- ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8)
-
- a[i] = ipnet
- }
-
- return a
-}
-
-func encodeIPNetSlice(w *WriteBuf, oid Oid, slice []net.IPNet) error {
- var elOid Oid
- switch oid {
- case InetArrayOid:
- elOid = InetOid
- case CidrArrayOid:
- elOid = CidrOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid)
- }
-
- size := int32(20) // array header size
- for _, ipnet := range slice {
- size += 4 + 4 + int32(len(ipnet.IP)) // size of element + inet/cidr metadata + IP bytes
- }
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, ipnet := range slice {
- encodeIPNet(w, elOid, ipnet)
}
- return nil
+ return TextFormatCode
}
-func encodeIPSlice(w *WriteBuf, oid Oid, slice []net.IP) error {
- var elOid Oid
- switch oid {
- case InetArrayOid:
- elOid = InetOid
- case CidrArrayOid:
- elOid = CidrOid
- default:
- return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid)
- }
-
- size := int32(20) // array header size
- for _, ip := range slice {
- size += 4 + 4 + int32(len(ip)) // size of element + inet/cidr metadata + IP bytes
- }
- w.WriteInt32(int32(size))
-
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(elOid)) // type of elements
- w.WriteInt32(int32(len(slice))) // number of elements
- w.WriteInt32(1) // index of first element
-
- for _, ip := range slice {
- encodeIP(w, elOid, ip)
+func stripNamedType(val *reflect.Value) (interface{}, bool) {
+ switch val.Kind() {
+ case reflect.Int:
+ convVal := int(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int8:
+ convVal := int8(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int16:
+ convVal := int16(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int32:
+ convVal := int32(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Int64:
+ convVal := int64(val.Int())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint:
+ convVal := uint(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint8:
+ convVal := uint8(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint16:
+ convVal := uint16(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint32:
+ convVal := uint32(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.Uint64:
+ convVal := uint64(val.Uint())
+ return convVal, reflect.TypeOf(convVal) != val.Type()
+ case reflect.String:
+ convVal := val.String()
+ return convVal, reflect.TypeOf(convVal) != val.Type()
}
- return nil
-}
-
-func encodeArrayHeader(w *WriteBuf, oid, length, sizePerItem int) {
- w.WriteInt32(int32(20 + length*sizePerItem))
- w.WriteInt32(1) // number of dimensions
- w.WriteInt32(0) // no nulls
- w.WriteInt32(int32(oid)) // type of elements
- w.WriteInt32(int32(length)) // number of elements
- w.WriteInt32(1) // index of first element
+ return nil, false
}
diff --git a/vendor/github.com/jackc/pgx/values_test.go b/vendor/github.com/jackc/pgx/values_test.go
deleted file mode 100644
index 42d5bd3..0000000
--- a/vendor/github.com/jackc/pgx/values_test.go
+++ /dev/null
@@ -1,1183 +0,0 @@
-package pgx_test
-
-import (
- "bytes"
- "net"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/jackc/pgx"
-)
-
-func TestDateTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- dates := []time.Time{
- time.Date(1, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1000, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1600, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1700, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1800, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1900, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(1999, 12, 31, 0, 0, 0, 0, time.Local),
- time.Date(2000, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2001, 1, 2, 0, 0, 0, 0, time.Local),
- time.Date(2004, 2, 29, 0, 0, 0, 0, time.Local),
- time.Date(2013, 7, 4, 0, 0, 0, 0, time.Local),
- time.Date(2013, 12, 25, 0, 0, 0, 0, time.Local),
- time.Date(2029, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2081, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(2096, 2, 29, 0, 0, 0, 0, time.Local),
- time.Date(2550, 1, 1, 0, 0, 0, 0, time.Local),
- time.Date(9999, 12, 31, 0, 0, 0, 0, time.Local),
- }
-
- for _, actualDate := range dates {
- var d time.Time
-
- err := conn.QueryRow("select $1::date", actualDate).Scan(&d)
- if err != nil {
- t.Fatalf("Unexpected failure on QueryRow Scan: %v", err)
- }
- if !actualDate.Equal(d) {
- t.Errorf("Did not transcode date successfully: %v is not %v", d, actualDate)
- }
- }
-}
-
-func TestTimestampTzTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- inputTime := time.Date(2013, 1, 2, 3, 4, 5, 6000, time.Local)
-
- var outputTime time.Time
-
- err := conn.QueryRow("select $1::timestamptz", inputTime).Scan(&outputTime)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if !inputTime.Equal(outputTime) {
- t.Errorf("Did not transcode time successfully: %v is not %v", outputTime, inputTime)
- }
-
- err = conn.QueryRow("select $1::timestamptz", inputTime).Scan(&outputTime)
- if err != nil {
- t.Fatalf("QueryRow Scan failed: %v", err)
- }
- if !inputTime.Equal(outputTime) {
- t.Errorf("Did not transcode time successfully: %v is not %v", outputTime, inputTime)
- }
-}
-
-func TestJsonAndJsonbTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- for _, oid := range []pgx.Oid{pgx.JsonOid, pgx.JsonbOid} {
- if _, ok := conn.PgTypes[oid]; !ok {
- return // No JSON/JSONB type -- must be running against old PostgreSQL
- }
-
- for _, format := range []int16{pgx.TextFormatCode, pgx.BinaryFormatCode} {
- pgtype := conn.PgTypes[oid]
- pgtype.DefaultFormat = format
- conn.PgTypes[oid] = pgtype
-
- typename := conn.PgTypes[oid].Name
-
- testJsonString(t, conn, typename, format)
- testJsonStringPointer(t, conn, typename, format)
- testJsonSingleLevelStringMap(t, conn, typename, format)
- testJsonNestedMap(t, conn, typename, format)
- testJsonStringArray(t, conn, typename, format)
- testJsonInt64Array(t, conn, typename, format)
- testJsonInt16ArrayFailureDueToOverflow(t, conn, typename, format)
- testJsonStruct(t, conn, typename, format)
- }
- }
-}
-
-func testJsonString(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := `{"key": "value"}`
- expectedOutput := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(expectedOutput, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, expectedOutput, output)
- return
- }
-}
-
-func testJsonStringPointer(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := `{"key": "value"}`
- expectedOutput := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, &input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(expectedOutput, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, expectedOutput, output)
- return
- }
-}
-
-func testJsonSingleLevelStringMap(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := map[string]string{"key": "value"}
- var output map[string]string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode map[string]string successfully: %v is not %v", typename, format, input, output)
- return
- }
-}
-
-func testJsonNestedMap(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := map[string]interface{}{
- "name": "Uncanny",
- "stats": map[string]interface{}{"hp": float64(107), "maxhp": float64(150)},
- "inventory": []interface{}{"phone", "key"},
- }
- var output map[string]interface{}
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- return
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode map[string]interface{} successfully: %v is not %v", typename, format, input, output)
- return
- }
-}
-
-func testJsonStringArray(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []string{"foo", "bar", "baz"}
- var output []string
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode []string successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func testJsonInt64Array(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []int64{1, 2, 234432}
- var output []int64
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode []int64 successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func testJsonInt16ArrayFailureDueToOverflow(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- input := []int{1, 2, 234432}
- var output []int16
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err == nil || err.Error() != "can't scan into dest[0]: json: cannot unmarshal number 234432 into Go value of type int16" {
- t.Errorf("%s %d: Expected *json.UnmarkalTypeError, but got %v", typename, format, err)
- }
-}
-
-func testJsonStruct(t *testing.T, conn *pgx.Conn, typename string, format int16) {
- type person struct {
- Name string `json:"name"`
- Age int `json:"age"`
- }
-
- input := person{
- Name: "John",
- Age: 42,
- }
-
- var output person
-
- err := conn.QueryRow("select $1::"+typename, input).Scan(&output)
- if err != nil {
- t.Errorf("%s %d: QueryRow Scan failed: %v", typename, format, err)
- }
-
- if !reflect.DeepEqual(input, output) {
- t.Errorf("%s %d: Did not transcode struct successfully: %v is not %v", typename, format, input, output)
- }
-}
-
-func mustParseCIDR(t *testing.T, s string) net.IPNet {
- _, ipnet, err := net.ParseCIDR(s)
- if err != nil {
- t.Fatal(err)
- }
-
- return *ipnet
-}
-
-func TestStringToNotTextTypeTranscode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- input := "01086ee0-4963-4e35-9116-30c173a8d0bd"
-
- var output string
- err := conn.QueryRow("select $1::uuid", input).Scan(&output)
- if err != nil {
- t.Fatal(err)
- }
- if input != output {
- t.Errorf("uuid: Did not transcode string successfully: %s is not %s", input, output)
- }
-
- err = conn.QueryRow("select $1::uuid", &input).Scan(&output)
- if err != nil {
- t.Fatal(err)
- }
- if input != output {
- t.Errorf("uuid: Did not transcode pointer to string successfully: %s is not %s", input, output)
- }
-}
-
-func TestInetCidrTranscodeIPNet(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value net.IPNet
- }{
- {"select $1::inet", mustParseCIDR(t, "0.0.0.0/32")},
- {"select $1::inet", mustParseCIDR(t, "127.0.0.1/32")},
- {"select $1::inet", mustParseCIDR(t, "12.34.56.0/32")},
- {"select $1::inet", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::inet", mustParseCIDR(t, "255.0.0.0/8")},
- {"select $1::inet", mustParseCIDR(t, "255.255.255.255/32")},
- {"select $1::inet", mustParseCIDR(t, "::/128")},
- {"select $1::inet", mustParseCIDR(t, "::/0")},
- {"select $1::inet", mustParseCIDR(t, "::1/128")},
- {"select $1::inet", mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128")},
- {"select $1::cidr", mustParseCIDR(t, "0.0.0.0/32")},
- {"select $1::cidr", mustParseCIDR(t, "127.0.0.1/32")},
- {"select $1::cidr", mustParseCIDR(t, "12.34.56.0/32")},
- {"select $1::cidr", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::cidr", mustParseCIDR(t, "255.0.0.0/8")},
- {"select $1::cidr", mustParseCIDR(t, "255.255.255.255/32")},
- {"select $1::cidr", mustParseCIDR(t, "::/128")},
- {"select $1::cidr", mustParseCIDR(t, "::/0")},
- {"select $1::cidr", mustParseCIDR(t, "::1/128")},
- {"select $1::cidr", mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128")},
- }
-
- for i, tt := range tests {
- var actual net.IPNet
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if actual.String() != tt.value.String() {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrTranscodeIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value net.IP
- }{
- {"select $1::inet", net.ParseIP("0.0.0.0")},
- {"select $1::inet", net.ParseIP("127.0.0.1")},
- {"select $1::inet", net.ParseIP("12.34.56.0")},
- {"select $1::inet", net.ParseIP("255.255.255.255")},
- {"select $1::inet", net.ParseIP("::1")},
- {"select $1::inet", net.ParseIP("2607:f8b0:4009:80b::200e")},
- {"select $1::cidr", net.ParseIP("0.0.0.0")},
- {"select $1::cidr", net.ParseIP("127.0.0.1")},
- {"select $1::cidr", net.ParseIP("12.34.56.0")},
- {"select $1::cidr", net.ParseIP("255.255.255.255")},
- {"select $1::cidr", net.ParseIP("::1")},
- {"select $1::cidr", net.ParseIP("2607:f8b0:4009:80b::200e")},
- }
-
- for i, tt := range tests {
- var actual net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !actual.Equal(tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failTests := []struct {
- sql string
- value net.IPNet
- }{
- {"select $1::inet", mustParseCIDR(t, "192.168.1.0/24")},
- {"select $1::cidr", mustParseCIDR(t, "192.168.1.0/24")},
- }
- for i, tt := range failTests {
- var actual net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if !strings.Contains(err.Error(), "Cannot decode netmask") {
- t.Errorf("%d. Expected failure cannot decode netmask, but got: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrArrayTranscodeIPNet(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value []net.IPNet
- }{
- {
- "select $1::inet[]",
- []net.IPNet{
- mustParseCIDR(t, "0.0.0.0/32"),
- mustParseCIDR(t, "127.0.0.1/32"),
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- mustParseCIDR(t, "255.0.0.0/8"),
- mustParseCIDR(t, "255.255.255.255/32"),
- mustParseCIDR(t, "::/128"),
- mustParseCIDR(t, "::/0"),
- mustParseCIDR(t, "::1/128"),
- mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IPNet{
- mustParseCIDR(t, "0.0.0.0/32"),
- mustParseCIDR(t, "127.0.0.1/32"),
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- mustParseCIDR(t, "255.0.0.0/8"),
- mustParseCIDR(t, "255.255.255.255/32"),
- mustParseCIDR(t, "::/128"),
- mustParseCIDR(t, "::/0"),
- mustParseCIDR(t, "::1/128"),
- mustParseCIDR(t, "2607:f8b0:4009:80b::200e/128"),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []net.IPNet
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrArrayTranscodeIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value []net.IP
- }{
- {
- "select $1::inet[]",
- []net.IP{
- net.ParseIP("0.0.0.0"),
- net.ParseIP("127.0.0.1"),
- net.ParseIP("12.34.56.0"),
- net.ParseIP("255.255.255.255"),
- net.ParseIP("2607:f8b0:4009:80b::200e"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IP{
- net.ParseIP("0.0.0.0"),
- net.ParseIP("127.0.0.1"),
- net.ParseIP("12.34.56.0"),
- net.ParseIP("255.255.255.255"),
- net.ParseIP("2607:f8b0:4009:80b::200e"),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.value) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-
- failTests := []struct {
- sql string
- value []net.IPNet
- }{
- {
- "select $1::inet[]",
- []net.IPNet{
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- },
- },
- {
- "select $1::cidr[]",
- []net.IPNet{
- mustParseCIDR(t, "12.34.56.0/32"),
- mustParseCIDR(t, "192.168.1.0/24"),
- },
- },
- }
-
- for i, tt := range failTests {
- var actual []net.IP
-
- err := conn.QueryRow(tt.sql, tt.value).Scan(&actual)
- if err == nil || !strings.Contains(err.Error(), "Cannot decode netmask") {
- t.Errorf("%d. Expected failure cannot decode netmask, but got: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestInetCidrTranscodeWithJustIP(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- value string
- }{
- {"select $1::inet", "0.0.0.0/32"},
- {"select $1::inet", "127.0.0.1/32"},
- {"select $1::inet", "12.34.56.0/32"},
- {"select $1::inet", "255.255.255.255/32"},
- {"select $1::inet", "::/128"},
- {"select $1::inet", "2607:f8b0:4009:80b::200e/128"},
- {"select $1::cidr", "0.0.0.0/32"},
- {"select $1::cidr", "127.0.0.1/32"},
- {"select $1::cidr", "12.34.56.0/32"},
- {"select $1::cidr", "255.255.255.255/32"},
- {"select $1::cidr", "::/128"},
- {"select $1::cidr", "2607:f8b0:4009:80b::200e/128"},
- }
-
- for i, tt := range tests {
- expected := mustParseCIDR(t, tt.value)
- var actual net.IPNet
-
- err := conn.QueryRow(tt.sql, expected.IP).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, value -> %v)", i, err, tt.sql, tt.value)
- continue
- }
-
- if actual.String() != expected.String() {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.value, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestNullX(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s pgx.NullString
- i16 pgx.NullInt16
- i32 pgx.NullInt32
- c pgx.NullChar
- a pgx.NullAclItem
- n pgx.NullName
- oid pgx.NullOid
- xid pgx.NullXid
- cid pgx.NullCid
- tid pgx.NullTid
- i64 pgx.NullInt64
- f32 pgx.NullFloat32
- f64 pgx.NullFloat64
- b pgx.NullBool
- t pgx.NullTime
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{pgx.NullString{String: "foo", Valid: true}}, []interface{}{&actual.s}, allTypes{s: pgx.NullString{String: "foo", Valid: true}}},
- {"select $1::text", []interface{}{pgx.NullString{String: "foo", Valid: false}}, []interface{}{&actual.s}, allTypes{s: pgx.NullString{String: "", Valid: false}}},
- {"select $1::int2", []interface{}{pgx.NullInt16{Int16: 1, Valid: true}}, []interface{}{&actual.i16}, allTypes{i16: pgx.NullInt16{Int16: 1, Valid: true}}},
- {"select $1::int2", []interface{}{pgx.NullInt16{Int16: 1, Valid: false}}, []interface{}{&actual.i16}, allTypes{i16: pgx.NullInt16{Int16: 0, Valid: false}}},
- {"select $1::int4", []interface{}{pgx.NullInt32{Int32: 1, Valid: true}}, []interface{}{&actual.i32}, allTypes{i32: pgx.NullInt32{Int32: 1, Valid: true}}},
- {"select $1::int4", []interface{}{pgx.NullInt32{Int32: 1, Valid: false}}, []interface{}{&actual.i32}, allTypes{i32: pgx.NullInt32{Int32: 0, Valid: false}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 1, Valid: true}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 1, Valid: true}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 1, Valid: false}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 0, Valid: false}}},
- {"select $1::oid", []interface{}{pgx.NullOid{Oid: 4294967295, Valid: true}}, []interface{}{&actual.oid}, allTypes{oid: pgx.NullOid{Oid: 4294967295, Valid: true}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 1, Valid: true}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 1, Valid: true}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 1, Valid: false}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 0, Valid: false}}},
- {"select $1::xid", []interface{}{pgx.NullXid{Xid: 4294967295, Valid: true}}, []interface{}{&actual.xid}, allTypes{xid: pgx.NullXid{Xid: 4294967295, Valid: true}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 1, Valid: true}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 1, Valid: true}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 1, Valid: false}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 0, Valid: false}}},
- {"select $1::\"char\"", []interface{}{pgx.NullChar{Char: 255, Valid: true}}, []interface{}{&actual.c}, allTypes{c: pgx.NullChar{Char: 255, Valid: true}}},
- {"select $1::name", []interface{}{pgx.NullName{Name: "foo", Valid: true}}, []interface{}{&actual.n}, allTypes{n: pgx.NullName{Name: "foo", Valid: true}}},
- {"select $1::name", []interface{}{pgx.NullName{Name: "foo", Valid: false}}, []interface{}{&actual.n}, allTypes{n: pgx.NullName{Name: "", Valid: false}}},
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: true}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: true}}},
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: "postgres=arwdDxt/postgres", Valid: false}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: "", Valid: false}}},
- // A tricky (and valid) aclitem can still be used, especially with Go's useful backticks
- {"select $1::aclitem", []interface{}{pgx.NullAclItem{AclItem: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}}, []interface{}{&actual.a}, allTypes{a: pgx.NullAclItem{AclItem: `postgres=arwdDxt/" tricky, ' } "" \ test user "`, Valid: true}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 1, Valid: true}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 1, Valid: true}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 1, Valid: false}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 0, Valid: false}}},
- {"select $1::cid", []interface{}{pgx.NullCid{Cid: 4294967295, Valid: true}}, []interface{}{&actual.cid}, allTypes{cid: pgx.NullCid{Cid: 4294967295, Valid: true}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: true}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: true}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 1, OffsetNumber: 1}, Valid: false}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 0, OffsetNumber: 0}, Valid: false}}},
- {"select $1::tid", []interface{}{pgx.NullTid{Tid: pgx.Tid{BlockNumber: 4294967295, OffsetNumber: 65535}, Valid: true}}, []interface{}{&actual.tid}, allTypes{tid: pgx.NullTid{Tid: pgx.Tid{BlockNumber: 4294967295, OffsetNumber: 65535}, Valid: true}}},
- {"select $1::int8", []interface{}{pgx.NullInt64{Int64: 1, Valid: true}}, []interface{}{&actual.i64}, allTypes{i64: pgx.NullInt64{Int64: 1, Valid: true}}},
- {"select $1::int8", []interface{}{pgx.NullInt64{Int64: 1, Valid: false}}, []interface{}{&actual.i64}, allTypes{i64: pgx.NullInt64{Int64: 0, Valid: false}}},
- {"select $1::float4", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: true}}, []interface{}{&actual.f32}, allTypes{f32: pgx.NullFloat32{Float32: 1.23, Valid: true}}},
- {"select $1::float4", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: false}}, []interface{}{&actual.f32}, allTypes{f32: pgx.NullFloat32{Float32: 0, Valid: false}}},
- {"select $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.f64}, allTypes{f64: pgx.NullFloat64{Float64: 1.23, Valid: true}}},
- {"select $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: false}}, []interface{}{&actual.f64}, allTypes{f64: pgx.NullFloat64{Float64: 0, Valid: false}}},
- {"select $1::bool", []interface{}{pgx.NullBool{Bool: true, Valid: true}}, []interface{}{&actual.b}, allTypes{b: pgx.NullBool{Bool: true, Valid: true}}},
- {"select $1::bool", []interface{}{pgx.NullBool{Bool: true, Valid: false}}, []interface{}{&actual.b}, allTypes{b: pgx.NullBool{Bool: false, Valid: false}}},
- {"select $1::timestamptz", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}},
- {"select $1::timestamptz", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select $1::timestamp", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}},
- {"select $1::timestamp", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select $1::date", []interface{}{pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: true}}},
- {"select $1::date", []interface{}{pgx.NullTime{Time: time.Date(1990, 1, 1, 0, 0, 0, 0, time.Local), Valid: false}}, []interface{}{&actual.t}, allTypes{t: pgx.NullTime{Time: time.Time{}, Valid: false}}},
- {"select 42::int4, $1::float8", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.i32, &actual.f64}, allTypes{i32: pgx.NullInt32{Int32: 42, Valid: true}, f64: pgx.NullFloat64{Float64: 1.23, Valid: true}}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if actual != tt.expected {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func assertAclItemSlicesEqual(t *testing.T, query, scan []pgx.AclItem) {
- if !reflect.DeepEqual(query, scan) {
- t.Errorf("failed to encode aclitem[]\n EXPECTED: %d %v\n ACTUAL: %d %v", len(query), query, len(scan), scan)
- }
-}
-
-func TestAclArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- sql := "select $1::aclitem[]"
- var scan []pgx.AclItem
-
- tests := []struct {
- query []pgx.AclItem
- }{
- {
- []pgx.AclItem{},
- },
- {
- []pgx.AclItem{"=r/postgres"},
- },
- {
- []pgx.AclItem{"=r/postgres", "postgres=arwdDxt/postgres"},
- },
- {
- []pgx.AclItem{"=r/postgres", "postgres=arwdDxt/postgres", `postgres=arwdDxt/" tricky, ' } "" \ test user "`},
- },
- }
- for i, tt := range tests {
- err := conn.QueryRow(sql, tt.query).Scan(&scan)
- if err != nil {
- // t.Errorf(`%d. error reading array: %v`, i, err)
- t.Errorf(`%d. error reading array: %v query: %s`, i, err, tt.query)
- if pgerr, ok := err.(pgx.PgError); ok {
- t.Errorf(`%d. error reading array (detail): %s`, i, pgerr.Detail)
- }
- continue
- }
- assertAclItemSlicesEqual(t, tt.query, scan)
- ensureConnValid(t, conn)
- }
-}
-
-func TestArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- query interface{}
- scan interface{}
- assert func(*testing.T, interface{}, interface{})
- }{
- {
- "select $1::bool[]", []bool{true, false, true}, &[]bool{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]bool))) {
- t.Errorf("failed to encode bool[]")
- }
- },
- },
- {
- "select $1::smallint[]", []int16{2, 4, 484, 32767}, &[]int16{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int16))) {
- t.Errorf("failed to encode smallint[]")
- }
- },
- },
- {
- "select $1::smallint[]", []uint16{2, 4, 484, 32767}, &[]uint16{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint16))) {
- t.Errorf("failed to encode smallint[]")
- }
- },
- },
- {
- "select $1::int[]", []int32{2, 4, 484}, &[]int32{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int32))) {
- t.Errorf("failed to encode int[]")
- }
- },
- },
- {
- "select $1::int[]", []uint32{2, 4, 484, 2147483647}, &[]uint32{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint32))) {
- t.Errorf("failed to encode int[]")
- }
- },
- },
- {
- "select $1::bigint[]", []int64{2, 4, 484, 9223372036854775807}, &[]int64{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]int64))) {
- t.Errorf("failed to encode bigint[]")
- }
- },
- },
- {
- "select $1::bigint[]", []uint64{2, 4, 484, 9223372036854775807}, &[]uint64{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]uint64))) {
- t.Errorf("failed to encode bigint[]")
- }
- },
- },
- {
- "select $1::text[]", []string{"it's", "over", "9000!"}, &[]string{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]string))) {
- t.Errorf("failed to encode text[]")
- }
- },
- },
- {
- "select $1::timestamp[]", []time.Time{time.Unix(323232, 0), time.Unix(3239949334, 00)}, &[]time.Time{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]time.Time))) {
- t.Errorf("failed to encode time.Time[] to timestamp[]")
- }
- },
- },
- {
- "select $1::timestamptz[]", []time.Time{time.Unix(323232, 0), time.Unix(3239949334, 00)}, &[]time.Time{},
- func(t *testing.T, query, scan interface{}) {
- if !reflect.DeepEqual(query, *(scan.(*[]time.Time))) {
- t.Errorf("failed to encode time.Time[] to timestamptz[]")
- }
- },
- },
- {
- "select $1::bytea[]", [][]byte{{0, 1, 2, 3}, {4, 5, 6, 7}}, &[][]byte{},
- func(t *testing.T, query, scan interface{}) {
- queryBytesSliceSlice := query.([][]byte)
- scanBytesSliceSlice := *(scan.(*[][]byte))
- if len(queryBytesSliceSlice) != len(scanBytesSliceSlice) {
- t.Errorf("failed to encode byte[][] to bytea[]: expected %d to equal %d", len(queryBytesSliceSlice), len(scanBytesSliceSlice))
- }
- for i := range queryBytesSliceSlice {
- qb := queryBytesSliceSlice[i]
- sb := scanBytesSliceSlice[i]
- if !bytes.Equal(qb, sb) {
- t.Errorf("failed to encode byte[][] to bytea[]: expected %v to equal %v", qb, sb)
- }
- }
- },
- },
- }
-
- for i, tt := range tests {
- err := conn.QueryRow(tt.sql, tt.query).Scan(tt.scan)
- if err != nil {
- t.Errorf(`%d. error reading array: %v`, i, err)
- continue
- }
- tt.assert(t, tt.query, tt.scan)
- ensureConnValid(t, conn)
- }
-}
-
-type shortScanner struct{}
-
-func (*shortScanner) Scan(r *pgx.ValueReader) error {
- r.ReadByte()
- return nil
-}
-
-func TestShortScanner(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- rows, err := conn.Query("select 'ab', 'cd' union select 'cd', 'ef'")
- if err != nil {
- t.Error(err)
- }
- defer rows.Close()
-
- for rows.Next() {
- var s1, s2 shortScanner
- err = rows.Scan(&s1, &s2)
- if err != nil {
- t.Error(err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestEmptyArrayDecoding(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- var val []string
-
- err := conn.QueryRow("select array[]::text[]").Scan(&val)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- if len(val) != 0 {
- t.Errorf("Expected 0 values, got %d", len(val))
- }
-
- var n, m int32
-
- err = conn.QueryRow("select 1::integer, array[]::text[], 42::integer").Scan(&n, &val, &m)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- if len(val) != 0 {
- t.Errorf("Expected 0 values, got %d", len(val))
- }
- if n != 1 {
- t.Errorf("Expected n to be 1, but it was %d", n)
- }
- if m != 42 {
- t.Errorf("Expected n to be 42, but it was %d", n)
- }
-
- rows, err := conn.Query("select 1::integer, array['test']::text[] union select 2::integer, array[]::text[] union select 3::integer, array['test']::text[]")
- if err != nil {
- t.Errorf(`error retrieving rows with array: %v`, err)
- }
- defer rows.Close()
-
- for rows.Next() {
- err = rows.Scan(&n, &val)
- if err != nil {
- t.Errorf(`error reading array: %v`, err)
- }
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestNullXMismatch(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s pgx.NullString
- i16 pgx.NullInt16
- i32 pgx.NullInt32
- i64 pgx.NullInt64
- f32 pgx.NullFloat32
- f64 pgx.NullFloat64
- b pgx.NullBool
- t pgx.NullTime
- }
-
- var actual, zero allTypes
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- err string
- }{
- {"select $1::date", []interface{}{pgx.NullString{String: "foo", Valid: true}}, []interface{}{&actual.s}, "invalid input syntax for type date"},
- {"select $1::date", []interface{}{pgx.NullInt16{Int16: 1, Valid: true}}, []interface{}{&actual.i16}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullInt32{Int32: 1, Valid: true}}, []interface{}{&actual.i32}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullInt64{Int64: 1, Valid: true}}, []interface{}{&actual.i64}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullFloat32{Float32: 1.23, Valid: true}}, []interface{}{&actual.f32}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullFloat64{Float64: 1.23, Valid: true}}, []interface{}{&actual.f64}, "cannot encode into OID 1082"},
- {"select $1::date", []interface{}{pgx.NullBool{Bool: true, Valid: true}}, []interface{}{&actual.b}, "cannot encode into OID 1082"},
- {"select $1::int4", []interface{}{pgx.NullTime{Time: time.Unix(123, 5000), Valid: true}}, []interface{}{&actual.t}, "cannot encode into OID 23"},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err == nil || !strings.Contains(err.Error(), tt.err) {
- t.Errorf(`%d. Expected error to contain "%s", but it didn't: %v`, i, tt.err, err)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestPointerPointer(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type allTypes struct {
- s *string
- i16 *int16
- i32 *int32
- i64 *int64
- f32 *float32
- f64 *float64
- b *bool
- t *time.Time
- }
-
- var actual, zero, expected allTypes
-
- {
- s := "foo"
- expected.s = &s
- i16 := int16(1)
- expected.i16 = &i16
- i32 := int32(1)
- expected.i32 = &i32
- i64 := int64(1)
- expected.i64 = &i64
- f32 := float32(1.23)
- expected.f32 = &f32
- f64 := float64(1.23)
- expected.f64 = &f64
- b := true
- expected.b = &b
- t := time.Unix(123, 5000)
- expected.t = &t
- }
-
- tests := []struct {
- sql string
- queryArgs []interface{}
- scanArgs []interface{}
- expected allTypes
- }{
- {"select $1::text", []interface{}{expected.s}, []interface{}{&actual.s}, allTypes{s: expected.s}},
- {"select $1::text", []interface{}{zero.s}, []interface{}{&actual.s}, allTypes{}},
- {"select $1::int2", []interface{}{expected.i16}, []interface{}{&actual.i16}, allTypes{i16: expected.i16}},
- {"select $1::int2", []interface{}{zero.i16}, []interface{}{&actual.i16}, allTypes{}},
- {"select $1::int4", []interface{}{expected.i32}, []interface{}{&actual.i32}, allTypes{i32: expected.i32}},
- {"select $1::int4", []interface{}{zero.i32}, []interface{}{&actual.i32}, allTypes{}},
- {"select $1::int8", []interface{}{expected.i64}, []interface{}{&actual.i64}, allTypes{i64: expected.i64}},
- {"select $1::int8", []interface{}{zero.i64}, []interface{}{&actual.i64}, allTypes{}},
- {"select $1::float4", []interface{}{expected.f32}, []interface{}{&actual.f32}, allTypes{f32: expected.f32}},
- {"select $1::float4", []interface{}{zero.f32}, []interface{}{&actual.f32}, allTypes{}},
- {"select $1::float8", []interface{}{expected.f64}, []interface{}{&actual.f64}, allTypes{f64: expected.f64}},
- {"select $1::float8", []interface{}{zero.f64}, []interface{}{&actual.f64}, allTypes{}},
- {"select $1::bool", []interface{}{expected.b}, []interface{}{&actual.b}, allTypes{b: expected.b}},
- {"select $1::bool", []interface{}{zero.b}, []interface{}{&actual.b}, allTypes{}},
- {"select $1::timestamptz", []interface{}{expected.t}, []interface{}{&actual.t}, allTypes{t: expected.t}},
- {"select $1::timestamptz", []interface{}{zero.t}, []interface{}{&actual.t}, allTypes{}},
- {"select $1::timestamp", []interface{}{expected.t}, []interface{}{&actual.t}, allTypes{t: expected.t}},
- {"select $1::timestamp", []interface{}{zero.t}, []interface{}{&actual.t}, allTypes{}},
- }
-
- for i, tt := range tests {
- actual = zero
-
- err := conn.QueryRow(tt.sql, tt.queryArgs...).Scan(tt.scanArgs...)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v, queryArgs -> %v)", i, err, tt.sql, tt.queryArgs)
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v, queryArgs -> %v)", i, tt.expected, actual, tt.sql, tt.queryArgs)
- }
-
- ensureConnValid(t, conn)
- }
-}
-
-func TestPointerPointerNonZero(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- f := "foo"
- dest := &f
-
- err := conn.QueryRow("select $1::text", nil).Scan(&dest)
- if err != nil {
- t.Errorf("Unexpected failure scanning: %v", err)
- }
- if dest != nil {
- t.Errorf("Expected dest to be nil, got %#v", dest)
- }
-}
-
-func TestEncodeTypeRename(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- type _int int
- inInt := _int(3)
- var outInt _int
-
- type _int8 int8
- inInt8 := _int8(3)
- var outInt8 _int8
-
- type _int16 int16
- inInt16 := _int16(3)
- var outInt16 _int16
-
- type _int32 int32
- inInt32 := _int32(4)
- var outInt32 _int32
-
- type _int64 int64
- inInt64 := _int64(5)
- var outInt64 _int64
-
- type _uint uint
- inUint := _uint(6)
- var outUint _uint
-
- type _uint8 uint8
- inUint8 := _uint8(7)
- var outUint8 _uint8
-
- type _uint16 uint16
- inUint16 := _uint16(8)
- var outUint16 _uint16
-
- type _uint32 uint32
- inUint32 := _uint32(9)
- var outUint32 _uint32
-
- type _uint64 uint64
- inUint64 := _uint64(10)
- var outUint64 _uint64
-
- type _string string
- inString := _string("foo")
- var outString _string
-
- err := conn.QueryRow("select $1::int, $2::int, $3::int2, $4::int4, $5::int8, $6::int, $7::int, $8::int, $9::int, $10::int, $11::text",
- inInt, inInt8, inInt16, inInt32, inInt64, inUint, inUint8, inUint16, inUint32, inUint64, inString,
- ).Scan(&outInt, &outInt8, &outInt16, &outInt32, &outInt64, &outUint, &outUint8, &outUint16, &outUint32, &outUint64, &outString)
- if err != nil {
- t.Fatalf("Failed with type rename: %v", err)
- }
-
- if inInt != outInt {
- t.Errorf("int rename: expected %v, got %v", inInt, outInt)
- }
-
- if inInt8 != outInt8 {
- t.Errorf("int8 rename: expected %v, got %v", inInt8, outInt8)
- }
-
- if inInt16 != outInt16 {
- t.Errorf("int16 rename: expected %v, got %v", inInt16, outInt16)
- }
-
- if inInt32 != outInt32 {
- t.Errorf("int32 rename: expected %v, got %v", inInt32, outInt32)
- }
-
- if inInt64 != outInt64 {
- t.Errorf("int64 rename: expected %v, got %v", inInt64, outInt64)
- }
-
- if inUint != outUint {
- t.Errorf("uint rename: expected %v, got %v", inUint, outUint)
- }
-
- if inUint8 != outUint8 {
- t.Errorf("uint8 rename: expected %v, got %v", inUint8, outUint8)
- }
-
- if inUint16 != outUint16 {
- t.Errorf("uint16 rename: expected %v, got %v", inUint16, outUint16)
- }
-
- if inUint32 != outUint32 {
- t.Errorf("uint32 rename: expected %v, got %v", inUint32, outUint32)
- }
-
- if inUint64 != outUint64 {
- t.Errorf("uint64 rename: expected %v, got %v", inUint64, outUint64)
- }
-
- if inString != outString {
- t.Errorf("string rename: expected %v, got %v", inString, outString)
- }
-
- ensureConnValid(t, conn)
-}
-
-func TestRowDecode(t *testing.T) {
- t.Parallel()
-
- conn := mustConnect(t, *defaultConnConfig)
- defer closeConn(t, conn)
-
- tests := []struct {
- sql string
- expected []interface{}
- }{
- {
- "select row(1, 'cat', '2015-01-01 08:12:42-00'::timestamptz)",
- []interface{}{
- int32(1),
- "cat",
- time.Date(2015, 1, 1, 8, 12, 42, 0, time.UTC).Local(),
- },
- },
- }
-
- for i, tt := range tests {
- var actual []interface{}
-
- err := conn.QueryRow(tt.sql).Scan(&actual)
- if err != nil {
- t.Errorf("%d. Unexpected failure: %v (sql -> %v)", i, err, tt.sql)
- continue
- }
-
- if !reflect.DeepEqual(actual, tt.expected) {
- t.Errorf("%d. Expected %v, got %v (sql -> %v)", i, tt.expected, actual, tt.sql)
- }
-
- ensureConnValid(t, conn)
- }
-}