path: root/vendor/github.com/golang/geo
Diffstat (limited to 'vendor/github.com/golang/geo')
-rw-r--r--  vendor/github.com/golang/geo/r1/LICENSE          |  202
-rw-r--r--  vendor/github.com/golang/geo/r1/doc.go           |   22
-rw-r--r--  vendor/github.com/golang/geo/r1/interval.go      |  161
-rw-r--r--  vendor/github.com/golang/geo/r2/LICENSE          |  202
-rw-r--r--  vendor/github.com/golang/geo/r2/doc.go           |   22
-rw-r--r--  vendor/github.com/golang/geo/r2/rect.go          |  257
-rw-r--r--  vendor/github.com/golang/geo/r3/LICENSE          |  202
-rw-r--r--  vendor/github.com/golang/geo/r3/doc.go           |   22
-rw-r--r--  vendor/github.com/golang/geo/r3/precisevector.go |  200
-rw-r--r--  vendor/github.com/golang/geo/r3/vector.go        |  184
-rw-r--r--  vendor/github.com/golang/geo/s1/LICENSE          |  202
-rw-r--r--  vendor/github.com/golang/geo/s1/angle.go         |  119
-rw-r--r--  vendor/github.com/golang/geo/s1/chordangle.go    |  202
-rw-r--r--  vendor/github.com/golang/geo/s1/doc.go           |   22
-rw-r--r--  vendor/github.com/golang/geo/s1/interval.go      |  350
-rw-r--r--  vendor/github.com/golang/geo/s2/LICENSE          |  202
-rw-r--r--  vendor/github.com/golang/geo/s2/cap.go           |  406
-rw-r--r--  vendor/github.com/golang/geo/s2/cell.go          |  385
-rw-r--r--  vendor/github.com/golang/geo/s2/cellid.go        |  729
-rw-r--r--  vendor/github.com/golang/geo/s2/cellunion.go     |  236
-rw-r--r--  vendor/github.com/golang/geo/s2/doc.go           |   31
-rw-r--r--  vendor/github.com/golang/geo/s2/edgeutil.go      | 1293
-rw-r--r--  vendor/github.com/golang/geo/s2/latlng.go        |   96
-rw-r--r--  vendor/github.com/golang/geo/s2/loop.go          |  282
-rw-r--r--  vendor/github.com/golang/geo/s2/matrix3x3.go     |  127
-rw-r--r--  vendor/github.com/golang/geo/s2/metric.go        |  166
-rw-r--r--  vendor/github.com/golang/geo/s2/paddedcell.go    |  254
-rw-r--r--  vendor/github.com/golang/geo/s2/point.go         |  291
-rw-r--r--  vendor/github.com/golang/geo/s2/polygon.go       |  211
-rw-r--r--  vendor/github.com/golang/geo/s2/polyline.go      |  177
-rw-r--r--  vendor/github.com/golang/geo/s2/predicates.go    |  238
-rw-r--r--  vendor/github.com/golang/geo/s2/rect.go          |  426
-rw-r--r--  vendor/github.com/golang/geo/s2/region.go        |   50
-rw-r--r--  vendor/github.com/golang/geo/s2/regioncoverer.go |  465
-rw-r--r--  vendor/github.com/golang/geo/s2/shapeindex.go    |  202
-rw-r--r--  vendor/github.com/golang/geo/s2/stuv.go          |  310
36 files changed, 8946 insertions, 0 deletions
diff --git a/vendor/github.com/golang/geo/r1/LICENSE b/vendor/github.com/golang/geo/r1/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/geo/r1/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/r1/doc.go b/vendor/github.com/golang/geo/r1/doc.go
new file mode 100644
index 0000000..85f0cdc
--- /dev/null
+++ b/vendor/github.com/golang/geo/r1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package r1 implements types and functions for working with geometry in ℝ¹.
+
+See ../s2 for a more detailed overview.
+*/
+package r1
diff --git a/vendor/github.com/golang/geo/r1/interval.go b/vendor/github.com/golang/geo/r1/interval.go
new file mode 100644
index 0000000..00fc64a
--- /dev/null
+++ b/vendor/github.com/golang/geo/r1/interval.go
@@ -0,0 +1,161 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r1
+
+import (
+ "fmt"
+ "math"
+)
+
+// Interval represents a closed interval on ℝ.
+// Zero-length intervals (where Lo == Hi) represent single points.
+// If Lo > Hi then the interval is empty.
+type Interval struct {
+ Lo, Hi float64
+}
+
+// EmptyInterval returns an empty interval.
+func EmptyInterval() Interval { return Interval{1, 0} }
+
+// IntervalFromPoint returns an interval representing a single point.
+func IntervalFromPoint(p float64) Interval { return Interval{p, p} }
+
+// IsEmpty reports whether the interval is empty.
+func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }
+
+// Equal returns true iff the interval contains the same points as oi.
+func (i Interval) Equal(oi Interval) bool {
+ return i == oi || i.IsEmpty() && oi.IsEmpty()
+}
+
+// Center returns the midpoint of the interval.
+// It is undefined for empty intervals.
+func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }
+
+// Length returns the length of the interval.
+// The length of an empty interval is negative.
+func (i Interval) Length() float64 { return i.Hi - i.Lo }
+
+// Contains returns true iff the interval contains p.
+func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }
+
+// ContainsInterval returns true iff the interval contains oi.
+func (i Interval) ContainsInterval(oi Interval) bool {
+ if oi.IsEmpty() {
+ return true
+ }
+ return i.Lo <= oi.Lo && oi.Hi <= i.Hi
+}
+
+// InteriorContains returns true iff the interval strictly contains p.
+func (i Interval) InteriorContains(p float64) bool {
+ return i.Lo < p && p < i.Hi
+}
+
+// InteriorContainsInterval returns true iff the interval strictly contains oi.
+func (i Interval) InteriorContainsInterval(oi Interval) bool {
+ if oi.IsEmpty() {
+ return true
+ }
+ return i.Lo < oi.Lo && oi.Hi < i.Hi
+}
+
+// Intersects returns true iff the interval contains any points in common with oi.
+func (i Interval) Intersects(oi Interval) bool {
+ if i.Lo <= oi.Lo {
+ return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty
+ }
+ return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty
+}
+
+// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
+func (i Interval) InteriorIntersects(oi Interval) bool {
+ return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi // oi is not empty
+}
+
+// Intersection returns the interval containing all points common to i and j.
+func (i Interval) Intersection(j Interval) Interval {
+ // Empty intervals do not need to be special-cased.
+ return Interval{
+ Lo: math.Max(i.Lo, j.Lo),
+ Hi: math.Min(i.Hi, j.Hi),
+ }
+}
+
+// AddPoint returns the interval expanded so that it contains the given point.
+func (i Interval) AddPoint(p float64) Interval {
+ if i.IsEmpty() {
+ return Interval{p, p}
+ }
+ if p < i.Lo {
+ return Interval{p, i.Hi}
+ }
+ if p > i.Hi {
+ return Interval{i.Lo, p}
+ }
+ return i
+}
+
+// ClampPoint returns the closest point in the interval to the given point "p".
+// The interval must be non-empty.
+func (i Interval) ClampPoint(p float64) float64 {
+ return math.Max(i.Lo, math.Min(i.Hi, p))
+}
+
+// Expanded returns an interval that has been expanded on each side by margin.
+// If margin is negative, then the function shrinks the interval on
+// each side by margin instead. The resulting interval may be empty. Any
+// expansion of an empty interval remains empty.
+func (i Interval) Expanded(margin float64) Interval {
+ if i.IsEmpty() {
+ return i
+ }
+ return Interval{i.Lo - margin, i.Hi + margin}
+}
+
+// Union returns the smallest interval that contains this interval and the given interval.
+func (i Interval) Union(other Interval) Interval {
+ if i.IsEmpty() {
+ return other
+ }
+ if other.IsEmpty() {
+ return i
+ }
+ return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)}
+}
+
+func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
+
+// epsilon is a small number that represents a reasonable level of noise between two
+// values that can be considered to be equal.
+const epsilon = 1e-14
+
+// ApproxEqual reports whether the interval can be transformed into the
+// given interval by moving each endpoint a small distance.
+// The empty interval is considered to be positioned arbitrarily on the
+// real line, so any interval with a small enough length will match
+// the empty interval.
+func (i Interval) ApproxEqual(other Interval) bool {
+ if i.IsEmpty() {
+ return other.Length() <= 2*epsilon
+ }
+ if other.IsEmpty() {
+ return i.Length() <= 2*epsilon
+ }
+ return math.Abs(other.Lo-i.Lo) <= epsilon &&
+ math.Abs(other.Hi-i.Hi) <= epsilon
+}
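
For orientation, a minimal usage sketch of the r1.Interval API added above (not part of the vendored diff; the values are illustrative only):

package main

import (
	"fmt"

	"github.com/golang/geo/r1"
)

func main() {
	// Build an interval by accumulating points, the same way RectFromPoints
	// in the r2 package below grows one interval per axis.
	iv := r1.EmptyInterval().AddPoint(2).AddPoint(5) // [2, 5]

	other := r1.Interval{Lo: 4, Hi: 9}
	fmt.Println(iv.Intersects(other))   // true: [2, 5] and [4, 9] overlap
	fmt.Println(iv.Intersection(other)) // [4, 5]
	fmt.Println(iv.Union(other))        // [2, 9]

	// A negative margin shrinks each side; shrinking [2, 5] by 2 empties it.
	fmt.Println(iv.Expanded(-2).IsEmpty()) // true
}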
diff --git a/vendor/github.com/golang/geo/r2/LICENSE b/vendor/github.com/golang/geo/r2/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/geo/r2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/r2/doc.go b/vendor/github.com/golang/geo/r2/doc.go
new file mode 100644
index 0000000..aa962ce
--- /dev/null
+++ b/vendor/github.com/golang/geo/r2/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package r2 implements types and functions for working with geometry in ℝ².
+
+See package s2 for a more detailed overview.
+*/
+package r2
diff --git a/vendor/github.com/golang/geo/r2/rect.go b/vendor/github.com/golang/geo/r2/rect.go
new file mode 100644
index 0000000..7148bd4
--- /dev/null
+++ b/vendor/github.com/golang/geo/r2/rect.go
@@ -0,0 +1,257 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r1"
+)
+
+// Point represents a point in ℝ².
+type Point struct {
+ X, Y float64
+}
+
+// Add returns the sum of p and op.
+func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} }
+
+// Sub returns the difference of p and op.
+func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} }
+
+// Mul returns the scalar product of p and m.
+func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} }
+
+// Ortho returns a counterclockwise orthogonal point with the same norm.
+func (p Point) Ortho() Point { return Point{-p.Y, p.X} }
+
+// Dot returns the dot product between p and op.
+func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y }
+
+// Cross returns the cross product of p and op.
+func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X }
+
+// Norm returns the vector's norm.
+func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) }
+
+// Normalize returns a unit point in the same direction as p.
+func (p Point) Normalize() Point {
+ if p.X == 0 && p.Y == 0 {
+ return p
+ }
+ return p.Mul(1 / p.Norm())
+}
+
+func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) }
+
+// Rect represents a closed axis-aligned rectangle in the (x,y) plane.
+type Rect struct {
+ X, Y r1.Interval
+}
+
+// RectFromPoints constructs a rect that contains the given points.
+func RectFromPoints(pts ...Point) Rect {
+ // Because the zero value of an r1.Interval is [0, 0], the rectangle must be
+ // seeded from the first point rather than from the zero Rect. Otherwise,
+ // passing in Point{0.2, 0.3} would yield the starting Rect {[0, 0.2], [0, 0.3]}
+ // instead of the correct {[0.2, 0.2], [0.3, 0.3]}.
+ if len(pts) == 0 {
+ return Rect{}
+ }
+
+ r := Rect{
+ X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X},
+ Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y},
+ }
+
+ for _, p := range pts[1:] {
+ r = r.AddPoint(p)
+ }
+ return r
+}
+
+// RectFromCenterSize constructs a rectangle with the given center and size.
+// Both dimensions of size must be non-negative.
+func RectFromCenterSize(center, size Point) Rect {
+ return Rect{
+ r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2},
+ r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2},
+ }
+}
+
+// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test
+// for empty rectangles, since they have more than one representation. A Rect{}
+// is not the same as the EmptyRect.
+func EmptyRect() Rect {
+ return Rect{r1.EmptyInterval(), r1.EmptyInterval()}
+}
+
+// IsValid reports whether the rectangle is valid.
+// This requires the width to be empty iff the height is empty.
+func (r Rect) IsValid() bool {
+ return r.X.IsEmpty() == r.Y.IsEmpty()
+}
+
+// IsEmpty reports whether the rectangle is empty.
+func (r Rect) IsEmpty() bool {
+ return r.X.IsEmpty()
+}
+
+// Vertices returns all four vertices of the rectangle. Vertices are returned in
+// CCW direction starting with the lower left corner.
+func (r Rect) Vertices() [4]Point {
+ return [4]Point{
+ {r.X.Lo, r.Y.Lo},
+ {r.X.Hi, r.Y.Lo},
+ {r.X.Hi, r.Y.Hi},
+ {r.X.Lo, r.Y.Hi},
+ }
+}
+
+// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and
+// direction j along the Y-axis (0=down, 1=up).
+func (r Rect) VertexIJ(i, j int) Point {
+ x := r.X.Lo
+ if i == 1 {
+ x = r.X.Hi
+ }
+ y := r.Y.Lo
+ if j == 1 {
+ y = r.Y.Hi
+ }
+ return Point{x, y}
+}
+
+// Lo returns the low corner of the rect.
+func (r Rect) Lo() Point {
+ return Point{r.X.Lo, r.Y.Lo}
+}
+
+// Hi returns the high corner of the rect.
+func (r Rect) Hi() Point {
+ return Point{r.X.Hi, r.Y.Hi}
+}
+
+// Center returns the center of the rectangle in (x,y)-space.
+func (r Rect) Center() Point {
+ return Point{r.X.Center(), r.Y.Center()}
+}
+
+// Size returns the width and height of this rectangle in (x,y)-space. Empty
+// rectangles have a negative width and height.
+func (r Rect) Size() Point {
+ return Point{r.X.Length(), r.Y.Length()}
+}
+
+// ContainsPoint reports whether the rectangle contains the given point.
+// Rectangles are closed regions, i.e. they contain their boundary.
+func (r Rect) ContainsPoint(p Point) bool {
+ return r.X.Contains(p.X) && r.Y.Contains(p.Y)
+}
+
+// InteriorContainsPoint returns true iff the given point is contained in the interior
+// of the region (i.e. the region excluding its boundary).
+func (r Rect) InteriorContainsPoint(p Point) bool {
+ return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y)
+}
+
+// Contains reports whether the rectangle contains the given rectangle.
+func (r Rect) Contains(other Rect) bool {
+ return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y)
+}
+
+// InteriorContains reports whether the interior of this rectangle contains all of the
+// points of the given other rectangle (including its boundary).
+func (r Rect) InteriorContains(other Rect) bool {
+ return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y)
+}
+
+// Intersects reports whether this rectangle and the other rectangle have any points in common.
+func (r Rect) Intersects(other Rect) bool {
+ return r.X.Intersects(other.X) && r.Y.Intersects(other.Y)
+}
+
+// InteriorIntersects reports whether the interior of this rectangle intersects
+// any point (including the boundary) of the given other rectangle.
+func (r Rect) InteriorIntersects(other Rect) bool {
+ return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y)
+}
+
+// AddPoint expands the rectangle to include the given point. The rectangle is
+// expanded by the minimum amount possible.
+func (r Rect) AddPoint(p Point) Rect {
+ return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)}
+}
+
+// AddRect expands the rectangle to include the given rectangle. This is the
+// same as replacing the rectangle by the union of the two rectangles, but
+// is more efficient.
+func (r Rect) AddRect(other Rect) Rect {
+ return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
+}
+
+// ClampPoint returns the closest point in the rectangle to the given point.
+// The rectangle must be non-empty.
+func (r Rect) ClampPoint(p Point) Point {
+ return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)}
+}
+
+// Expanded returns a rectangle that has been expanded in the x-direction
+// by margin.X, and in the y-direction by margin.Y. If either margin is
+// negative, the rectangle is shrunk on the corresponding sides instead. The resulting
+// rectangle may be empty. Any expansion of an empty rectangle remains empty.
+func (r Rect) Expanded(margin Point) Rect {
+ xx := r.X.Expanded(margin.X)
+ yy := r.Y.Expanded(margin.Y)
+ if xx.IsEmpty() || yy.IsEmpty() {
+ return EmptyRect()
+ }
+ return Rect{xx, yy}
+}
+
+// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides.
+func (r Rect) ExpandedByMargin(margin float64) Rect {
+ return r.Expanded(Point{margin, margin})
+}
+
+// Union returns the smallest rectangle containing the union of this rectangle and
+// the given rectangle.
+func (r Rect) Union(other Rect) Rect {
+ return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
+}
+
+// Intersection returns the smallest rectangle containing the intersection of this
+// rectangle and the given rectangle.
+func (r Rect) Intersection(other Rect) Rect {
+ xx := r.X.Intersection(other.X)
+ yy := r.Y.Intersection(other.Y)
+ if xx.IsEmpty() || yy.IsEmpty() {
+ return EmptyRect()
+ }
+
+ return Rect{xx, yy}
+}
+
+// ApproxEquals returns true if the x- and y-intervals of the two rectangles are
+// the same up to the given tolerance.
+func (r Rect) ApproxEquals(r2 Rect) bool {
+ return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
+}
+
+func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) }
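
As above, a short illustrative sketch (not part of the diff) of the r2.Rect API; the points and margins are arbitrary example values:

package main

import (
	"fmt"

	"github.com/golang/geo/r2"
)

func main() {
	// The first point seeds both axis intervals, so the zero value of
	// r1.Interval never leaks into the result.
	r := r2.RectFromPoints(r2.Point{X: 0.2, Y: 0.3}, r2.Point{X: 0.8, Y: 0.5})

	fmt.Println(r.ContainsPoint(r2.Point{X: 0.5, Y: 0.4})) // true: rects are closed regions
	fmt.Println(r.Center())                                // roughly (0.5, 0.4)
	fmt.Println(r.Size())                                  // roughly (0.6, 0.2)

	// A negative margin shrinks the rect; shrinking past its height empties it.
	fmt.Println(r.ExpandedByMargin(-0.2).IsEmpty()) // true
}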
diff --git a/vendor/github.com/golang/geo/r3/LICENSE b/vendor/github.com/golang/geo/r3/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/r3/doc.go b/vendor/github.com/golang/geo/r3/doc.go
new file mode 100644
index 0000000..666bee5
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package r3 implements types and functions for working with geometry in ℝ³.
+
+See ../s2 for a more detailed overview.
+*/
+package r3
diff --git a/vendor/github.com/golang/geo/r3/precisevector.go b/vendor/github.com/golang/geo/r3/precisevector.go
new file mode 100644
index 0000000..2d92d03
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/precisevector.go
@@ -0,0 +1,200 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r3
+
+import (
+ "fmt"
+ "math/big"
+)
+
+const (
+ // prec is the number of bits of precision to use for the Float values.
+ // To keep things simple, we use the maximum allowable precision on big
+ // values. This allows us to handle all values we expect in the s2 library.
+ prec = big.MaxPrec
+)
+
+// define some commonly referenced values.
+var (
+ precise0 = precInt(0)
+ precise1 = precInt(1)
+)
+
+// precStr wraps the conversion from a string into a big.Float. For results that
+// actually can be represented exactly, this should only be used on values that
+// are integer multiples of integer powers of 2.
+func precStr(s string) *big.Float {
+ // Explicitly ignoring the bool return for this usage.
+ f, _ := new(big.Float).SetPrec(prec).SetString(s)
+ return f
+}
+
+func precInt(i int64) *big.Float {
+ return new(big.Float).SetPrec(prec).SetInt64(i)
+}
+
+func precFloat(f float64) *big.Float {
+ return new(big.Float).SetPrec(prec).SetFloat64(f)
+}
+
+func precAdd(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Add(a, b)
+}
+
+func precSub(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Sub(a, b)
+}
+
+func precMul(a, b *big.Float) *big.Float {
+ return new(big.Float).SetPrec(prec).Mul(a, b)
+}
+
+// PreciseVector represents a point in ℝ³ using high-precision values.
+// Note that this is NOT a complete implementation because there are some
+// operations that Vector supports that are not feasible with arbitrary precision
+// math (e.g., methods that need division such as Normalize, or methods that
+// need a square root operation such as Norm).
+type PreciseVector struct {
+ X, Y, Z *big.Float
+}
+
+// PreciseVectorFromVector creates a high precision vector from the given Vector.
+func PreciseVectorFromVector(v Vector) PreciseVector {
+ return NewPreciseVector(v.X, v.Y, v.Z)
+}
+
+// NewPreciseVector creates a high precision vector from the given floating point values.
+func NewPreciseVector(x, y, z float64) PreciseVector {
+ return PreciseVector{
+ X: precFloat(x),
+ Y: precFloat(y),
+ Z: precFloat(z),
+ }
+}
+
+// Vector returns this precise vector converted to a Vector.
+func (v PreciseVector) Vector() Vector {
+ // The accuracy flag is ignored on these conversions back to float64.
+ x, _ := v.X.Float64()
+ y, _ := v.Y.Float64()
+ z, _ := v.Z.Float64()
+ return Vector{x, y, z}
+}
+
+// Equals reports whether v and ov are equal.
+func (v PreciseVector) Equals(ov PreciseVector) bool {
+ return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
+}
+
+func (v PreciseVector) String() string {
+ return fmt.Sprintf("(%v, %v, %v)", v.X, v.Y, v.Z)
+}
+
+// Norm2 returns the square of the norm.
+func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) }
+
+// IsUnit reports whether this vector is of unit length.
+func (v PreciseVector) IsUnit() bool {
+ return v.Norm2().Cmp(precise1) == 0
+}
+
+// Abs returns the vector with nonnegative components.
+func (v PreciseVector) Abs() PreciseVector {
+ return PreciseVector{
+ X: new(big.Float).Abs(v.X),
+ Y: new(big.Float).Abs(v.Y),
+ Z: new(big.Float).Abs(v.Z),
+ }
+}
+
+// Add returns the standard vector sum of v and ov.
+func (v PreciseVector) Add(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precAdd(v.X, ov.X),
+ Y: precAdd(v.Y, ov.Y),
+ Z: precAdd(v.Z, ov.Z),
+ }
+}
+
+// Sub returns the standard vector difference of v and ov.
+func (v PreciseVector) Sub(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precSub(v.X, ov.X),
+ Y: precSub(v.Y, ov.Y),
+ Z: precSub(v.Z, ov.Z),
+ }
+}
+
+// Mul returns the standard scalar product of v and f.
+func (v PreciseVector) Mul(f *big.Float) PreciseVector {
+ return PreciseVector{
+ X: precMul(v.X, f),
+ Y: precMul(v.Y, f),
+ Z: precMul(v.Z, f),
+ }
+}
+
+// MulByFloat64 returns the standard scalar product of v and f.
+func (v PreciseVector) MulByFloat64(f float64) PreciseVector {
+ return v.Mul(precFloat(f))
+}
+
+// Dot returns the standard dot product of v and ov.
+func (v PreciseVector) Dot(ov PreciseVector) *big.Float {
+ return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z)))
+}
+
+// Cross returns the standard cross product of v and ov.
+func (v PreciseVector) Cross(ov PreciseVector) PreciseVector {
+ return PreciseVector{
+ X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)),
+ Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)),
+ Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)),
+ }
+}
+
+// LargestComponent returns the axis that represents the largest component in this vector.
+func (v PreciseVector) LargestComponent() Axis {
+ t := v.Abs()
+
+ if t.X.Cmp(t.Y) > 0 {
+ if t.X.Cmp(t.Z) > 0 {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y.Cmp(t.Z) > 0 {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// SmallestComponent returns the axis that represents the smallest component in this vector.
+func (v PreciseVector) SmallestComponent() Axis {
+ t := v.Abs()
+
+ if t.X.Cmp(t.Y) < 0 {
+ if t.X.Cmp(t.Z) < 0 {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y.Cmp(t.Z) < 0 {
+ return YAxis
+ }
+ return ZAxis
+}
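
As a rough usage sketch (an editorial illustration, not part of the vendored files), the exported constructors and operations above can be exercised like this:

package main

import (
	"fmt"

	"github.com/golang/geo/r3"
)

func main() {
	a := r3.NewPreciseVector(1, 2, 3)
	b := r3.NewPreciseVector(4, 5, 6)

	sum := a.Add(b)     // exact component-wise sum
	cross := a.Cross(b) // exact cross product
	dot := a.Dot(b)     // exact dot product as a *big.Float

	fmt.Println(sum, cross, dot)
	fmt.Println(cross.Vector()) // converted back to a float64-based Vector
}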
diff --git a/vendor/github.com/golang/geo/r3/vector.go b/vendor/github.com/golang/geo/r3/vector.go
new file mode 100644
index 0000000..f39bf3a
--- /dev/null
+++ b/vendor/github.com/golang/geo/r3/vector.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package r3
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// Vector represents a point in ℝ³.
+type Vector struct {
+ X, Y, Z float64
+}
+
+// ApproxEqual reports whether v and ov are equal within a small epsilon.
+func (v Vector) ApproxEqual(ov Vector) bool {
+ const epsilon = 1e-16
+ return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon
+}
+
+func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) }
+
+// Norm returns the vector's norm.
+func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) }
+
+// Norm2 returns the square of the norm.
+func (v Vector) Norm2() float64 { return v.Dot(v) }
+
+// Normalize returns a unit vector in the same direction as v.
+func (v Vector) Normalize() Vector {
+ if v == (Vector{0, 0, 0}) {
+ return v
+ }
+ return v.Mul(1 / v.Norm())
+}
+
+// IsUnit returns whether this vector is of approximately unit length.
+func (v Vector) IsUnit() bool {
+ const epsilon = 5e-14
+ return math.Abs(v.Norm2()-1) <= epsilon
+}
+
+// Abs returns the vector with nonnegative components.
+func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} }
+
+// Add returns the standard vector sum of v and ov.
+func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} }
+
+// Sub returns the standard vector difference of v and ov.
+func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} }
+
+// Mul returns the standard scalar product of v and m.
+func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} }
+
+// Dot returns the standard dot product of v and ov.
+func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
+
+// Cross returns the standard cross product of v and ov.
+func (v Vector) Cross(ov Vector) Vector {
+ return Vector{
+ v.Y*ov.Z - v.Z*ov.Y,
+ v.Z*ov.X - v.X*ov.Z,
+ v.X*ov.Y - v.Y*ov.X,
+ }
+}
+
+// Distance returns the Euclidean distance between v and ov.
+func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() }
+
+// Angle returns the angle between v and ov.
+func (v Vector) Angle(ov Vector) s1.Angle {
+ return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian
+}
+
+// Axis enumerates the 3 axes of ℝ³.
+type Axis int
+
+// The three axes of ℝ³.
+const (
+ XAxis Axis = iota
+ YAxis
+ ZAxis
+)
+
+// Ortho returns a unit vector that is orthogonal to v.
+// Ortho(-v) = -Ortho(v) for all v.
+func (v Vector) Ortho() Vector {
+ ov := Vector{0.012, 0.0053, 0.00457}
+ switch v.LargestComponent() {
+ case XAxis:
+ ov.Z = 1
+ case YAxis:
+ ov.X = 1
+ default:
+ ov.Y = 1
+ }
+ return v.Cross(ov).Normalize()
+}
+
+// LargestComponent returns the axis that represents the largest component in this vector.
+func (v Vector) LargestComponent() Axis {
+ t := v.Abs()
+
+ if t.X > t.Y {
+ if t.X > t.Z {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y > t.Z {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// SmallestComponent returns the axis that represents the smallest component in this vector.
+func (v Vector) SmallestComponent() Axis {
+ t := v.Abs()
+
+ if t.X < t.Y {
+ if t.X < t.Z {
+ return XAxis
+ }
+ return ZAxis
+ }
+ if t.Y < t.Z {
+ return YAxis
+ }
+ return ZAxis
+}
+
+// Cmp compares v and ov lexicographically and returns:
+//
+// -1 if v < ov
+// 0 if v == ov
+// +1 if v > ov
+//
+// This method is based on C++'s std::lexicographical_compare. Two entities
+// are compared element by element with the given operator. The first mismatch
+// defines which is less (or greater) than the other. If both have equivalent
+// values they are lexicographically equal.
+func (v Vector) Cmp(ov Vector) int {
+ if v.X < ov.X {
+ return -1
+ }
+ if v.X > ov.X {
+ return 1
+ }
+
+ // First elements were the same, try the next.
+ if v.Y < ov.Y {
+ return -1
+ }
+ if v.Y > ov.Y {
+ return 1
+ }
+
+	// Second elements were the same; return the final compare.
+ if v.Z < ov.Z {
+ return -1
+ }
+ if v.Z > ov.Z {
+ return 1
+ }
+
+ // Both are equal
+ return 0
+}
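
A brief sketch of the Vector API above (an editorial illustration, not part of the vendored files), including the lexicographic Cmp used for ordering:

package main

import (
	"fmt"
	"sort"

	"github.com/golang/geo/r3"
)

func main() {
	v := r3.Vector{X: 3, Y: 4, Z: 0}
	fmt.Println(v.Norm())         // 5
	fmt.Println(v.Normalize())    // the unit vector (0.6, 0.8, 0)
	fmt.Println(v.Ortho().Dot(v)) // approximately 0: Ortho is orthogonal to v

	vs := []r3.Vector{{X: 1, Y: 2, Z: 3}, {X: 1}, {Y: 5, Z: 5}}
	sort.Slice(vs, func(i, j int) bool { return vs[i].Cmp(vs[j]) < 0 })
	fmt.Println(vs) // sorted lexicographically by X, then Y, then Z
}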
diff --git a/vendor/github.com/golang/geo/s1/LICENSE b/vendor/github.com/golang/geo/s1/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/s1/angle.go b/vendor/github.com/golang/geo/s1/angle.go
new file mode 100644
index 0000000..5b3a25c
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/angle.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+ "math"
+ "strconv"
+)
+
+// Angle represents a 1D angle. The internal representation is a double precision
+// value in radians, so conversion to and from radians is exact.
+// Conversions between E5, E6, E7, and Degrees are not always
+// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000).
+//
+// The following conversions between degrees and radians are exact:
+//
+// Degree*180 == Radian*math.Pi
+// Degree*(180/n) == Radian*(math.Pi/n) for n == 0..8
+//
+// These identities hold when the arguments are scaled up or down by any power
+// of 2. Some similar identities are also true, for example,
+//
+// Degree*60 == Radian*(math.Pi/3)
+//
+// But be aware that this type of identity does not hold in general. For example,
+//
+// Degree*3 != Radian*(math.Pi/60)
+//
+// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees()
+// does not always equal x. For example,
+//
+// (Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8
+//
+// but
+//
+// (60*Degree).Degrees() != 60
+//
+// When testing for equality, you should allow for numerical errors (floatApproxEq)
+// or convert to discrete E5/E6/E7 values first.
+type Angle float64
+
+// Angle units.
+const (
+ Radian Angle = 1
+ Degree = (math.Pi / 180) * Radian
+
+ E5 = 1e-5 * Degree
+ E6 = 1e-6 * Degree
+ E7 = 1e-7 * Degree
+)
+
+// Radians returns the angle in radians.
+func (a Angle) Radians() float64 { return float64(a) }
+
+// Degrees returns the angle in degrees.
+func (a Angle) Degrees() float64 { return float64(a / Degree) }
+
+// round returns the value rounded to nearest as an int32.
+// This does not match C++ exactly for the case of x.5.
+func round(val float64) int32 {
+ if val < 0 {
+ return int32(val - 0.5)
+ }
+ return int32(val + 0.5)
+}
+
+// InfAngle returns an angle larger than any finite angle.
+func InfAngle() Angle {
+ return Angle(math.Inf(1))
+}
+
+// isInf reports whether this Angle is infinite.
+func (a Angle) isInf() bool {
+ return math.IsInf(float64(a), 0)
+}
+
+// E5 returns the angle in hundred thousandths of degrees.
+func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) }
+
+// E6 returns the angle in millionths of degrees.
+func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) }
+
+// E7 returns the angle in ten millionths of degrees.
+func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
+
+// Abs returns the absolute value of the angle.
+func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
+
+// Normalized returns an equivalent angle in [0, 2π).
+func (a Angle) Normalized() Angle {
+ rad := math.Mod(float64(a), 2*math.Pi)
+ if rad < 0 {
+ rad += 2 * math.Pi
+ }
+ return Angle(rad)
+}
+
+func (a Angle) String() string {
+ return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f"
+}
+
+// BUG(dsymonds): The major differences from the C++ version are:
+// - no unsigned E5/E6/E7 methods
+// - no S2Point or S2LatLng constructors
+// - no comparison or arithmetic operators
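
The conversion behaviour documented in the Angle comment can be seen directly (an editorial illustration, not part of the vendored files):

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	a := 45 * s1.Degree
	fmt.Println(a.Radians() == math.Pi/4) // true: 180/n conversions are exact for n = 0..8
	fmt.Println(a.Degrees())              // 45

	b := 3.1 * s1.Degree
	fmt.Println(b.E6(), b.E7()) // 3100000 31000000: the discrete representations
}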
diff --git a/vendor/github.com/golang/geo/s1/chordangle.go b/vendor/github.com/golang/geo/s1/chordangle.go
new file mode 100644
index 0000000..5f5832a
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/chordangle.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+ "math"
+)
+
+// ChordAngle represents the angle subtended by a chord (i.e., the straight
+// line segment connecting two points on the sphere). Its representation
+// makes it very efficient for computing and comparing distances, but unlike
+// Angle it is only capable of representing angles between 0 and π radians.
+// Generally, ChordAngle should only be used in loops where many angles need
+// to be calculated and compared. Otherwise it is simpler to use Angle.
+//
+// ChordAngle loses some accuracy as the angle approaches π radians.
+// Specifically, the representation of (π - x) radians has an error of about
+// (1e-15 / x), with a maximum error of about 2e-8 radians (about 13cm on the
+// Earth's surface). For comparison, for angles up to π/2 radians (10000km)
+// the worst-case representation error is about 2e-16 radians (1 nanometer),
+// which is about the same as Angle.
+//
+// ChordAngles are represented by the squared chord length, which can
+// range from 0 to 4. Positive infinity represents an infinite squared length.
+type ChordAngle float64
+
+const (
+ // NegativeChordAngle represents a chord angle smaller than the zero angle.
+ // The only valid operations on a NegativeChordAngle are comparisons and
+ // Angle conversions.
+ NegativeChordAngle = ChordAngle(-1)
+
+ // RightChordAngle represents a chord angle of 90 degrees (a "right angle").
+ RightChordAngle = ChordAngle(2)
+
+ // StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
+ // This is the maximum finite chord angle.
+ StraightChordAngle = ChordAngle(4)
+)
+
+// ChordAngleFromAngle returns a ChordAngle from the given Angle.
+func ChordAngleFromAngle(a Angle) ChordAngle {
+ if a < 0 {
+ return NegativeChordAngle
+ }
+ if a.isInf() {
+ return InfChordAngle()
+ }
+ l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians()))
+ return ChordAngle(l * l)
+}
+
+// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
+// Note that the argument is automatically clamped to a maximum of 4.0 to
+// handle possible roundoff errors. The argument must be non-negative.
+func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
+ if length2 > 4 {
+ return StraightChordAngle
+ }
+ return ChordAngle(length2)
+}
+
+// Expanded returns a new ChordAngle that has been adjusted by the given error
+// bound (which can be positive or negative). Error should be the value
+// returned by either MaxPointError or MaxAngleError. For example:
+// a := ChordAngleFromPoints(x, y)
+// a1 := a.Expanded(a.MaxPointError())
+func (c ChordAngle) Expanded(e float64) ChordAngle {
+ // If the angle is special, don't change it. Otherwise clamp it to the valid range.
+ if c.isSpecial() {
+ return c
+ }
+ return ChordAngle(math.Max(0.0, math.Min(4.0, float64(c)+e)))
+}
+
+// Angle converts this ChordAngle to an Angle.
+func (c ChordAngle) Angle() Angle {
+ if c < 0 {
+ return -1 * Radian
+ }
+ if c.isInf() {
+ return InfAngle()
+ }
+ return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c))))
+}
+
+// InfChordAngle returns a chord angle larger than any finite chord angle.
+// The only valid operations on an InfChordAngle are comparisons and Angle conversions.
+func InfChordAngle() ChordAngle {
+ return ChordAngle(math.Inf(1))
+}
+
+// isInf reports whether this ChordAngle is infinite.
+func (c ChordAngle) isInf() bool {
+ return math.IsInf(float64(c), 1)
+}
+
+// isSpecial reports whether this ChordAngle is one of the special cases.
+func (c ChordAngle) isSpecial() bool {
+ return c < 0 || c.isInf()
+}
+
+// isValid reports whether this ChordAngle is valid or not.
+func (c ChordAngle) isValid() bool {
+ return (c >= 0 && c <= 4) || c.isSpecial()
+}
+
+// MaxPointError returns the maximum error size for a ChordAngle constructed
+// from 2 Points x and y, assuming that x and y are normalized to within the
+// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to
+// the true distance after the points are projected to lie exactly on the sphere.
+func (c ChordAngle) MaxPointError() float64 {
+ // There is a relative error of (2.5*dblEpsilon) when computing the squared
+ // distance, plus an absolute error of (16 * dblEpsilon**2) because the
+ // lengths of the input points may differ from 1 by up to (2*dblEpsilon) each.
+ return 2.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
+}
+
+// MaxAngleError returns the maximum error for a ChordAngle constructed
+// as an Angle distance.
+func (c ChordAngle) MaxAngleError() float64 {
+ return dblEpsilon * float64(c)
+}
+
+// Add adds the other ChordAngle to this one and returns the resulting value.
+// This method assumes the ChordAngles are not special.
+func (c ChordAngle) Add(other ChordAngle) ChordAngle {
+ // Note that this method (and Sub) is much more efficient than converting
+ // the ChordAngle to an Angle and adding those and converting back. It
+ // requires only one square root plus a few additions and multiplications.
+
+ // Optimization for the common case where b is an error tolerance
+ // parameter that happens to be set to zero.
+ if other == 0 {
+ return c
+ }
+
+ // Clamp the angle sum to at most 180 degrees.
+ if c+other >= 4 {
+ return StraightChordAngle
+ }
+
+ // Let a and b be the (non-squared) chord lengths, and let c = a+b.
+ // Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc).
+ // Then the formula below can be derived from c = 2 * sin(A+B) and the
+ // relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A)
+ // cos(X) = sqrt(1 - sin^2(X))
+ x := float64(c * (1 - 0.25*other))
+ y := float64(other * (1 - 0.25*c))
+ return ChordAngle(math.Min(4.0, x+y+2*math.Sqrt(x*y)))
+}
+
+// Sub subtracts the other ChordAngle from this one and returns the resulting value.
+// This method assumes the ChordAngles are not special.
+func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
+ if other == 0 {
+ return c
+ }
+ if c <= other {
+ return 0
+ }
+ x := float64(c * (1 - 0.25*other))
+ y := float64(other * (1 - 0.25*c))
+ return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y)))
+}
+
+// Sin returns the sine of this chord angle. This method is more efficient
+// than converting to Angle and performing the computation.
+func (c ChordAngle) Sin() float64 {
+ // Let a be the (non-squared) chord length, and let A be the corresponding
+ // half-angle (a = 2*sin(A)). The formula below can be derived from:
+ // sin(2*A) = 2 * sin(A) * cos(A)
+ // cos^2(A) = 1 - sin^2(A)
+ // This is much faster than converting to an angle and computing its sine.
+ return math.Sqrt(float64(c * (1 - 0.25*c)))
+}
+
+// Cos returns the cosine of this chord angle. This method is more efficient
+// than converting to Angle and performing the computation.
+func (c ChordAngle) Cos() float64 {
+ // cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A)
+ return float64(1 - 0.5*c)
+}
+
+// Tan returns the tangent of this chord angle.
+func (c ChordAngle) Tan() float64 {
+ return c.Sin() / c.Cos()
+}
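
A short sketch of ChordAngle round-tripping through Angle (an editorial illustration, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/golang/geo/s1"
)

func main() {
	a := s1.ChordAngleFromAngle(30 * s1.Degree)
	b := s1.ChordAngleFromAngle(40 * s1.Degree)

	// Add works directly on the squared-chord representation; the result
	// agrees with adding the underlying angles, up to floating-point error.
	fmt.Println(a.Add(b).Angle().Degrees()) // approximately 70

	fmt.Println(s1.RightChordAngle.Angle().Degrees()) // approximately 90
	fmt.Println(a.Sin(), a.Cos())                     // sin and cos of 30 degrees
}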
diff --git a/vendor/github.com/golang/geo/s1/doc.go b/vendor/github.com/golang/geo/s1/doc.go
new file mode 100644
index 0000000..b9fca50
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package s1 implements types and functions for working with geometry in S¹ (circular geometry).
+
+See ../s2 for a more detailed overview.
+*/
+package s1
diff --git a/vendor/github.com/golang/geo/s1/interval.go b/vendor/github.com/golang/geo/s1/interval.go
new file mode 100644
index 0000000..b9cd34b
--- /dev/null
+++ b/vendor/github.com/golang/geo/s1/interval.go
@@ -0,0 +1,350 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s1
+
+import (
+ "math"
+ "strconv"
+)
+
+// Interval represents a closed interval on a unit circle.
+// Zero-length intervals (where Lo == Hi) represent single points.
+// If Lo > Hi then the interval is "inverted".
+// The point at (-1, 0) on the unit circle has two valid representations,
+// [π,π] and [-π,-π]. We normalize the latter to the former in IntervalFromEndpoints.
+// There are two special intervals that take advantage of that:
+// - the full interval, [-π,π], and
+// - the empty interval, [π,-π].
+// Treat the exported fields as read-only.
+type Interval struct {
+ Lo, Hi float64
+}
+
+// IntervalFromEndpoints constructs a new interval from endpoints.
+// Both arguments must be in the range [-π,π]. This function allows inverted intervals
+// to be created.
+func IntervalFromEndpoints(lo, hi float64) Interval {
+ i := Interval{lo, hi}
+ if lo == -math.Pi && hi != math.Pi {
+ i.Lo = math.Pi
+ }
+ if hi == -math.Pi && lo != math.Pi {
+ i.Hi = math.Pi
+ }
+ return i
+}
+
+// IntervalFromPointPair returns the minimal interval containing the two given points.
+// Both arguments must be in [-π,π].
+func IntervalFromPointPair(a, b float64) Interval {
+ if a == -math.Pi {
+ a = math.Pi
+ }
+ if b == -math.Pi {
+ b = math.Pi
+ }
+ if positiveDistance(a, b) <= math.Pi {
+ return Interval{a, b}
+ }
+ return Interval{b, a}
+}
+
+// EmptyInterval returns an empty interval.
+func EmptyInterval() Interval { return Interval{math.Pi, -math.Pi} }
+
+// FullInterval returns a full interval.
+func FullInterval() Interval { return Interval{-math.Pi, math.Pi} }
+
+// IsValid reports whether the interval is valid.
+func (i Interval) IsValid() bool {
+ return (math.Abs(i.Lo) <= math.Pi && math.Abs(i.Hi) <= math.Pi &&
+ !(i.Lo == -math.Pi && i.Hi != math.Pi) &&
+ !(i.Hi == -math.Pi && i.Lo != math.Pi))
+}
+
+// IsFull reports whether the interval is full.
+func (i Interval) IsFull() bool { return i.Lo == -math.Pi && i.Hi == math.Pi }
+
+// IsEmpty reports whether the interval is empty.
+func (i Interval) IsEmpty() bool { return i.Lo == math.Pi && i.Hi == -math.Pi }
+
+// IsInverted reports whether the interval is inverted; that is, whether Lo > Hi.
+func (i Interval) IsInverted() bool { return i.Lo > i.Hi }
+
+// Invert returns the interval with endpoints swapped.
+func (i Interval) Invert() Interval {
+ return Interval{i.Hi, i.Lo}
+}
+
+// Center returns the midpoint of the interval.
+// It is undefined for full and empty intervals.
+func (i Interval) Center() float64 {
+ c := 0.5 * (i.Lo + i.Hi)
+ if !i.IsInverted() {
+ return c
+ }
+ if c <= 0 {
+ return c + math.Pi
+ }
+ return c - math.Pi
+}
+
+// Length returns the length of the interval.
+// The length of an empty interval is negative.
+func (i Interval) Length() float64 {
+ l := i.Hi - i.Lo
+ if l >= 0 {
+ return l
+ }
+ l += 2 * math.Pi
+ if l > 0 {
+ return l
+ }
+ return -1
+}
+
+// Assumes p ∈ (-π,π].
+func (i Interval) fastContains(p float64) bool {
+ if i.IsInverted() {
+ return (p >= i.Lo || p <= i.Hi) && !i.IsEmpty()
+ }
+ return p >= i.Lo && p <= i.Hi
+}
+
+// Contains returns true iff the interval contains p.
+// Assumes p ∈ [-π,π].
+func (i Interval) Contains(p float64) bool {
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ return i.fastContains(p)
+}
+
+// ContainsInterval returns true iff the interval contains oi.
+func (i Interval) ContainsInterval(oi Interval) bool {
+ if i.IsInverted() {
+ if oi.IsInverted() {
+ return oi.Lo >= i.Lo && oi.Hi <= i.Hi
+ }
+ return (oi.Lo >= i.Lo || oi.Hi <= i.Hi) && !i.IsEmpty()
+ }
+ if oi.IsInverted() {
+ return i.IsFull() || oi.IsEmpty()
+ }
+ return oi.Lo >= i.Lo && oi.Hi <= i.Hi
+}
+
+// InteriorContains returns true iff the interior of the interval contains p.
+// Assumes p ∈ [-π,π].
+func (i Interval) InteriorContains(p float64) bool {
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ if i.IsInverted() {
+ return p > i.Lo || p < i.Hi
+ }
+ return (p > i.Lo && p < i.Hi) || i.IsFull()
+}
+
+// InteriorContainsInterval returns true iff the interior of the interval contains oi.
+func (i Interval) InteriorContainsInterval(oi Interval) bool {
+ if i.IsInverted() {
+ if oi.IsInverted() {
+ return (oi.Lo > i.Lo && oi.Hi < i.Hi) || oi.IsEmpty()
+ }
+ return oi.Lo > i.Lo || oi.Hi < i.Hi
+ }
+ if oi.IsInverted() {
+ return i.IsFull() || oi.IsEmpty()
+ }
+ return (oi.Lo > i.Lo && oi.Hi < i.Hi) || i.IsFull()
+}
+
+// Intersects returns true iff the interval contains any points in common with oi.
+func (i Interval) Intersects(oi Interval) bool {
+ if i.IsEmpty() || oi.IsEmpty() {
+ return false
+ }
+ if i.IsInverted() {
+ return oi.IsInverted() || oi.Lo <= i.Hi || oi.Hi >= i.Lo
+ }
+ if oi.IsInverted() {
+ return oi.Lo <= i.Hi || oi.Hi >= i.Lo
+ }
+ return oi.Lo <= i.Hi && oi.Hi >= i.Lo
+}
+
+// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
+func (i Interval) InteriorIntersects(oi Interval) bool {
+ if i.IsEmpty() || oi.IsEmpty() || i.Lo == i.Hi {
+ return false
+ }
+ if i.IsInverted() {
+ return oi.IsInverted() || oi.Lo < i.Hi || oi.Hi > i.Lo
+ }
+ if oi.IsInverted() {
+ return oi.Lo < i.Hi || oi.Hi > i.Lo
+ }
+ return (oi.Lo < i.Hi && oi.Hi > i.Lo) || i.IsFull()
+}
+
+// Compute distance from a to b in [0,2π], in a numerically stable way.
+func positiveDistance(a, b float64) float64 {
+ d := b - a
+ if d >= 0 {
+ return d
+ }
+ return (b + math.Pi) - (a - math.Pi)
+}
+
+// Union returns the smallest interval that contains both the interval and oi.
+func (i Interval) Union(oi Interval) Interval {
+ if oi.IsEmpty() {
+ return i
+ }
+ if i.fastContains(oi.Lo) {
+ if i.fastContains(oi.Hi) {
+ // Either oi ⊂ i, or i ∪ oi is the full interval.
+ if i.ContainsInterval(oi) {
+ return i
+ }
+ return FullInterval()
+ }
+ return Interval{i.Lo, oi.Hi}
+ }
+ if i.fastContains(oi.Hi) {
+ return Interval{oi.Lo, i.Hi}
+ }
+
+ // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
+ if i.IsEmpty() || oi.fastContains(i.Lo) {
+ return oi
+ }
+
+ // This is the only hard case where we need to find the closest pair of endpoints.
+ if positiveDistance(oi.Hi, i.Lo) < positiveDistance(i.Hi, oi.Lo) {
+ return Interval{oi.Lo, i.Hi}
+ }
+ return Interval{i.Lo, oi.Hi}
+}
+
+// Intersection returns the smallest interval that contains the intersection of the interval and oi.
+func (i Interval) Intersection(oi Interval) Interval {
+ if oi.IsEmpty() {
+ return EmptyInterval()
+ }
+ if i.fastContains(oi.Lo) {
+ if i.fastContains(oi.Hi) {
+ // Either oi ⊂ i, or i and oi intersect twice. Neither are empty.
+ // In the first case we want to return i (which is shorter than oi).
+ // In the second case one of them is inverted, and the smallest interval
+ // that covers the two disjoint pieces is the shorter of i and oi.
+ // We thus want to pick the shorter of i and oi in both cases.
+ if oi.Length() < i.Length() {
+ return oi
+ }
+ return i
+ }
+ return Interval{oi.Lo, i.Hi}
+ }
+ if i.fastContains(oi.Hi) {
+ return Interval{i.Lo, oi.Hi}
+ }
+
+ // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint.
+ if oi.fastContains(i.Lo) {
+ return i
+ }
+ return EmptyInterval()
+}
+
+// AddPoint returns the interval expanded by the minimum amount necessary such
+// that it contains the given point "p" (an angle in the range [-Pi, Pi]).
+func (i Interval) AddPoint(p float64) Interval {
+ if math.Abs(p) > math.Pi {
+ return i
+ }
+ if p == -math.Pi {
+ p = math.Pi
+ }
+ if i.fastContains(p) {
+ return i
+ }
+ if i.IsEmpty() {
+ return Interval{p, p}
+ }
+ if positiveDistance(p, i.Lo) < positiveDistance(i.Hi, p) {
+ return Interval{p, i.Hi}
+ }
+ return Interval{i.Lo, p}
+}
+
+// Define the maximum rounding error for arithmetic operations. Depending on the
+// platform, the mantissa precision may differ, so we use specific values to be
+// consistent across all platforms.
+// The values come from the C++ implementation.
+var (
+ // epsilon is a small number that represents a reasonable level of noise between two
+ // values that can be considered to be equal.
+ epsilon = 1e-15
+ // dblEpsilon is a smaller number for values that require more precision.
+ dblEpsilon = 2.220446049e-16
+)
+
+// Expanded returns an interval that has been expanded on each side by margin.
+// If margin is negative, then the function shrinks the interval on
+// each side by margin instead. The resulting interval may be empty or
+// full. Any expansion (positive or negative) of a full interval remains
+// full, and any expansion of an empty interval remains empty.
+func (i Interval) Expanded(margin float64) Interval {
+ if margin >= 0 {
+ if i.IsEmpty() {
+ return i
+ }
+ // Check whether this interval will be full after expansion, allowing
+ // for a rounding error when computing each endpoint.
+ if i.Length()+2*margin+2*dblEpsilon >= 2*math.Pi {
+ return FullInterval()
+ }
+ } else {
+ if i.IsFull() {
+ return i
+ }
+ // Check whether this interval will be empty after expansion, allowing
+ // for a rounding error when computing each endpoint.
+ if i.Length()+2*margin-2*dblEpsilon <= 0 {
+ return EmptyInterval()
+ }
+ }
+ result := IntervalFromEndpoints(
+ math.Remainder(i.Lo-margin, 2*math.Pi),
+ math.Remainder(i.Hi+margin, 2*math.Pi),
+ )
+ if result.Lo <= -math.Pi {
+ result.Lo = math.Pi
+ }
+ return result
+}
+
+func (i Interval) String() string {
+ // like "[%.7f, %.7f]"
+ return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]"
+}
+
+// BUG(dsymonds): The major differences from the C++ version are:
+// - no validity checking on construction, etc. (not a bug?)
+// - a few operations
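
A sketch of how inverted intervals behave (an editorial illustration, not part of the vendored files): an inverted interval wraps through the ±π point, and Contains, Intersects, and Union account for the wrap.

package main

import (
	"fmt"
	"math"

	"github.com/golang/geo/s1"
)

func main() {
	// [170°, -170°] expressed in radians: an inverted interval crossing ±π.
	wrap := s1.IntervalFromEndpoints(170*math.Pi/180, -170*math.Pi/180)
	fmt.Println(wrap.IsInverted())      // true
	fmt.Println(wrap.Contains(math.Pi)) // true: the ±180° point is inside
	fmt.Println(wrap.Contains(0))       // false

	other := s1.IntervalFromEndpoints(-math.Pi/2, math.Pi/2)
	fmt.Println(wrap.Intersects(other)) // false: the two spans are disjoint
	fmt.Println(wrap.Union(other))      // smallest interval covering both
}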
diff --git a/vendor/github.com/golang/geo/s2/LICENSE b/vendor/github.com/golang/geo/s2/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/golang/geo/s2/cap.go b/vendor/github.com/golang/geo/s2/cap.go
new file mode 100644
index 0000000..4f60e86
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cap.go
@@ -0,0 +1,406 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ emptyHeight = -1.0
+ zeroHeight = 0.0
+ fullHeight = 2.0
+
+ roundUp = 1.0 + 1.0/(1<<52)
+)
+
+var (
+ // centerPoint is the default center for S2Caps
+ centerPoint = Point{PointFromCoords(1.0, 0, 0).Normalize()}
+)
+
+// Cap represents a disc-shaped region defined by a center and radius.
+// Technically this shape is called a "spherical cap" (rather than disc)
+// because it is not planar; the cap represents a portion of the sphere that
+// has been cut off by a plane. The boundary of the cap is the circle defined
+// by the intersection of the sphere and the plane. For containment purposes,
+// the cap is a closed set, i.e. it contains its boundary.
+//
+// For the most part, you can use a spherical cap wherever you would use a
+// disc in planar geometry. The radius of the cap is measured along the
+// surface of the sphere (rather than the straight-line distance through the
+// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius
+// π covers the entire sphere.
+//
+// The center is a point on the surface of the unit sphere. (Hence the need for
+// it to be of unit length.)
+//
+// Internally, the cap is represented by its center and "height". The height
+// is the distance from the center point to the cutoff plane. This
+// representation is much more efficient for containment tests than the
+// (center, radius) representation. There is also support for "empty" and
+// "full" caps, which contain no points and all points respectively.
+//
+// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap.
+type Cap struct {
+ center Point
+ height float64
+}
+
+// CapFromPoint constructs a cap containing a single point.
+func CapFromPoint(p Point) Cap {
+ return CapFromCenterHeight(p, zeroHeight)
+}
+
+// CapFromCenterAngle constructs a cap with the given center and angle.
+func CapFromCenterAngle(center Point, angle s1.Angle) Cap {
+ return CapFromCenterHeight(center, radiusToHeight(angle))
+}
+
+// CapFromCenterHeight constructs a cap with the given center and height. A
+// negative height yields an empty cap; a height of 2 or more yields a full cap.
+// The center should be unit length.
+func CapFromCenterHeight(center Point, height float64) Cap {
+ return Cap{
+ center: center,
+ height: height,
+ }
+}
+
+// CapFromCenterArea constructs a cap with the given center and surface area.
+// Note that the area can also be interpreted as the solid angle subtended by the
+// cap (because the sphere has unit radius). A negative area yields an empty cap;
+// an area of 4*π or more yields a full cap.
+func CapFromCenterArea(center Point, area float64) Cap {
+ return CapFromCenterHeight(center, area/(math.Pi*2.0))
+}
+
+// EmptyCap returns a cap that contains no points.
+func EmptyCap() Cap {
+ return CapFromCenterHeight(centerPoint, emptyHeight)
+}
+
+// FullCap returns a cap that contains all points.
+func FullCap() Cap {
+ return CapFromCenterHeight(centerPoint, fullHeight)
+}
+
+// IsValid reports whether the Cap is considered valid.
+// Heights are normalized so that they do not exceed 2.
+func (c Cap) IsValid() bool {
+ return c.center.Vector.IsUnit() && c.height <= fullHeight
+}
+
+// IsEmpty reports whether the cap is empty, i.e. it contains no points.
+func (c Cap) IsEmpty() bool {
+ return c.height < zeroHeight
+}
+
+// IsFull reports whether the cap is full, i.e. it contains all points.
+func (c Cap) IsFull() bool {
+ return c.height == fullHeight
+}
+
+// Center returns the cap's center point.
+func (c Cap) Center() Point {
+ return c.center
+}
+
+// Height returns the cap's "height".
+func (c Cap) Height() float64 {
+ return c.height
+}
+
+// Radius returns the cap's radius.
+func (c Cap) Radius() s1.Angle {
+ if c.IsEmpty() {
+ return s1.Angle(emptyHeight)
+ }
+
+ // This could also be computed as acos(1 - height_), but the following
+ // formula is much more accurate when the cap height is small. It
+ // follows from the relationship h = 1 - cos(r) = 2 sin^2(r/2).
+ return s1.Angle(2 * math.Asin(math.Sqrt(0.5*c.height)))
+}
+
+// Area returns the surface area of the Cap on the unit sphere.
+func (c Cap) Area() float64 {
+ return 2.0 * math.Pi * math.Max(zeroHeight, c.height)
+}
+
+// Contains reports whether this cap contains the other.
+func (c Cap) Contains(other Cap) bool {
+ // In a set containment sense, every cap contains the empty cap.
+ if c.IsFull() || other.IsEmpty() {
+ return true
+ }
+ return c.Radius() >= c.center.Distance(other.center)+other.Radius()
+}
+
+// Intersects reports whether this cap intersects the other cap.
+// i.e. whether they have any points in common.
+func (c Cap) Intersects(other Cap) bool {
+ if c.IsEmpty() || other.IsEmpty() {
+ return false
+ }
+
+ return c.Radius()+other.Radius() >= c.center.Distance(other.center)
+}
+
+// InteriorIntersects reports whether this cap's interior intersects the other cap.
+func (c Cap) InteriorIntersects(other Cap) bool {
+ // Make sure this cap has an interior and the other cap is non-empty.
+ if c.height <= zeroHeight || other.IsEmpty() {
+ return false
+ }
+
+ return c.Radius()+other.Radius() > c.center.Distance(other.center)
+}
+
+// ContainsPoint reports whether this cap contains the point.
+func (c Cap) ContainsPoint(p Point) bool {
+ return c.center.Sub(p.Vector).Norm2() <= 2*c.height
+}
+
+// InteriorContainsPoint reports whether the point is within the interior of this cap.
+func (c Cap) InteriorContainsPoint(p Point) bool {
+ return c.IsFull() || c.center.Sub(p.Vector).Norm2() < 2*c.height
+}
+
+// Complement returns the complement of the interior of the cap. A cap and its
+// complement have the same boundary but do not share any interior points.
+// The complement operator is not a bijection because the complement of a
+// singleton cap (containing a single point) is the same as the complement
+// of an empty cap.
+func (c Cap) Complement() Cap {
+ height := emptyHeight
+ if !c.IsFull() {
+ height = fullHeight - math.Max(c.height, zeroHeight)
+ }
+ return CapFromCenterHeight(Point{c.center.Mul(-1.0)}, height)
+}
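+
+// Combining Area and Complement: Area is 2*pi*h and the complement has height
+// 2-h, so the two areas always sum to the full sphere area of 4*pi. A sketch,
+// where p is any unit-length Point:
+//
+//	c := CapFromCenterHeight(p, 0.5)
+//	_ = c.Area() + c.Complement().Area() // == 4*pi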
+
+// CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
+func (c Cap) CapBound() Cap {
+ return c
+}
+
+// RectBound returns a bounding latitude-longitude rectangle.
+// The bounds are not guaranteed to be tight.
+func (c Cap) RectBound() Rect {
+ if c.IsEmpty() {
+ return EmptyRect()
+ }
+
+ capAngle := c.Radius().Radians()
+ allLongitudes := false
+ lat := r1.Interval{
+ Lo: latitude(c.center).Radians() - capAngle,
+ Hi: latitude(c.center).Radians() + capAngle,
+ }
+ lng := s1.FullInterval()
+
+ // Check whether cap includes the south pole.
+ if lat.Lo <= -math.Pi/2 {
+ lat.Lo = -math.Pi / 2
+ allLongitudes = true
+ }
+
+ // Check whether cap includes the north pole.
+ if lat.Hi >= math.Pi/2 {
+ lat.Hi = math.Pi / 2
+ allLongitudes = true
+ }
+
+ if !allLongitudes {
+ // Compute the range of longitudes covered by the cap. We use the law
+ // of sines for spherical triangles. Consider the triangle ABC where
+ // A is the north pole, B is the center of the cap, and C is the point
+ // of tangency between the cap boundary and a line of longitude. Then
+ // C is a right angle, and letting a,b,c denote the sides opposite A,B,C,
+ // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c).
+ // Here "a" is the cap angle, and "c" is the colatitude (90 degrees
+ // minus the latitude). This formula also works for negative latitudes.
+ //
+ // The formula for sin(a) follows from the relationship h = 1 - cos(a).
+ sinA := math.Sqrt(c.height * (2 - c.height))
+ sinC := math.Cos(latitude(c.center).Radians())
+ if sinA <= sinC {
+ angleA := math.Asin(sinA / sinC)
+ lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2)
+ lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2)
+ }
+ }
+ return Rect{lat, lng}
+}
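+
+// As a consequence of the pole checks above, a cap centered on a pole always
+// produces a bound spanning all longitudes. A sketch (PointFromCoords comes
+// from point.go in this package):
+//
+//	pole := PointFromCoords(0, 0, 1) // north pole
+//	b := CapFromCenterAngle(pole, s1.Angle(0.1)).RectBound()
+//	_ = b.Lng.IsFull() // true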
+
+// ApproxEqual reports whether this cap's center and height are within
+// a reasonable epsilon from the other cap.
+func (c Cap) ApproxEqual(other Cap) bool {
+ // Caps have a wider tolerance than the usual epsilon for approximately equal.
+ const epsilon = 1e-14
+ return c.center.ApproxEqual(other.center) &&
+ math.Abs(c.height-other.height) <= epsilon ||
+ c.IsEmpty() && other.height <= epsilon ||
+ other.IsEmpty() && c.height <= epsilon ||
+ c.IsFull() && other.height >= 2-epsilon ||
+ other.IsFull() && c.height >= 2-epsilon
+}
+
+// AddPoint increases the cap if necessary to include the given point. If this cap is empty,
+// then the center is set to the point with a zero height. p must be unit-length.
+func (c Cap) AddPoint(p Point) Cap {
+ if c.IsEmpty() {
+ return Cap{center: p}
+ }
+
+ // To make sure that the resulting cap actually includes this point,
+ // we need to round up the distance calculation. That is, after
+ // calling cap.AddPoint(p), cap.Contains(p) should be true.
+ dist2 := c.center.Sub(p.Vector).Norm2()
+ c.height = math.Max(c.height, roundUp*0.5*dist2)
+ return c
+}
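+
+// For illustration, a cap can be grown from empty by adding points; the
+// round-up above guarantees the added points test as contained afterwards.
+// A sketch, where p1 and p2 are arbitrary unit-length Points:
+//
+//	c := EmptyCap()
+//	c = c.AddPoint(p1) // zero-height cap centered at p1
+//	c = c.AddPoint(p2)
+//	_ = c.ContainsPoint(p1) && c.ContainsPoint(p2) // true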
+
+// AddCap increases the cap height if necessary to include the other cap. If this cap is empty,
+// it is set to the other cap.
+func (c Cap) AddCap(other Cap) Cap {
+ if c.IsEmpty() {
+ return other
+ }
+ if other.IsEmpty() {
+ return c
+ }
+
+ radius := c.center.Angle(other.center.Vector) + other.Radius()
+ c.height = math.Max(c.height, roundUp*radiusToHeight(radius))
+ return c
+}
+
+// Expanded returns a new cap expanded by the given angle. If the cap is empty,
+// it returns an empty cap.
+func (c Cap) Expanded(distance s1.Angle) Cap {
+ if c.IsEmpty() {
+ return EmptyCap()
+ }
+ return CapFromCenterAngle(c.center, c.Radius()+distance)
+}
+
+func (c Cap) String() string {
+ return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees())
+}
+
+// radiusToHeight converts an s1.Angle into the height of the cap.
+func radiusToHeight(r s1.Angle) float64 {
+ if r.Radians() < 0 {
+ return emptyHeight
+ }
+ if r.Radians() >= math.Pi {
+ return fullHeight
+ }
+ // The height of the cap can be computed as 1 - cos(r), but this isn't very
+ // accurate for angles close to zero (where cos(r) is almost 1). The
+ // formula below has much better precision.
+ d := math.Sin(0.5 * r.Radians())
+	return 2 * d * d
+}
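+
+// Some worked values of the formula above:
+//
+//	radiusToHeight(s1.Angle(0)) == 0                // a single point
+//	radiusToHeight(s1.Angle(math.Pi/2)) == 1        // a hemisphere
+//	radiusToHeight(s1.Angle(math.Pi)) == fullHeight // the whole sphere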
+
+// ContainsCell reports whether the cap contains the given cell.
+func (c Cap) ContainsCell(cell Cell) bool {
+ // If the cap does not contain all cell vertices, return false.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if !c.ContainsPoint(vertices[k]) {
+ return false
+ }
+ }
+ // Otherwise, return true if the complement of the cap does not intersect the cell.
+ return !c.Complement().intersects(cell, vertices)
+}
+
+// IntersectsCell reports whether the cap intersects the cell.
+func (c Cap) IntersectsCell(cell Cell) bool {
+ // If the cap contains any cell vertex, return true.
+ var vertices [4]Point
+ for k := 0; k < 4; k++ {
+ vertices[k] = cell.Vertex(k)
+ if c.ContainsPoint(vertices[k]) {
+ return true
+ }
+ }
+ return c.intersects(cell, vertices)
+}
+
+// intersects reports whether the cap intersects any point of the cell excluding
+// its vertices (which are assumed to already have been checked).
+func (c Cap) intersects(cell Cell, vertices [4]Point) bool {
+ // If the cap is a hemisphere or larger, the cell and the complement of the cap
+ // are both convex. Therefore since no vertex of the cell is contained, no other
+ // interior point of the cell is contained either.
+ if c.height >= 1 {
+ return false
+ }
+
+ // We need to check for empty caps due to the center check just below.
+ if c.IsEmpty() {
+ return false
+ }
+
+ // Optimization: return true if the cell contains the cap center. This allows half
+ // of the edge checks below to be skipped.
+ if cell.ContainsPoint(c.center) {
+ return true
+ }
+
+ // At this point we know that the cell does not contain the cap center, and the cap
+ // does not contain any cell vertex. The only way that they can intersect is if the
+ // cap intersects the interior of some edge.
+ sin2Angle := c.height * (2 - c.height)
+ for k := 0; k < 4; k++ {
+ edge := cell.Edge(k).Vector
+ dot := c.center.Vector.Dot(edge)
+ if dot > 0 {
+ // The center is in the interior half-space defined by the edge. We do not need
+ // to consider these edges, since if the cap intersects this edge then it also
+ // intersects the edge on the opposite side of the cell, because the center is
+ // not contained with the cell.
+ continue
+ }
+
+ // The Norm2() factor is necessary because "edge" is not normalized.
+ if dot*dot > sin2Angle*edge.Norm2() {
+ return false
+ }
+
+ // Otherwise, the great circle containing this edge intersects the interior of the cap. We just
+ // need to check whether the point of closest approach occurs between the two edge endpoints.
+ dir := edge.Cross(c.center.Vector)
+ if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// TODO(roberts): Differences from C++
+// Centroid, Union
diff --git a/vendor/github.com/golang/geo/s2/cell.go b/vendor/github.com/golang/geo/s2/cell.go
new file mode 100644
index 0000000..e106da7
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cell.go
@@ -0,0 +1,385 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/s1"
+)
+
+// Cell is an S2 region object that represents a cell. Unlike CellIDs,
+// it supports efficient containment and intersection tests. However, it is
+// also a more expensive representation.
+type Cell struct {
+ face int8
+ level int8
+ orientation int8
+ id CellID
+ uv r2.Rect
+}
+
+// CellFromCellID constructs a Cell corresponding to the given CellID.
+func CellFromCellID(id CellID) Cell {
+ c := Cell{}
+ c.id = id
+ f, i, j, o := c.id.faceIJOrientation()
+ c.face = int8(f)
+ c.level = int8(c.id.Level())
+ c.orientation = int8(o)
+ c.uv = ijLevelToBoundUV(i, j, int(c.level))
+ return c
+}
+
+// CellFromPoint constructs a cell for the given Point.
+func CellFromPoint(p Point) Cell {
+ return CellFromCellID(cellIDFromPoint(p))
+}
+
+// CellFromLatLng constructs a cell for the given LatLng.
+func CellFromLatLng(ll LatLng) Cell {
+ return CellFromCellID(CellIDFromLatLng(ll))
+}
+
+// Face returns the face this cell is on.
+func (c Cell) Face() int {
+ return int(c.face)
+}
+
+// Level returns the level of this cell.
+func (c Cell) Level() int {
+ return int(c.level)
+}
+
+// ID returns the CellID this cell represents.
+func (c Cell) ID() CellID {
+ return c.id
+}
+
+// IsLeaf returns whether this Cell is a leaf or not.
+func (c Cell) IsLeaf() bool {
+ return c.level == maxLevel
+}
+
+// SizeIJ returns the edge length of this cell in (i,j)-space.
+func (c Cell) SizeIJ() int {
+ return sizeIJ(int(c.level))
+}
+
+// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order
+// (lower left, lower right, upper right, upper left in the UV plane).
+func (c Cell) Vertex(k int) Point {
+ return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()}
+}
+
+// Edge returns the inward-facing normal of the great circle passing through
+// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3).
+func (c Cell) Edge(k int) Point {
+ switch k {
+ case 0:
+ return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom
+ case 1:
+ return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right
+ case 2:
+ return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top
+ default:
+ return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left
+ }
+}
+
+// BoundUV returns the bounds of this cell in (u,v)-space.
+func (c Cell) BoundUV() r2.Rect {
+ return c.uv
+}
+
+// Center returns the direction vector corresponding to the center in
+// (s,t)-space of the given cell. This is the point at which the cell is
+// divided into four subcells; it is not necessarily the centroid of the
+// cell in (u,v)-space or (x,y,z)-space.
+func (c Cell) Center() Point {
+ return Point{c.id.rawPoint().Normalize()}
+}
+
+// Children returns the four direct children of this cell in traversal order
+// and returns true. If this is a leaf cell, or the children could not be created,
+// false is returned.
+// The C++ method is called Subdivide.
+func (c Cell) Children() ([4]Cell, bool) {
+ var children [4]Cell
+
+ if c.id.IsLeaf() {
+ return children, false
+ }
+
+ // Compute the cell midpoint in uv-space.
+ uvMid := c.id.centerUV()
+
+ // Create four children with the appropriate bounds.
+ cid := c.id.ChildBegin()
+ for pos := 0; pos < 4; pos++ {
+ children[pos] = Cell{
+ face: c.face,
+ level: c.level + 1,
+ orientation: c.orientation ^ int8(posToOrientation[pos]),
+ id: cid,
+ }
+
+ // We want to split the cell in half in u and v. To decide which
+ // side to set equal to the midpoint value, we look at cell's (i,j)
+ // position within its parent. The index for i is in bit 1 of ij.
+ ij := posToIJ[c.orientation][pos]
+ i := ij >> 1
+ j := ij & 1
+ if i == 1 {
+ children[pos].uv.X.Hi = c.uv.X.Hi
+ children[pos].uv.X.Lo = uvMid.X
+ } else {
+ children[pos].uv.X.Lo = c.uv.X.Lo
+ children[pos].uv.X.Hi = uvMid.X
+ }
+ if j == 1 {
+ children[pos].uv.Y.Hi = c.uv.Y.Hi
+ children[pos].uv.Y.Lo = uvMid.Y
+ } else {
+ children[pos].uv.Y.Lo = c.uv.Y.Lo
+ children[pos].uv.Y.Hi = uvMid.Y
+ }
+ cid = cid.Next()
+ }
+ return children, true
+}
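+
+// For illustration, the four children returned above tile the parent exactly,
+// so their exact areas sum to the parent's area (up to floating-point error).
+// A sketch:
+//
+//	parent := CellFromCellID(CellIDFromFace(0).ChildBeginAtLevel(3))
+//	if kids, ok := parent.Children(); ok {
+//		sum := kids[0].ExactArea() + kids[1].ExactArea() +
+//			kids[2].ExactArea() + kids[3].ExactArea()
+//		_ = sum // ~parent.ExactArea()
+//	}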
+
+// ExactArea returns the area of this cell as accurately as possible.
+func (c Cell) ExactArea() float64 {
+ v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3)
+ return PointArea(v0, v1, v2) + PointArea(v0, v2, v3)
+}
+
+// ApproxArea returns the approximate area of this cell. This method is accurate
+// to within 3% for all cell sizes and accurate to within 0.1% for cells
+// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's
+// surface). It is moderately cheap to compute.
+func (c Cell) ApproxArea() float64 {
+ // All cells at the first two levels have the same area.
+ if c.level < 2 {
+ return c.AverageArea()
+ }
+
+ // First, compute the approximate area of the cell when projected
+ // perpendicular to its normal. The cross product of its diagonals gives
+ // the normal, and the length of the normal is twice the projected area.
+ flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector).
+ Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm())
+
+ // Now, compensate for the curvature of the cell surface by pretending
+ // that the cell is shaped like a spherical cap. The ratio of the
+ // area of a spherical cap to the area of its projected disc turns out
+ // to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc.
+ // For example, when r=0 the ratio is 1, and when r=1 the ratio is 2.
+ // Here we set Pi*r*r == flatArea to find the equivalent disc.
+ return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1)))
+}
+
+// AverageArea returns the average area of cells at the level of this cell.
+// This is accurate to within a factor of 1.7.
+func (c Cell) AverageArea() float64 {
+ return AvgAreaMetric.Value(int(c.level))
+}
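+
+// For illustration, the three area methods above trade accuracy for cost.
+// A sketch for a mid-level cell (LatLngFromDegrees comes from latlng.go in
+// this package):
+//
+//	id := CellFromLatLng(LatLngFromDegrees(40, -74)).ID().Parent(10)
+//	cell := CellFromCellID(id)
+//	_ = cell.ExactArea()   // most accurate, most expensive
+//	_ = cell.ApproxArea()  // within ~0.1% at level 10
+//	_ = cell.AverageArea() // within a factor of ~1.7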
+
+// IntersectsCell reports whether the intersection of this cell and the other cell is not empty.
+func (c Cell) IntersectsCell(oc Cell) bool {
+ return c.id.Intersects(oc.id)
+}
+
+// ContainsCell reports whether this cell contains the other cell.
+func (c Cell) ContainsCell(oc Cell) bool {
+ return c.id.Contains(oc.id)
+}
+
+// latitude returns the latitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
+func (c Cell) latitude(i, j int) float64 {
+ var u, v float64
+ switch {
+ case i == 0 && j == 0:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Lo
+ case i == 0 && j == 1:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Hi
+ case i == 1 && j == 0:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Lo
+ case i == 1 && j == 1:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Hi
+ default:
+		panic("i and/or j is out of bounds")
+ }
+ return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
+}
+
+// longitude returns the longitude of the cell vertex given by (i,j), where "i" and "j" are either 0 or 1.
+func (c Cell) longitude(i, j int) float64 {
+ var u, v float64
+ switch {
+ case i == 0 && j == 0:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Lo
+ case i == 0 && j == 1:
+ u = c.uv.X.Lo
+ v = c.uv.Y.Hi
+ case i == 1 && j == 0:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Lo
+ case i == 1 && j == 1:
+ u = c.uv.X.Hi
+ v = c.uv.Y.Hi
+ default:
+		panic("i and/or j is out of bounds")
+ }
+ return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians()
+}
+
+var (
+ poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon
+)
+
+// RectBound returns the bounding rectangle of this cell.
+func (c Cell) RectBound() Rect {
+ if c.level > 0 {
+ // Except for cells at level 0, the latitude and longitude extremes are
+ // attained at the vertices. Furthermore, the latitude range is
+ // determined by one pair of diagonally opposite vertices and the
+ // longitude range is determined by the other pair.
+ //
+ // We first determine which corner (i,j) of the cell has the largest
+ // absolute latitude. To maximize latitude, we want to find the point in
+ // the cell that has the largest absolute z-coordinate and the smallest
+ // absolute x- and y-coordinates. To do this we look at each coordinate
+ // (u and v), and determine whether we want to minimize or maximize that
+ // coordinate based on the axis direction and the cell's (u,v) quadrant.
+ u := c.uv.X.Lo + c.uv.X.Hi
+ v := c.uv.Y.Lo + c.uv.Y.Hi
+ var i, j int
+ if uAxis(int(c.face)).Z == 0 {
+ if u < 0 {
+ i = 1
+ }
+ } else if u > 0 {
+ i = 1
+ }
+ if vAxis(int(c.face)).Z == 0 {
+ if v < 0 {
+ j = 1
+ }
+ } else if v > 0 {
+ j = 1
+ }
+ lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j))
+ lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j))
+
+ // We grow the bounds slightly to make sure that the bounding rectangle
+ // contains LatLngFromPoint(P) for any point P inside the loop L defined by the
+ // four *normalized* vertices. Note that normalization of a vector can
+ // change its direction by up to 0.5 * dblEpsilon radians, and it is not
+ // enough just to add Normalize calls to the code above because the
+ // latitude/longitude ranges are not necessarily determined by diagonally
+ // opposite vertex pairs after normalization.
+ //
+ // We would like to bound the amount by which the latitude/longitude of a
+ // contained point P can exceed the bounds computed above. In the case of
+ // longitude, the normalization error can change the direction of rounding
+ // leading to a maximum difference in longitude of 2 * dblEpsilon. In
+ // the case of latitude, the normalization error can shift the latitude by
+ // up to 0.5 * dblEpsilon and the other sources of error can cause the
+ // two latitudes to differ by up to another 1.5 * dblEpsilon, which also
+ // leads to a maximum difference of 2 * dblEpsilon.
+ return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure()
+ }
+
+ // The 4 cells around the equator extend to +/-45 degrees latitude at the
+ // midpoints of their top and bottom edges. The two cells covering the
+ // poles extend down to +/-35.26 degrees at their vertices. The maximum
+ // error in this calculation is 0.5 * dblEpsilon.
+ var bound Rect
+ switch c.face {
+ case 0:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}}
+ case 1:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}}
+ case 2:
+ bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()}
+ case 3:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}}
+ case 4:
+ bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}}
+ default:
+ bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()}
+ }
+
+ // Finally, we expand the bound to account for the error when a point P is
+ // converted to an LatLng to test for containment. (The bound should be
+ // large enough so that it contains the computed LatLng of any contained
+ // point, not just the infinite-precision version.) We don't need to expand
+ // longitude because longitude is calculated via a single call to math.Atan2,
+ // which is guaranteed to be semi-monotonic.
+ return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)})
+}
+
+// CapBound returns the bounding cap of this cell.
+func (c Cell) CapBound() Cap {
+ // We use the cell center in (u,v)-space as the cap axis. This vector is very close
+ // to GetCenter() and faster to compute. Neither one of these vectors yields the
+	// to Center() and faster to compute. Neither one of these vectors yields the
+ cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()})
+ for k := 0; k < 4; k++ {
+ cap = cap.AddPoint(c.Vertex(k))
+ }
+ return cap
+}
+
+// ContainsPoint reports whether this cell contains the given point. Note that
+// unlike Loop/Polygon, a Cell is considered to be a closed set. This means
+// that a point on a Cell's edge or vertex belongs to the Cell and the relevant
+// adjacent Cells too.
+//
+// If you want every point to be contained by exactly one Cell,
+// you will need to convert the Cell to a Loop.
+func (c Cell) ContainsPoint(p Point) bool {
+ var uv r2.Point
+ var ok bool
+ if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok {
+ return false
+ }
+
+ // Expand the (u,v) bound to ensure that
+ //
+ // CellFromPoint(p).ContainsPoint(p)
+ //
+ // is always true. To do this, we need to account for the error when
+ // converting from (u,v) coordinates to (s,t) coordinates. In the
+ // normal case the total error is at most dblEpsilon.
+ return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv)
+}
+
+// BUG(roberts): Differences from C++:
+// Subdivide
+// BoundUV
+// Distance/DistanceToEdge
+// VertexChordDistance
diff --git a/vendor/github.com/golang/geo/s2/cellid.go b/vendor/github.com/golang/geo/s2/cellid.go
new file mode 100644
index 0000000..fdb2954
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cellid.go
@@ -0,0 +1,729 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/r3"
+)
+
+// CellID uniquely identifies a cell in the S2 cell decomposition.
+// The most significant 3 bits encode the face number (0-5). The
+// remaining 61 bits encode the position of the center of this cell
+// along the Hilbert curve on that face. The zero value and the value
+// (1<<64)-1 are invalid cell IDs. The first compares less than any
+// valid cell ID, the second compares greater than any valid cell ID.
+type CellID uint64
+
+// TODO(dsymonds): Some of these constants should probably be exported.
+const (
+ faceBits = 3
+ numFaces = 6
+ maxLevel = 30
+ posBits = 2*maxLevel + 1
+ maxSize = 1 << maxLevel
+ wrapOffset = uint64(numFaces) << posBits
+)
+
+// CellIDFromFacePosLevel returns a cell given its face in the range
+// [0,5], the 61-bit Hilbert curve position pos within that face, and
+// the level in the range [0,maxLevel]. The position in the cell ID
+// will be truncated to correspond to the Hilbert curve position at
+// the center of the returned cell.
+func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID {
+ return CellID(uint64(face)<<posBits + pos | 1).Parent(level)
+}
+
+// CellIDFromFace returns the cell corresponding to a given S2 cube face.
+func CellIDFromFace(face int) CellID {
+ return CellID((uint64(face) << posBits) + lsbForLevel(0))
+}
+
+// CellIDFromLatLng returns the leaf cell containing ll.
+func CellIDFromLatLng(ll LatLng) CellID {
+ return cellIDFromPoint(PointFromLatLng(ll))
+}
+
+// CellIDFromToken returns a cell given a hex-encoded string of its uint64 ID.
+func CellIDFromToken(s string) CellID {
+ if len(s) > 16 {
+ return CellID(0)
+ }
+ n, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return CellID(0)
+ }
+ // Equivalent to right-padding string with zeros to 16 characters.
+ if len(s) < 16 {
+ n = n << (4 * uint(16-len(s)))
+ }
+ return CellID(n)
+}
+
+// ToToken returns a hex-encoded string of the uint64 cell id, with leading
+// zeros included but trailing zeros stripped.
+func (ci CellID) ToToken() string {
+ s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0")
+ if len(s) == 0 {
+ return "X"
+ }
+ return s
+}
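+
+// For illustration, ToToken and CellIDFromToken round-trip for any valid ID,
+// since a token is just the hex form of the uint64 with trailing zeros
+// stripped. A sketch (LatLngFromDegrees comes from latlng.go in this package):
+//
+//	ci := CellIDFromLatLng(LatLngFromDegrees(40, -74))
+//	_ = CellIDFromToken(ci.ToToken()) == ci // true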
+
+// IsValid reports whether ci represents a valid cell.
+func (ci CellID) IsValid() bool {
+ return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0)
+}
+
+// Face returns the cube face for this cell ID, in the range [0,5].
+func (ci CellID) Face() int { return int(uint64(ci) >> posBits) }
+
+// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1].
+func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) }
+
+// Level returns the subdivision level of this cell ID, in the range [0, maxLevel].
+func (ci CellID) Level() int {
+ return maxLevel - findLSBSetNonZero64(uint64(ci))>>1
+}
+
+// IsLeaf returns whether this cell ID is at the deepest level;
+// that is, the level at which the cells are smallest.
+func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 }
+
+// ChildPosition returns the child position (0..3) of this cell's
+// ancestor at the given level, relative to its parent. The argument
+// should be in the range 1..maxLevel. For example,
+// ChildPosition(1) returns the position of this cell's level-1
+// ancestor within its top-level face cell.
+func (ci CellID) ChildPosition(level int) int {
+ return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3
+}
+
+// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level.
+func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) }
+
+// Parent returns the cell at the given level, which must be no greater than the current level.
+func (ci CellID) Parent(level int) CellID {
+ lsb := lsbForLevel(level)
+ return CellID((uint64(ci) & -lsb) | lsb)
+}
+
+// immediateParent is cheaper than Parent, but assumes !ci.isFace().
+func (ci CellID) immediateParent() CellID {
+ nlsb := CellID(ci.lsb() << 2)
+ return (ci & -nlsb) | nlsb
+}
+
+// isFace returns whether this is a top-level (face) cell.
+func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 }
+
+// lsb returns the least significant bit that is set.
+func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) }
+
+// Children returns the four immediate children of this cell.
+// If ci is a leaf cell, it returns four identical cells that are not the children.
+func (ci CellID) Children() [4]CellID {
+ var ch [4]CellID
+ lsb := CellID(ci.lsb())
+ ch[0] = ci - lsb + lsb>>2
+ lsb >>= 1
+ ch[1] = ch[0] + lsb
+ ch[2] = ch[1] + lsb
+ ch[3] = ch[2] + lsb
+ return ch
+}
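+
+// For illustration, each child returned above has the original cell as its
+// parent at the original level. A sketch:
+//
+//	ci := CellIDFromFace(2).ChildBeginAtLevel(5)
+//	for _, ch := range ci.Children() {
+//		_ = ch.Parent(ci.Level()) == ci // true for all four children
+//	}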
+
+func sizeIJ(level int) int {
+ return 1 << uint(maxLevel-level)
+}
+
+// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges.
+// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space.
+// All neighbors are guaranteed to be distinct.
+func (ci CellID) EdgeNeighbors() [4]CellID {
+ level := ci.Level()
+ size := sizeIJ(level)
+ f, i, j, _ := ci.faceIJOrientation()
+ return [4]CellID{
+ cellIDFromFaceIJWrap(f, i, j-size).Parent(level),
+ cellIDFromFaceIJWrap(f, i+size, j).Parent(level),
+ cellIDFromFaceIJWrap(f, i, j+size).Parent(level),
+ cellIDFromFaceIJWrap(f, i-size, j).Parent(level),
+ }
+}
+
+// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level.
+// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of
+// the 8 cube vertices.)
+func (ci CellID) VertexNeighbors(level int) []CellID {
+ halfSize := sizeIJ(level + 1)
+ size := halfSize << 1
+ f, i, j, _ := ci.faceIJOrientation()
+
+ var isame, jsame bool
+ var ioffset, joffset int
+ if i&halfSize != 0 {
+ ioffset = size
+ isame = (i + size) < maxSize
+ } else {
+ ioffset = -size
+ isame = (i - size) >= 0
+ }
+ if j&halfSize != 0 {
+ joffset = size
+ jsame = (j + size) < maxSize
+ } else {
+ joffset = -size
+ jsame = (j - size) >= 0
+ }
+
+ results := []CellID{
+ ci.Parent(level),
+ cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level),
+ cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level),
+ }
+
+ if isame || jsame {
+ results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level))
+ }
+
+ return results
+}
+
+// RangeMin returns the minimum CellID that is contained within this cell.
+func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) }
+
+// RangeMax returns the maximum CellID that is contained within this cell.
+func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) }
+
+// Contains returns true iff the CellID contains oci.
+func (ci CellID) Contains(oci CellID) bool {
+ return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax())
+}
+
+// Intersects returns true iff the CellID intersects oci.
+func (ci CellID) Intersects(oci CellID) bool {
+ return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin())
+}
+
+// String returns the string representation of the cell ID in the form "1/3210".
+func (ci CellID) String() string {
+ if !ci.IsValid() {
+ return "Invalid: " + strconv.FormatInt(int64(ci), 16)
+ }
+ var b bytes.Buffer
+ b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above
+ b.WriteByte('/')
+ for level := 1; level <= ci.Level(); level++ {
+ b.WriteByte("0123"[ci.ChildPosition(level)])
+ }
+ return b.String()
+}
+
+// Point returns the center of the s2 cell on the sphere as a Point.
+// The maximum directional error in Point (compared to the exact
+// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length
+// error is 2 * dblEpsilon (the same as Normalize).
+func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} }
+
+// LatLng returns the center of the s2 cell on the sphere as a LatLng.
+func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) }
+
+// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order.
+//
+// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() {
+// ...
+// }
+func (ci CellID) ChildBegin() CellID {
+ ol := ci.lsb()
+ return CellID(uint64(ci) - ol + ol>>2)
+}
+
+// ChildBeginAtLevel returns the first cell in a traversal of children a given level deeper than this cell, in
+// Hilbert curve order. The given level must be no smaller than the cell's level.
+// See ChildBegin for example use.
+func (ci CellID) ChildBeginAtLevel(level int) CellID {
+ return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level))
+}
+
+// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order.
+// The returned cell may be invalid.
+func (ci CellID) ChildEnd() CellID {
+ ol := ci.lsb()
+ return CellID(uint64(ci) + ol + ol>>2)
+}
+
+// ChildEndAtLevel returns the first cell after the last child in a traversal of children a given level deeper
+// than this cell, in Hilbert curve order.
+// The given level must be no smaller than the cell's level.
+// The returned cell may be invalid.
+func (ci CellID) ChildEndAtLevel(level int) CellID {
+ return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level))
+}
+
+// Next returns the next cell along the Hilbert curve.
+// This is expected to be used with ChildBegin and ChildEnd,
+// or ChildBeginAtLevel and ChildEndAtLevel.
+func (ci CellID) Next() CellID {
+ return CellID(uint64(ci) + ci.lsb()<<1)
+}
+
+// Prev returns the previous cell along the Hilbert curve.
+func (ci CellID) Prev() CellID {
+ return CellID(uint64(ci) - ci.lsb()<<1)
+}
+
+// NextWrap returns the next cell along the Hilbert curve, wrapping from last to
+// first as necessary. This should not be used with ChildBegin and ChildEnd.
+func (ci CellID) NextWrap() CellID {
+ n := ci.Next()
+ if uint64(n) < wrapOffset {
+ return n
+ }
+ return CellID(uint64(n) - wrapOffset)
+}
+
+// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from
+// first to last as necessary. This should not be used with ChildBegin and ChildEnd.
+func (ci CellID) PrevWrap() CellID {
+ p := ci.Prev()
+ if uint64(p) < wrapOffset {
+ return p
+ }
+ return CellID(uint64(p) + wrapOffset)
+}
+
+// AdvanceWrap advances or retreats the indicated number of steps along the
+// Hilbert curve at the current level and returns the new position. The
+// position wraps between the first and last faces as necessary.
+func (ci CellID) AdvanceWrap(steps int64) CellID {
+ if steps == 0 {
+ return ci
+ }
+
+ // We clamp the number of steps if necessary to ensure that we do not
+ // advance past the End() or before the Begin() of this level.
+ shift := uint(2*(maxLevel-ci.Level()) + 1)
+ if steps < 0 {
+ if min := -int64(uint64(ci) >> shift); steps < min {
+ wrap := int64(wrapOffset >> shift)
+ steps %= wrap
+ if steps < min {
+ steps += wrap
+ }
+ }
+ } else {
+ // Unlike Advance(), we don't want to return End(level).
+ if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max {
+ wrap := int64(wrapOffset >> shift)
+ steps %= wrap
+ if steps > max {
+ steps -= wrap
+ }
+ }
+ }
+
+ // If steps is negative, then shifting it left has undefined behavior.
+ // Cast to uint64 for a 2's complement answer.
+ return CellID(uint64(ci) + (uint64(steps) << shift))
+}
+
+// TODO: the methods below are not exported yet. Settle on the entire API design
+// before doing this. Do we want to mirror the C++ one as closely as possible?
+
+// rawPoint returns an unnormalized r3 vector from the origin through the center
+// of the s2 cell on the sphere.
+func (ci CellID) rawPoint() r3.Vector {
+ face, si, ti := ci.faceSiTi()
+	return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti)))
+}
+
+// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell.
+func (ci CellID) faceSiTi() (face, si, ti int) {
+ face, i, j, _ := ci.faceIJOrientation()
+ delta := 0
+ if ci.IsLeaf() {
+ delta = 1
+ } else {
+ if (i^(int(ci)>>2))&1 != 0 {
+ delta = 2
+ }
+ }
+ return face, 2*i + delta, 2*j + delta
+}
+
+// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci.
+func (ci CellID) faceIJOrientation() (f, i, j, orientation int) {
+ f = ci.Face()
+ orientation = f & swapMask
+ nbits := maxLevel - 7*lookupBits // first iteration
+
+ for k := 7; k >= 0; k-- {
+ orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint((2 * nbits))) - 1)) << 2
+ orientation = lookupIJ[orientation]
+ i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits)
+ j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits)
+ orientation &= (swapMask | invertMask)
+ nbits = lookupBits // following iterations
+ }
+
+ if ci.lsb()&0x1111111111111110 != 0 {
+ orientation ^= swapMask
+ }
+
+ return
+}
+
+// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates.
+func cellIDFromFaceIJ(f, i, j int) CellID {
+ // Note that this value gets shifted one bit to the left at the end
+ // of the function.
+ n := uint64(f) << (posBits - 1)
+ // Alternating faces have opposite Hilbert curve orientations; this
+ // is necessary in order for all faces to have a right-handed
+ // coordinate system.
+ bits := f & swapMask
+ // Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert
+ // curve position. The lookup table transforms a 10-bit key of the form
+ // "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the
+ // letters [ijpo] denote bits of "i", "j", Hilbert curve position, and
+ // Hilbert curve orientation respectively.
+ for k := 7; k >= 0; k-- {
+ mask := (1 << lookupBits) - 1
+ bits += int((i>>uint(k*lookupBits))&mask) << (lookupBits + 2)
+ bits += int((j>>uint(k*lookupBits))&mask) << 2
+ bits = lookupPos[bits]
+ n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits)
+ bits &= (swapMask | invertMask)
+ }
+ return CellID(n*2 + 1)
+}
+
+func cellIDFromFaceIJWrap(f, i, j int) CellID {
+ // Convert i and j to the coordinates of a leaf cell just beyond the
+ // boundary of this face. This prevents 32-bit overflow in the case
+ // of finding the neighbors of a face cell.
+ i = clamp(i, -1, maxSize)
+ j = clamp(j, -1, maxSize)
+
+ // We want to wrap these coordinates onto the appropriate adjacent face.
+ // The easiest way to do this is to convert the (i,j) coordinates to (x,y,z)
+ // (which yields a point outside the normal face boundary), and then call
+ // xyzToFaceUV to project back onto the correct face.
+ //
+ // The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using
+ // the linear projection (u=2*s-1 and v=2*t-1). (The code further below
+ // converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1).
+ // Any projection would work here, so we use the simplest.) We also clamp
+ // the (u,v) coordinates so that the point is barely outside the
+ // [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step
+ // (which divides by the new z coordinate) might change the other
+ // coordinates enough so that we end up in the wrong leaf cell.
+ const scale = 1.0 / maxSize
+ limit := math.Nextafter(1, 2)
+ u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize)))
+ v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize)))
+
+ // Find the leaf cell coordinates on the adjacent face, and convert
+ // them to a cell id at the appropriate level.
+ f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v))
+ return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1)))
+}
+
+func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID {
+ if sameFace {
+ return cellIDFromFaceIJ(f, i, j)
+ }
+ return cellIDFromFaceIJWrap(f, i, j)
+}
+
+// clamp returns the number closest to x within the range min..max.
+func clamp(x, min, max int) int {
+ if x < min {
+ return min
+ }
+ if x > max {
+ return max
+ }
+ return x
+}
+
+// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding
+// s- or t-value contained by that cell. The argument must be in the range
+// [0..2**30], i.e. up to one position beyond the normal range of valid leaf
+// cell indices.
+func ijToSTMin(i int) float64 {
+ return float64(i) / float64(maxSize)
+}
+
+// stToIJ converts value in ST coordinates to a value in IJ coordinates.
+func stToIJ(s float64) int {
+ return clamp(int(math.Floor(maxSize*s)), 0, maxSize-1)
+}
+
+// cellIDFromPoint returns a leaf cell containing point p. Usually there is
+// exactly one such cell, but for points along the edge of a cell, any
+// adjacent cell may be (deterministically) chosen. This is because
+// s2.CellIDs are considered to be closed sets. The returned cell will
+// always contain the given point, i.e.
+//
+// CellFromPoint(p).ContainsPoint(p)
+//
+// is always true.
+func cellIDFromPoint(p Point) CellID {
+ f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z})
+ i := stToIJ(uvToST(u))
+ j := stToIJ(uvToST(v))
+ return cellIDFromFaceIJ(f, i, j)
+}
+
+// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given
+// level containing the leaf cell with the given (i,j)-coordinates.
+func ijLevelToBoundUV(i, j, level int) r2.Rect {
+ cellSize := sizeIJ(level)
+ xLo := i & -cellSize
+ yLo := j & -cellSize
+
+ return r2.Rect{
+ X: r1.Interval{
+ Lo: stToUV(ijToSTMin(xLo)),
+ Hi: stToUV(ijToSTMin(xLo + cellSize)),
+ },
+ Y: r1.Interval{
+ Lo: stToUV(ijToSTMin(yLo)),
+ Hi: stToUV(ijToSTMin(yLo + cellSize)),
+ },
+ }
+}
+
+// Constants related to the bit mangling in the Cell ID.
+const (
+ lookupBits = 4
+ swapMask = 0x01
+ invertMask = 0x02
+)
+
+var (
+ ijToPos = [4][4]int{
+ {0, 1, 3, 2}, // canonical order
+ {0, 3, 1, 2}, // axes swapped
+ {2, 3, 1, 0}, // bits inverted
+ {2, 1, 3, 0}, // swapped & inverted
+ }
+ posToIJ = [4][4]int{
+ {0, 1, 3, 2}, // canonical order: (0,0), (0,1), (1,1), (1,0)
+ {0, 2, 3, 1}, // axes swapped: (0,0), (1,0), (1,1), (0,1)
+ {3, 2, 0, 1}, // bits inverted: (1,1), (1,0), (0,0), (0,1)
+ {3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0)
+ }
+ posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask}
+ lookupIJ [1 << (2*lookupBits + 2)]int
+ lookupPos [1 << (2*lookupBits + 2)]int
+)
+
+func init() {
+ initLookupCell(0, 0, 0, 0, 0, 0)
+ initLookupCell(0, 0, 0, swapMask, 0, swapMask)
+ initLookupCell(0, 0, 0, invertMask, 0, invertMask)
+ initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask)
+}
+
+// initLookupCell initializes the lookupIJ table at init time.
+func initLookupCell(level, i, j, origOrientation, pos, orientation int) {
+ if level == lookupBits {
+ ij := (i << lookupBits) + j
+ lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation
+ lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation
+ return
+ }
+
+ level++
+ i <<= 1
+ j <<= 1
+ pos <<= 2
+ r := posToIJ[orientation]
+ initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0])
+ initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1])
+ initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2])
+ initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3])
+}
+
+// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs.
+func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) {
+ bits := uint64(ci ^ other)
+ if bits < ci.lsb() {
+ bits = ci.lsb()
+ }
+ if bits < other.lsb() {
+ bits = other.lsb()
+ }
+
+ msbPos := findMSBSetNonZero64(bits)
+ if msbPos > 60 {
+ return 0, false
+ }
+ return (60 - msbPos) >> 1, true
+}
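+
+// For illustration, a cell and one of its descendants have the ancestor's
+// level as their common ancestor level. A sketch:
+//
+//	a := CellIDFromFace(0)      // level 0
+//	b := a.ChildBeginAtLevel(5) // a level-5 descendant of a
+//	level, ok := a.CommonAncestorLevel(b)
+//	_, _ = level, ok // 0, true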
+
+// findMSBSetNonZero64 returns the index (between 0 and 63) of the most
+// significant set bit. Passing zero to this function has undefined behavior.
+func findMSBSetNonZero64(bits uint64) int {
+ val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000}
+ shift := []uint64{1, 2, 4, 8, 16, 32}
+ var msbPos uint64
+ for i := 5; i >= 0; i-- {
+ if bits&val[i] != 0 {
+ bits >>= shift[i]
+ msbPos |= shift[i]
+ }
+ }
+ return int(msbPos)
+}
+
+const deBruijn64 = 0x03f79d71b4ca8b09
+const digitMask = uint64(1<<64 - 1)
+
+var deBruijn64Lookup = []byte{
+ 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
+ 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
+ 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
+ 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
+}
+
+// findLSBSetNonZero64 returns the index (between 0 and 63) of the least
+// significant set bit. Passing zero to this function has undefined behavior.
+//
+// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go
+// which references (Knuth, volume 4, section 7.3.1).
+func findLSBSetNonZero64(bits uint64) int {
+ return int(deBruijn64Lookup[((bits&-bits)*(deBruijn64&digitMask))>>58])
+}
+
+// Advance advances or retreats the indicated number of steps along the
+// Hilbert curve at the current level, and returns the new position. The
+// position is never advanced past End() or before Begin().
+func (ci CellID) Advance(steps int64) CellID {
+ if steps == 0 {
+ return ci
+ }
+
+ // We clamp the number of steps if necessary to ensure that we do not
+ // advance past the End() or before the Begin() of this level. Note that
+ // minSteps and maxSteps always fit in a signed 64-bit integer.
+ stepShift := uint(2*(maxLevel-ci.Level()) + 1)
+ if steps < 0 {
+ minSteps := -int64(uint64(ci) >> stepShift)
+ if steps < minSteps {
+ steps = minSteps
+ }
+ } else {
+ maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift)
+ if steps > maxSteps {
+ steps = maxSteps
+ }
+ }
+ return ci + CellID(steps)<<stepShift
+}
+
+// centerST returns the center of the CellID in (s,t)-space.
+func (ci CellID) centerST() r2.Point {
+ _, si, ti := ci.faceSiTi()
+ return r2.Point{siTiToST(uint64(si)), siTiToST(uint64(ti))}
+}
+
+// sizeST returns the edge length of this CellID in (s,t)-space at the given level.
+func (ci CellID) sizeST(level int) float64 {
+ return ijToSTMin(sizeIJ(level))
+}
+
+// boundST returns the bound of this CellID in (s,t)-space.
+func (ci CellID) boundST() r2.Rect {
+ s := ci.sizeST(ci.Level())
+ return r2.RectFromCenterSize(ci.centerST(), r2.Point{s, s})
+}
+
+// centerUV returns the center of this CellID in (u,v)-space. Note that
+// the center of the cell is defined as the point at which it is recursively
+// subdivided into four children; in general, it is not at the midpoint of
+// the (u,v) rectangle covered by the cell.
+func (ci CellID) centerUV() r2.Point {
+ _, si, ti := ci.faceSiTi()
+ return r2.Point{stToUV(siTiToST(uint64(si))), stToUV(siTiToST(uint64(ti)))}
+}
+
+// boundUV returns the bound of this CellID in (u,v)-space.
+func (ci CellID) boundUV() r2.Rect {
+ _, i, j, _ := ci.faceIJOrientation()
+	return ijLevelToBoundUV(i, j, ci.Level())
+}
+
+// MaxTile returns the largest cell with the same RangeMin such that
+// RangeMax < limit.RangeMin. It returns limit if no such cell exists.
+// This method can be used to generate a small set of CellIDs that covers
+// a given range (a tiling). This example shows how to generate a tiling
+// for a semi-open range of leaf cells [start, limit):
+//
+// for id := start.MaxTile(limit); id != limit; id = id.Next().MaxTile(limit) { ... }
+//
+// Note that in general the cells in the tiling will be of different sizes;
+// they gradually get larger (near the middle of the range) and then
+// gradually get smaller as limit is approached.
+func (ci CellID) MaxTile(limit CellID) CellID {
+ start := ci.RangeMin()
+ if start >= limit.RangeMin() {
+ return limit
+ }
+
+ if ci.RangeMax() >= limit {
+ // The cell is too large, shrink it. Note that when generating coverings
+ // of CellID ranges, this loop usually executes only once. Also because
+ // ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the
+ // time we reach a leaf cell.
+ for {
+ ci = ci.Children()[0]
+ if ci.RangeMax() < limit {
+ break
+ }
+ }
+ return ci
+ }
+
+ // The cell may be too small. Grow it if necessary. Note that generally
+ // this loop only iterates once.
+ for !ci.isFace() {
+ parent := ci.immediateParent()
+ if parent.RangeMin() != start || parent.RangeMax() >= limit {
+ break
+ }
+ ci = parent
+ }
+ return ci
+}
+
+// TODO: Differences from C++:
+// ExpandedByDistanceUV/ExpandEndpoint
+// CenterSiTi
+// AppendVertexNeighbors/AppendAllNeighbors
diff --git a/vendor/github.com/golang/geo/s2/cellunion.go b/vendor/github.com/golang/geo/s2/cellunion.go
new file mode 100644
index 0000000..dfdf611
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/cellunion.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "sort"
+)
+
+// A CellUnion is a collection of CellIDs.
+//
+// It is normalized if it is sorted, and does not contain redundancy.
+// Specifically, it may not contain the same CellID twice, nor a CellID that
+// is contained by another, nor the four sibling CellIDs that are children of
+// a single higher level CellID.
+type CellUnion []CellID
+
+// CellUnionFromRange creates a CellUnion that covers the half-open range
+// of leaf cells [begin, end). If begin == end the resulting union is empty.
+// This requires that begin and end are both leaves, and begin <= end.
+// To create a closed-ended range, pass in end.Next().
+func CellUnionFromRange(begin, end CellID) CellUnion {
+ // We repeatedly add the largest cell we can.
+ var cu CellUnion
+ for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) {
+ cu = append(cu, id)
+ }
+ return cu
+}
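+
+// For illustration, covering the full leaf range of a single cell reproduces
+// that cell, because MaxTile grows the first tile as far as possible. A sketch:
+//
+//	ci := CellIDFromFace(1).ChildBeginAtLevel(4)
+//	cu := CellUnionFromRange(ci.RangeMin(), ci.RangeMax().Next())
+//	_ = len(cu) == 1 && cu[0] == ci // true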
+
+// Normalize normalizes the CellUnion.
+func (cu *CellUnion) Normalize() {
+ sort.Sort(byID(*cu))
+
+ output := make([]CellID, 0, len(*cu)) // the list of accepted cells
+ // Loop invariant: output is a sorted list of cells with no redundancy.
+ for _, ci := range *cu {
+ // The first two passes here either ignore this new candidate,
+ // or remove previously accepted cells that are covered by this candidate.
+
+ // Ignore this cell if it is contained by the previous one.
+ // We only need to check the last accepted cell. The ordering of the
+ // cells implies containment (but not the converse), and output has no redundancy,
+ // so if this candidate is not contained by the last accepted cell
+ // then it cannot be contained by any previously accepted cell.
+ if len(output) > 0 && output[len(output)-1].Contains(ci) {
+ continue
+ }
+
+ // Discard any previously accepted cells contained by this one.
+ // This could be any contiguous trailing subsequence, but it can't be
+ // a discontiguous subsequence because of the containment property of
+ // sorted S2 cells mentioned above.
+ j := len(output) - 1 // last index to keep
+ for j >= 0 {
+ if !ci.Contains(output[j]) {
+ break
+ }
+ j--
+ }
+ output = output[:j+1]
+
+ // See if the last three cells plus this one can be collapsed.
+ // We loop because collapsing three accepted cells and adding a higher level cell
+ // could cascade into previously accepted cells.
+ for len(output) >= 3 {
+ fin := output[len(output)-3:]
+
+ // fast XOR test; a necessary but not sufficient condition
+ if fin[0]^fin[1]^fin[2]^ci != 0 {
+ break
+ }
+
+ // more expensive test; exact.
+ // Compute the two bit mask for the encoded child position,
+ // then see if they all agree.
+ mask := CellID(ci.lsb() << 1)
+ mask = ^(mask + mask<<1)
+ should := ci & mask
+ if (fin[0]&mask != should) || (fin[1]&mask != should) || (fin[2]&mask != should) || ci.isFace() {
+ break
+ }
+
+ output = output[:len(output)-3]
+ ci = ci.immediateParent() // checked !ci.isFace above
+ }
+ output = append(output, ci)
+ }
+ *cu = output
+}
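+
+// For illustration, the collapse step above merges four complete siblings into
+// their parent. A sketch:
+//
+//	parent := CellIDFromFace(3).ChildBeginAtLevel(8)
+//	kids := parent.Children()
+//	cu := CellUnion(kids[:])
+//	cu.Normalize()
+//	_ = len(cu) == 1 && cu[0] == parent // true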
+
+// IntersectsCellID reports whether this cell union intersects the given cell ID.
+//
+// This method assumes that the CellUnion has been normalized.
+func (cu *CellUnion) IntersectsCellID(id CellID) bool {
+ // Find index of array item that occurs directly after our probe cell:
+ i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+ if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() {
+ return true
+ }
+ return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin()
+}
+
+// ContainsCellID reports whether the cell union contains the given cell ID.
+// Containment is defined with respect to regions, e.g. a cell contains its 4 children.
+//
+// This method assumes that the CellUnion has been normalized.
+func (cu *CellUnion) ContainsCellID(id CellID) bool {
+ // Find index of array item that occurs directly after our probe cell:
+ i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] })
+
+ if i != len(*cu) && (*cu)[i].RangeMin() <= id {
+ return true
+ }
+ return i != 0 && (*cu)[i-1].RangeMax() >= id
+}
+
+type byID []CellID
+
+func (cu byID) Len() int { return len(cu) }
+func (cu byID) Less(i, j int) bool { return cu[i] < cu[j] }
+func (cu byID) Swap(i, j int) { cu[i], cu[j] = cu[j], cu[i] }
+
+// Denormalize replaces this CellUnion with an expanded version of the
+// CellUnion where any cell whose level is less than minLevel or where
+// (level - minLevel) is not a multiple of levelMod is replaced by its
+// children, until either both of these conditions are satisfied or the
+// maximum level is reached.
+func (cu *CellUnion) Denormalize(minLevel, levelMod int) {
+ var denorm CellUnion
+ for _, id := range *cu {
+ level := id.Level()
+ newLevel := level
+ if newLevel < minLevel {
+ newLevel = minLevel
+ }
+ if levelMod > 1 {
+ newLevel += (maxLevel - (newLevel - minLevel)) % levelMod
+ if newLevel > maxLevel {
+ newLevel = maxLevel
+ }
+ }
+ if newLevel == level {
+ denorm = append(denorm, id)
+ } else {
+ end := id.ChildEndAtLevel(newLevel)
+ for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() {
+ denorm = append(denorm, ci)
+ }
+ }
+ }
+ *cu = denorm
+}
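+
+// For illustration, with minLevel 2 and levelMod 1, a single face cell is
+// replaced by its sixteen level-2 descendants. A sketch:
+//
+//	cu := CellUnion{CellIDFromFace(0)}
+//	cu.Denormalize(2, 1)
+//	_ = len(cu) // 16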
+
+// RectBound returns a Rect that bounds this entity.
+func (cu *CellUnion) RectBound() Rect {
+ bound := EmptyRect()
+ for _, c := range *cu {
+ bound = bound.Union(CellFromCellID(c).RectBound())
+ }
+ return bound
+}
+
+// CapBound returns a Cap that bounds this entity.
+func (cu *CellUnion) CapBound() Cap {
+ if len(*cu) == 0 {
+ return EmptyCap()
+ }
+
+ // Compute the approximate centroid of the region. This won't produce the
+ // bounding cap of minimal area, but it should be close enough.
+ var centroid Point
+
+ for _, ci := range *cu {
+ area := AvgAreaMetric.Value(ci.Level())
+ centroid = Point{centroid.Add(ci.Point().Mul(area))}
+ }
+
+ if zero := (Point{}); centroid == zero {
+ centroid = PointFromCoords(1, 0, 0)
+ } else {
+ centroid = Point{centroid.Normalize()}
+ }
+
+ // Use the centroid as the cap axis, and expand the cap angle so that it
+ // contains the bounding caps of all the individual cells. Note that it is
+ // *not* sufficient to just bound all the cell vertices because the bounding
+ // cap may be concave (i.e. cover more than one hemisphere).
+ c := CapFromPoint(centroid)
+ for _, ci := range *cu {
+ c = c.AddCap(CellFromCellID(ci).CapBound())
+ }
+
+ return c
+}
+
+// ContainsCell reports whether this cell union contains the given cell.
+func (cu *CellUnion) ContainsCell(c Cell) bool {
+ return cu.ContainsCellID(c.id)
+}
+
+// IntersectsCell reports whether this cell union intersects the given cell.
+func (cu *CellUnion) IntersectsCell(c Cell) bool {
+ return cu.IntersectsCellID(c.id)
+}
+
+// LeafCellsCovered reports the number of leaf cells covered by this cell union.
+// This will be no more than 6*2^60 for the whole sphere.
+func (cu *CellUnion) LeafCellsCovered() int64 {
+ var numLeaves int64
+ for _, c := range *cu {
+ numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1)
+ }
+ return numLeaves
+}
+
+// BUG: Differences from C++:
+// Contains(CellUnion)/Intersects(CellUnion)
+// Union(CellUnion)/Intersection(CellUnion)/Difference(CellUnion)
+// Expand
+// ContainsPoint
+// AverageArea/ApproxArea/ExactArea
diff --git a/vendor/github.com/golang/geo/s2/doc.go b/vendor/github.com/golang/geo/s2/doc.go
new file mode 100644
index 0000000..c6dbe44
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/doc.go
@@ -0,0 +1,31 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package s2 implements types and functions for working with geometry in S² (spherical geometry).
+
+Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹)
+and r3 (operates on ℝ³).
+
+This package provides types and functions for the S2 cell hierarchy and coordinate systems.
+The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²)
+into ``cells''; it is highly efficient, scales from continental size to under 1 cm²
+and preserves spatial locality (nearby cells have close IDs).
+
+A presentation that gives an overview of S2 is
+https://docs.google.com/presentation/d/1Hl4KapfAENAOf4gv-pSngKwvS_jwNVHRPZTTDzXXn6Q/view.
+*/
+package s2
diff --git a/vendor/github.com/golang/geo/s2/edgeutil.go b/vendor/github.com/golang/geo/s2/edgeutil.go
new file mode 100644
index 0000000..c1e5c90
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/edgeutil.go
@@ -0,0 +1,1293 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+const (
+ // edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate
+ // compared to the exact result, assuming that the points A and B are in
+	// the rectangle [-1,1]x[-1,1] or slightly outside it (by 1e-10 or less).
+ edgeClipErrorUVCoord = 2.25 * dblEpsilon
+
+ // edgeClipErrorUVDist is the maximum distance from a clipped point to
+ // the corresponding exact result. It is equal to the error in a single
+ // coordinate because at most one coordinate is subject to error.
+ edgeClipErrorUVDist = 2.25 * dblEpsilon
+
+ // faceClipErrorRadians is the maximum angle between a returned vertex
+ // and the nearest point on the exact edge AB. It is equal to the
+ // maximum directional error in PointCross, plus the error when
+ // projecting points onto a cube face.
+ faceClipErrorRadians = 3 * dblEpsilon
+
+	// faceClipErrorUVDist is the same angle expressed as a maximum distance
+ // in (u,v)-space. In other words, a returned vertex is at most this far
+ // from the exact edge AB projected into (u,v)-space.
+ faceClipErrorUVDist = 9 * dblEpsilon
+
+ // faceClipErrorUVCoord is the maximum angle between a returned vertex
+ // and the nearest point on the exact edge AB expressed as the maximum error
+ // in an individual u- or v-coordinate. In other words, for each
+ // returned vertex there is a point on the exact edge AB whose u- and
+ // v-coordinates differ from the vertex by at most this amount.
+ faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon
+
+ // intersectsRectErrorUVDist is the maximum error when computing if a point
+ // intersects with a given Rect. If some point of AB is inside the
+ // rectangle by at least this distance, the result is guaranteed to be true;
+ // if all points of AB are outside the rectangle by at least this distance,
+ // the result is guaranteed to be false. This bound assumes that rect is
+ // a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it
+ // (e.g., by 1e-10 or less).
+ intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon
+
+ // intersectionError can be set somewhat arbitrarily, because the algorithm
+ // uses more precision if necessary in order to achieve the specified error.
+ // The only strict requirement is that intersectionError >= dblEpsilon
+ // radians. However, using a larger error tolerance makes the algorithm more
+ // efficient because it reduces the number of cases where exact arithmetic is
+ // needed.
+ intersectionError = s1.Angle(4 * dblEpsilon)
+
+ // intersectionMergeRadius is used to ensure that intersection points that
+ // are supposed to be coincident are merged back together into a single
+ // vertex. This is required in order for various polygon operations (union,
+ // intersection, etc) to work correctly. It is twice the intersection error
+ // because two coincident intersection points might have errors in
+ // opposite directions.
+ intersectionMergeRadius = 2 * intersectionError
+)
+
+// SimpleCrossing reports whether edge AB crosses CD at a point that is interior
+// to both edges. Properties:
+//
+// (1) SimpleCrossing(b,a,c,d) == SimpleCrossing(a,b,c,d)
+// (2) SimpleCrossing(c,d,a,b) == SimpleCrossing(a,b,c,d)
+func SimpleCrossing(a, b, c, d Point) bool {
+ // We compute the equivalent of Sign for triangles ACB, CBD, BDA,
+ // and DAC. All of these triangles need to have the same orientation
+ // (CW or CCW) for an intersection to exist.
+
+ ab := a.Vector.Cross(b.Vector)
+ acb := -(ab.Dot(c.Vector))
+ bda := ab.Dot(d.Vector)
+ if acb*bda <= 0 {
+ return false
+ }
+
+ cd := c.Vector.Cross(d.Vector)
+ cbd := -(cd.Dot(b.Vector))
+ dac := cd.Dot(a.Vector)
+ return (acb*cbd > 0) && (acb*dac > 0)
+}
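+
+// exampleSimpleCrossingLatLngs is an illustrative sketch (editorial addition,
+// not part of the upstream library; the function name is hypothetical). It
+// shows how SimpleCrossing can be applied to edges given as LatLng pairs by
+// first converting the vertices to unit-length Points.
+func exampleSimpleCrossingLatLngs(a, b, c, d LatLng) bool {
+	return SimpleCrossing(PointFromLatLng(a), PointFromLatLng(b),
+		PointFromLatLng(c), PointFromLatLng(d))
+}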
+
+// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon
+// containment tests can be implemented by counting the number of edge crossings.
+//
+// Given two edges AB and CD where at least two vertices are identical
+// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing"
+// occurs if AB is encountered after CD during a CCW sweep around the shared
+// vertex starting from a fixed reference point.
+//
+// Note that according to this rule, if AB crosses CD then in general CD
+// does not cross AB. However, this leads to the correct result when
+// counting polygon edge crossings. For example, suppose that A,B,C are
+// three consecutive vertices of a CCW polygon. If we now consider the edge
+// crossings of a segment BP as P sweeps around B, the crossing number
+// changes parity exactly when BP crosses BA or BC.
+//
+// Useful properties of VertexCrossing (VC):
+//
+// (1) VC(a,a,c,d) == VC(a,b,c,c) == false
+// (2) VC(a,b,a,b) == VC(a,b,b,a) == true
+// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c)
+//  (4) If exactly one of a,b equals one of c,d, then exactly one of
+// VC(a,b,c,d) and VC(c,d,a,b) is true
+//
+// It is an error to call this method with 4 distinct vertices.
+func VertexCrossing(a, b, c, d Point) bool {
+ // If A == B or C == D there is no intersection. We need to check this
+ // case first in case 3 or more input points are identical.
+ if a.ApproxEqual(b) || c.ApproxEqual(d) {
+ return false
+ }
+
+ // If any other pair of vertices is equal, there is a crossing if and only
+ // if OrderedCCW indicates that the edge AB is further CCW around the
+ // shared vertex O (either A or B) than the edge CD, starting from an
+ // arbitrary fixed reference point.
+ switch {
+ case a.ApproxEqual(d):
+ return OrderedCCW(Point{a.Ortho()}, c, b, a)
+ case b.ApproxEqual(c):
+ return OrderedCCW(Point{b.Ortho()}, d, a, b)
+ case a.ApproxEqual(c):
+ return OrderedCCW(Point{a.Ortho()}, d, b, a)
+ case b.ApproxEqual(d):
+ return OrderedCCW(Point{b.Ortho()}, c, a, b)
+ }
+
+ return false
+}
+
+// DistanceFraction returns the distance ratio of the point X along an edge AB.
+// If X is on the line segment AB, this is the fraction T such
+// that X == Interpolate(T, A, B).
+//
+// This requires that A and B are distinct.
+func DistanceFraction(x, a, b Point) float64 {
+ d0 := x.Angle(a.Vector)
+ d1 := x.Angle(b.Vector)
+ return float64(d0 / (d0 + d1))
+}
+
+// Interpolate returns the point X along the line segment AB whose distance from A
+// is the given fraction "t" of the distance AB. Does NOT require that "t" be
+// between 0 and 1. Note that all distances are measured on the surface of
+// the sphere, so this is more complicated than just computing (1-t)*a + t*b
+// and normalizing the result.
+func Interpolate(t float64, a, b Point) Point {
+ if t == 0 {
+ return a
+ }
+ if t == 1 {
+ return b
+ }
+ ab := a.Angle(b.Vector)
+ return InterpolateAtDistance(s1.Angle(t)*ab, a, b)
+}
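+
+// exampleSampleEdge is an illustrative sketch (editorial addition, not part of
+// the upstream library; the function name is hypothetical). It uses
+// Interpolate to sample n+1 evenly spaced points along the edge AB, e.g. to
+// approximate the geodesic with a polyline.
+func exampleSampleEdge(a, b Point, n int) []Point {
+	if n < 1 {
+		return []Point{a}
+	}
+	pts := make([]Point, 0, n+1)
+	for i := 0; i <= n; i++ {
+		// The fraction is measured along the surface of the sphere,
+		// not along the straight chord from A to B.
+		pts = append(pts, Interpolate(float64(i)/float64(n), a, b))
+	}
+	return pts
+}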
+
+// InterpolateAtDistance returns the point X along the line segment AB whose
+// distance from A is the angle ax.
+func InterpolateAtDistance(ax s1.Angle, a, b Point) Point {
+ aRad := ax.Radians()
+
+ // Use PointCross to compute the tangent vector at A towards B. The
+ // result is always perpendicular to A, even if A=B or A=-B, but it is not
+ // necessarily unit length. (We effectively normalize it below.)
+ normal := a.PointCross(b)
+ tangent := normal.Vector.Cross(a.Vector)
+
+ // Now compute the appropriate linear combination of A and "tangent". With
+ // infinite precision the result would always be unit length, but we
+ // normalize it anyway to ensure that the error is within acceptable bounds.
+ // (Otherwise errors can build up when the result of one interpolation is
+ // fed into another interpolation.)
+ return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()}
+}
+
+// RectBounder is used to compute a bounding rectangle that contains all edges
+// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length.
+// Note that the bounding rectangle of an edge can be larger than the bounding
+// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole.
+//
+// The bounds are calculated conservatively to account for numerical errors
+// when points are converted to LatLngs. More precisely, this function
+// guarantees the following:
+// Let L be a closed edge chain (Loop) such that the interior of the loop does
+// not contain either pole. Now if P is any point such that L.ContainsPoint(P),
+// then RectBound(L).ContainsPoint(LatLngFromPoint(P)).
+type RectBounder struct {
+ // The previous vertex in the chain.
+ a Point
+	// The latitude and longitude of the previous vertex.
+ aLL LatLng
+ bound Rect
+}
+
+// NewRectBounder returns a new instance of a RectBounder.
+func NewRectBounder() *RectBounder {
+ return &RectBounder{
+ bound: EmptyRect(),
+ }
+}
+
+// AddPoint adds the given point to the chain. The Point must be unit length.
+func (r *RectBounder) AddPoint(b Point) {
+ bLL := LatLngFromPoint(b)
+
+ if r.bound.IsEmpty() {
+ r.a = b
+ r.aLL = bLL
+ r.bound = r.bound.AddPoint(bLL)
+ return
+ }
+
+ // First compute the cross product N = A x B robustly. This is the normal
+ // to the great circle through A and B. We don't use RobustSign
+ // since that method returns an arbitrary vector orthogonal to A if the two
+ // vectors are proportional, and we want the zero vector in that case.
+ n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B)
+
+ // The relative error in N gets large as its norm gets very small (i.e.,
+ // when the two points are nearly identical or antipodal). We handle this
+ // by choosing a maximum allowable error, and if the error is greater than
+ // this we fall back to a different technique. Since it turns out that
+ // the other sources of error in converting the normal to a maximum
+ // latitude add up to at most 1.16 * dblEpsilon, and it is desirable to
+ // have the total error be a multiple of dblEpsilon, we have chosen to
+ // limit the maximum error in the normal to be 3.84 * dblEpsilon.
+ // It is possible to show that the error is less than this when
+ //
+ // n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon
+ // = 1.91346e-15 (about 8.618 * dblEpsilon)
+ nNorm := n.Norm()
+ if nNorm < 1.91346e-15 {
+ // A and B are either nearly identical or nearly antipodal (to within
+ // 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface).
+ if r.a.Dot(b.Vector) < 0 {
+ // The two points are nearly antipodal. The easiest solution is to
+ // assume that the edge between A and B could go in any direction
+ // around the sphere.
+ r.bound = FullRect()
+ } else {
+ // The two points are nearly identical (to within 4.309 * dblEpsilon).
+ // In this case we can just use the bounding rectangle of the points,
+ // since after the expansion done by GetBound this Rect is
+ // guaranteed to include the (lat,lng) values of all points along AB.
+ r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL))
+ }
+ r.a = b
+ r.aLL = bLL
+ return
+ }
+
+ // Compute the longitude range spanned by AB.
+ lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians())
+ if lngAB.Length() >= math.Pi-2*dblEpsilon {
+ // The points lie on nearly opposite lines of longitude to within the
+ // maximum error of the calculation. The easiest solution is to assume
+ // that AB could go on either side of the pole.
+ lngAB = s1.FullInterval()
+ }
+
+ // Next we compute the latitude range spanned by the edge AB. We start
+ // with the range spanning the two endpoints of the edge:
+ latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians())
+
+ // This is the desired range unless the edge AB crosses the plane
+ // through N and the Z-axis (which is where the great circle through A
+ // and B attains its minimum and maximum latitudes). To test whether AB
+ // crosses this plane, we compute a vector M perpendicular to this
+ // plane and then project A and B onto it.
+ m := n.Cross(PointFromCoords(0, 0, 1).Vector)
+ mA := m.Dot(r.a.Vector)
+ mB := m.Dot(b.Vector)
+
+ // We want to test the signs of "mA" and "mB", so we need to bound
+ // the error in these calculations. It is possible to show that the
+ // total error is bounded by
+ //
+ // (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2)
+ // = 6.06638e-16 * nNorm + 6.83174e-31
+
+ mError := 6.06638e-16*nNorm + 6.83174e-31
+ if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError {
+ // Minimum/maximum latitude *may* occur in the edge interior.
+ //
+ // The maximum latitude is 90 degrees minus the latitude of N. We
+ // compute this directly using atan2 in order to get maximum accuracy
+ // near the poles.
+ //
+		// Our goal is to compute a bound that contains the computed latitudes of
+ // all S2Points P that pass the point-in-polygon containment test.
+ // There are three sources of error we need to consider:
+ // - the directional error in N (at most 3.84 * dblEpsilon)
+ // - converting N to a maximum latitude
+ // - computing the latitude of the test point P
+ // The latter two sources of error are at most 0.955 * dblEpsilon
+ // individually, but it is possible to show by a more complex analysis
+ // that together they can add up to at most 1.16 * dblEpsilon, for a
+ // total error of 5 * dblEpsilon.
+ //
+ // We add 3 * dblEpsilon to the bound here, and GetBound() will pad
+ // the bound by another 2 * dblEpsilon.
+ maxLat := math.Min(
+ math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon,
+ math.Pi/2)
+
+ // In order to get tight bounds when the two points are close together,
+ // we also bound the min/max latitude relative to the latitudes of the
+ // endpoints A and B. First we compute the distance between A and B,
+ // and then we compute the maximum change in latitude between any two
+ // points along the great circle that are separated by this distance.
+ // This gives us a latitude change "budget". Some of this budget must
+ // be spent getting from A to B; the remainder bounds the round-trip
+ // distance (in latitude) from A or B to the min or max latitude
+ // attained along the edge AB.
+ latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat))
+ maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon
+
+ // Test whether AB passes through the point of maximum latitude or
+ // minimum latitude. If the dot product(s) are small enough then the
+ // result may be ambiguous.
+ if mA <= mError && mB >= -mError {
+ latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta)
+ }
+ if mB <= mError && mA >= -mError {
+ latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta)
+ }
+ }
+ r.a = b
+ r.aLL = bLL
+ r.bound = r.bound.Union(Rect{latAB, lngAB})
+}
+
+// RectBound returns the bounding rectangle of the edge chain that connects the
+// vertices defined so far. This bound satisfies the guarantee made
+// above, i.e. if the edge chain defines a Loop, then the bound contains
+// the LatLng coordinates of all Points contained by the loop.
+func (r *RectBounder) RectBound() Rect {
+ return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure()
+}
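+
+// exampleRectBoundForChain is an illustrative sketch (editorial addition, not
+// part of the upstream library; the function name is hypothetical). It shows
+// the intended use of RectBounder: add every vertex of a chain in order and
+// then read off the conservative bound.
+func exampleRectBoundForChain(vertices []Point) Rect {
+	rb := NewRectBounder()
+	for _, v := range vertices {
+		rb.AddPoint(v) // all vertices must be unit length
+	}
+	return rb.RectBound()
+}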
+
+// ExpandForSubregions expands a bounding Rect so that it is guaranteed to
+// contain the bounds of any subregion whose bounds are computed using
+// ComputeRectBound. For example, consider a loop L that defines a square.
+// GetBound ensures that if a point P is contained by this square, then
+// LatLngFromPoint(P) is contained by the bound. But now consider a diamond
+// shaped loop S contained by L. It is possible that GetBound returns a
+// *larger* bound for S than it does for L, due to rounding errors. This
+// method expands the bound for L so that it is guaranteed to contain the
+// bounds of any subregion S.
+//
+// More precisely, if L is a loop that does not contain either pole, and S
+// is a loop such that L.Contains(S), then
+//
+// ExpandForSubregions(L.RectBound).Contains(S.RectBound).
+//
+func ExpandForSubregions(bound Rect) Rect {
+ // Empty bounds don't need expansion.
+ if bound.IsEmpty() {
+ return bound
+ }
+
+ // First we need to check whether the bound B contains any nearly-antipodal
+ // points (to within 4.309 * dblEpsilon). If so then we need to return
+ // FullRect, since the subregion might have an edge between two
+ // such points, and AddPoint returns Full for such edges. Note that
+	// this can happen even if B is not Full; for example, consider a loop
+ // that defines a 10km strip straddling the equator extending from
+ // longitudes -100 to +100 degrees.
+ //
+ // It is easy to check whether B contains any antipodal points, but checking
+ // for nearly-antipodal points is trickier. Essentially we consider the
+ // original bound B and its reflection through the origin B', and then test
+ // whether the minimum distance between B and B' is less than 4.309 * dblEpsilon.
+
+ // lngGap is a lower bound on the longitudinal distance between B and its
+ // reflection B'. (2.5 * dblEpsilon is the maximum combined error of the
+ // endpoint longitude calculations and the Length call.)
+ lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon)
+
+ // minAbsLat is the minimum distance from B to the equator (if zero or
+ // negative, then B straddles the equator).
+ minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi)
+
+ // latGapSouth and latGapNorth measure the minimum distance from B to the
+ // south and north poles respectively.
+ latGapSouth := math.Pi/2 + bound.Lat.Lo
+ latGapNorth := math.Pi/2 - bound.Lat.Hi
+
+ if minAbsLat >= 0 {
+ // The bound B does not straddle the equator. In this case the minimum
+ // distance is between one endpoint of the latitude edge in B closest to
+ // the equator and the other endpoint of that edge in B'. The latitude
+ // distance between these two points is 2*minAbsLat, and the longitude
+ // distance is lngGap. We could compute the distance exactly using the
+ // Haversine formula, but then we would need to bound the errors in that
+ // calculation. Since we only need accuracy when the distance is very
+ // small (close to 4.309 * dblEpsilon), we substitute the Euclidean
+ // distance instead. This gives us a right triangle XYZ with two edges of
+ // length x = 2*minAbsLat and y ~= lngGap. The desired distance is the
+ // length of the third edge z, and we have
+ //
+ // z ~= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
+ //
+ // Therefore the region may contain nearly antipodal points only if
+ //
+ // 2*minAbsLat + lngGap < sqrt(2) * 4.309 * dblEpsilon
+ // ~= 1.354e-15
+ //
+ // Note that because the given bound B is conservative, minAbsLat and
+ // lngGap are both lower bounds on their true values so we do not need
+ // to make any adjustments for their errors.
+ if 2*minAbsLat+lngGap < 1.354e-15 {
+ return FullRect()
+ }
+ } else if lngGap >= math.Pi/2 {
+ // B spans at most Pi/2 in longitude. The minimum distance is always
+ // between one corner of B and the diagonally opposite corner of B'. We
+ // use the same distance approximation that we used above; in this case
+ // we have an obtuse triangle XYZ with two edges of length x = latGapSouth
+ // and y = latGapNorth, and angle Z >= Pi/2 between them. We then have
+ //
+ // z >= sqrt(x^2 + y^2) >= (x + y) / sqrt(2)
+ //
+ // Unlike the case above, latGapSouth and latGapNorth are not lower bounds
+ // (because of the extra addition operation, and because math.Pi/2 is not
+ // exactly equal to Pi/2); they can exceed their true values by up to
+ // 0.75 * dblEpsilon. Putting this all together, the region may contain
+ // nearly antipodal points only if
+ //
+ // latGapSouth + latGapNorth < (sqrt(2) * 4.309 + 1.5) * dblEpsilon
+ // ~= 1.687e-15
+ if latGapSouth+latGapNorth < 1.687e-15 {
+ return FullRect()
+ }
+ } else {
+ // Otherwise we know that (1) the bound straddles the equator and (2) its
+ // width in longitude is at least Pi/2. In this case the minimum
+ // distance can occur either between a corner of B and the diagonally
+ // opposite corner of B' (as in the case above), or between a corner of B
+ // and the opposite longitudinal edge reflected in B'. It is sufficient
+ // to only consider the corner-edge case, since this distance is also a
+ // lower bound on the corner-corner distance when that case applies.
+
+ // Consider the spherical triangle XYZ where X is a corner of B with
+ // minimum absolute latitude, Y is the closest pole to X, and Z is the
+ // point closest to X on the opposite longitudinal edge of B'. This is a
+ // right triangle (Z = Pi/2), and from the spherical law of sines we have
+ //
+ // sin(z) / sin(Z) = sin(y) / sin(Y)
+ // sin(maxLatGap) / 1 = sin(dMin) / sin(lngGap)
+ // sin(dMin) = sin(maxLatGap) * sin(lngGap)
+ //
+ // where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the
+ // desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t
+ // for 0 <= t <= Pi/2, that we only need an accurate approximation when
+ // at least one of "maxLatGap" or lngGap is extremely small (in which
+ // case sin(t) ~= t), and recalling that "maxLatGap" has an error of up
+ // to 0.75 * dblEpsilon, we want to test whether
+ //
+ // maxLatGap * lngGap < (4.309 + 0.75) * (Pi/2) * dblEpsilon
+ // ~= 1.765e-15
+ if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 {
+ return FullRect()
+ }
+ }
+ // Next we need to check whether the subregion might contain any edges that
+ // span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint
+ // sets the longitude bound to Full in that case. This corresponds to
+ // testing whether (lngGap <= 0) in lngExpansion below.
+
+ // Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon.
+ // In the worst case, the errors when computing the latitude bound for a
+ // subregion could go in the opposite direction as the errors when computing
+ // the bound for the original region, so we need to double this value.
+ // (More analysis shows that it's okay to round down to a multiple of
+ // dblEpsilon.)
+ //
+ // For longitude, we rely on the fact that atan2 is correctly rounded and
+ // therefore no additional bounds expansion is necessary.
+
+ latExpansion := 9 * dblEpsilon
+ lngExpansion := 0.0
+ if lngGap <= 0 {
+ lngExpansion = math.Pi
+ }
+ return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure()
+}
+
+// EdgeCrosser allows edges to be efficiently tested for intersection with a
+// given fixed edge AB. It is especially efficient when testing for
+// intersection with an edge chain connecting vertices v0, v1, v2, ...
+type EdgeCrosser struct {
+ a Point
+ b Point
+ aXb Point
+
+ // To reduce the number of calls to expensiveSign, we compute an
+ // outward-facing tangent at A and B if necessary. If the plane
+ // perpendicular to one of these tangents separates AB from CD (i.e., one
+ // edge on each side) then there is no intersection.
+ aTangent Point // Outward-facing tangent at A.
+ bTangent Point // Outward-facing tangent at B.
+
+ // The fields below are updated for each vertex in the chain.
+ c Point // Previous vertex in the vertex chain.
+ acb Direction // The orientation of triangle ACB.
+}
+
+// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB.
+func NewEdgeCrosser(a, b Point) *EdgeCrosser {
+ norm := a.PointCross(b)
+ return &EdgeCrosser{
+ a: a,
+ b: b,
+ aXb: Point{a.Cross(b.Vector)},
+ aTangent: Point{a.Cross(norm.Vector)},
+ bTangent: Point{norm.Cross(b.Vector)},
+ }
+}
+
+// A Crossing indicates how edges cross.
+type Crossing int
+
+const (
+ // Cross means the edges cross.
+ Cross Crossing = iota
+ // MaybeCross means two vertices from different edges are the same.
+ MaybeCross
+ // DoNotCross means the edges do not cross.
+ DoNotCross
+)
+
+// CrossingSign reports whether the edge AB intersects the edge CD.
+// If any two vertices from different edges are the same, returns MaybeCross.
+// If either edge is degenerate (A == B or C == D), returns DoNotCross or MaybeCross.
+//
+// Properties of CrossingSign:
+//
+// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d)
+// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d)
+//  (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, or b==d
+//  (4) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d
+//
+// Note that if you want to check an edge against a chain of other edges,
+// it is slightly more efficient to use the single-argument version
+// ChainCrossingSign below.
+func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing {
+ if c != e.c {
+ e.RestartAt(c)
+ }
+ return e.ChainCrossingSign(d)
+}
+
+// EdgeOrVertexCrossing reports whether CrossingSign(c, d) == Cross, or whether
+// AB and CD share a vertex and VertexCrossing(a, b, c, d) is true.
+//
+// This method extends the concept of a "crossing" to the case where AB
+// and CD have a vertex in common. The two edges may or may not cross,
+// according to the rules defined in VertexCrossing above. The rules
+// are designed so that point containment tests can be implemented simply
+// by counting edge crossings. Similarly, determining whether one edge
+// chain crosses another edge chain can be implemented by counting.
+func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool {
+ if c != e.c {
+ e.RestartAt(c)
+ }
+ return e.EdgeOrVertexChainCrossing(d)
+}
+
+// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge,
+// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)).
+//
+// You don't need to use this or any of the chain functions unless you're trying to
+// squeeze out every last drop of performance. Essentially all you are saving is a test
+// whether the first vertex of the current edge is the same as the second vertex of the
+// previous edge.
+func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser {
+ e := NewEdgeCrosser(a, b)
+ e.RestartAt(c)
+ return e
+}
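+
+// exampleCountChainCrossings is an illustrative sketch (editorial addition,
+// not part of the upstream library; the function name is hypothetical). It
+// counts how many edges of the closed chain v[0], v[1], ..., v[n-1], v[0]
+// cross the fixed edge AB, reusing a single EdgeCrosser as recommended above.
+func exampleCountChainCrossings(a, b Point, chain []Point) int {
+	if len(chain) < 2 {
+		return 0
+	}
+	crossings := 0
+	// Start the chain at the last vertex so that the loop below visits
+	// every edge of the closed chain exactly once.
+	e := NewChainEdgeCrosser(a, b, chain[len(chain)-1])
+	for _, v := range chain {
+		if e.ChainCrossingSign(v) == Cross {
+			crossings++
+		}
+	}
+	return crossings
+}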
+
+// RestartAt sets the current point of the edge crosser to be c.
+// Call this method when your chain 'jumps' to a new place.
+// The argument must point to a value that persists until the next call.
+func (e *EdgeCrosser) RestartAt(c Point) {
+ e.c = c
+ e.acb = -triageSign(e.a, e.b, e.c)
+}
+
+// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of
+// the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing {
+ // For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must
+ // all be oriented the same way (CW or CCW). We keep the orientation of ACB
+ // as part of our state. When each new point D arrives, we compute the
+ // orientation of BDA and check whether it matches ACB. This checks whether
+ // the points C and D are on opposite sides of the great circle through AB.
+
+ // Recall that triageSign is invariant with respect to rotating its
+ // arguments, i.e. ABD has the same orientation as BDA.
+ bda := triageSign(e.a, e.b, d)
+ if e.acb == -bda && bda != Indeterminate {
+ // The most common case -- triangles have opposite orientations. Save the
+ // current vertex D as the next vertex C, and also save the orientation of
+ // the new triangle ACB (which is opposite to the current triangle BDA).
+ e.c = d
+ e.acb = -bda
+ return DoNotCross
+ }
+ return e.crossingSign(d, bda)
+}
+
+// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex
+// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge.
+func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool {
+ // We need to copy e.c since it is clobbered by ChainCrossingSign.
+ c := e.c
+ switch e.ChainCrossingSign(d) {
+ case DoNotCross:
+ return false
+ case Cross:
+ return true
+ }
+ return VertexCrossing(e.a, e.b, c, d)
+}
+
+// crossingSign handles the slow path of CrossingSign.
+func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing {
+ // Compute the actual result, and then save the current vertex D as the next
+ // vertex C, and save the orientation of the next triangle ACB (which is
+ // opposite to the current triangle BDA).
+ defer func() {
+ e.c = d
+ e.acb = -bda
+ }()
+
+ // RobustSign is very expensive, so we avoid calling it if at all possible.
+ // First eliminate the cases where two vertices are equal.
+ if e.a == e.c || e.a == d || e.b == e.c || e.b == d {
+ return MaybeCross
+ }
+
+ // At this point, a very common situation is that A,B,C,D are four points on
+ // a line such that AB does not overlap CD. (For example, this happens when
+ // a line or curve is sampled finely, or when geometry is constructed by
+ // computing the union of S2CellIds.) Most of the time, we can determine
+ // that AB and CD do not intersect using the two outward-facing
+ // tangents at A and B (parallel to AB) and testing whether AB and CD are on
+ // opposite sides of the plane perpendicular to one of these tangents. This
+ // is moderately expensive but still much cheaper than expensiveSign.
+
+ // The error in RobustCrossProd is insignificant. The maximum error in
+ // the call to CrossProd (i.e., the maximum norm of the error vector) is
+ // (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to
+ // DotProd below is dblEpsilon. (There is also a small relative error
+ // term that is insignificant because we are comparing the result against a
+ // constant that is very close to zero.)
+ maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon
+ if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) {
+ return DoNotCross
+ }
+
+ // Otherwise it's time to break out the big guns.
+ if e.acb == Indeterminate {
+ e.acb = -expensiveSign(e.a, e.b, e.c)
+ }
+ if bda == Indeterminate {
+ bda = expensiveSign(e.a, e.b, d)
+ }
+
+ if bda != e.acb {
+ return DoNotCross
+ }
+
+ cbd := -RobustSign(e.c, d, e.b)
+ if cbd != e.acb {
+ return DoNotCross
+ }
+ dac := RobustSign(e.c, d, e.a)
+ if dac == e.acb {
+ return Cross
+ }
+ return DoNotCross
+}
+
+// pointUVW represents a Point in (u,v,w) coordinate space of a cube face.
+type pointUVW Point
+
+// intersectsFace reports whether a given directed line L intersects the cube face F.
+// The line L is defined by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsFace() bool {
+ // L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot
+ // products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1),
+ // and (-1,1,1) do not all have the same sign. This is true exactly when
+ // |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly.
+ u := math.Abs(p.X)
+ v := math.Abs(p.Y)
+ w := math.Abs(p.Z)
+
+ // We only need to consider the cases where u or v is the smallest value,
+ // since if w is the smallest then both expressions below will have a
+ // positive LHS and a negative RHS.
+ return (v >= w-u) && (u >= w-v)
+}
+
+// intersectsOppositeEdges reports whether a directed line L intersects two
+// opposite edges of a cube face F. This includes the case where L passes
+// exactly through a corner vertex of F. The directed line L is defined
+// by its normal N in the (u,v,w) coordinates of F.
+func (p pointUVW) intersectsOppositeEdges() bool {
+ // The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if
+ // and only exactly two of the corner vertices lie on each side of L. This
+	// and only if exactly two of the corner vertices lie on each side of L. This
+ // expression exactly.
+ u := math.Abs(p.X)
+ v := math.Abs(p.Y)
+ w := math.Abs(p.Z)
+
+ // If w is the smallest, the following line returns an exact result.
+ if math.Abs(u-v) != w {
+ return math.Abs(u-v) >= w
+ }
+
+ // Otherwise u - v = w exactly, or w is not the smallest value. In either
+ // case the following returns the correct result.
+ if u >= v {
+ return u-w >= v
+ }
+ return v-w >= u
+}
+
+// axis represents the possible results of exitAxis.
+type axis int
+
+const (
+ axisU axis = iota
+ axisV
+)
+
+// exitAxis reports which axis the directed line L exits the cube face F on.
+// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates
+// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits
+// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly
+// through a corner vertex of the cube face.
+func (p pointUVW) exitAxis() axis {
+ if p.intersectsOppositeEdges() {
+		// The line passes through opposite edges of the face.
+ // It exits through the v=+1 or v=-1 edge if the u-component of N has a
+ // larger absolute magnitude than the v-component.
+ if math.Abs(p.X) >= math.Abs(p.Y) {
+ return axisV
+ }
+ return axisU
+ }
+
+	// The line passes through two adjacent edges of the face.
+ // It exits the v=+1 or v=-1 edge if an even number of the components of N
+ // are negative. We test this using signbit() rather than multiplication
+ // to avoid the possibility of underflow.
+ var x, y, z int
+ if math.Signbit(p.X) {
+ x = 1
+ }
+ if math.Signbit(p.Y) {
+ y = 1
+ }
+ if math.Signbit(p.Z) {
+ z = 1
+ }
+
+ if x^y^z == 0 {
+ return axisV
+ }
+ return axisU
+}
+
+// exitPoint returns the UV coordinates of the point where a directed line L (represented
+// by the CCW normal of this point), exits the cube face this point is derived from along
+// the given axis.
+func (p pointUVW) exitPoint(a axis) r2.Point {
+ if a == axisU {
+ u := -1.0
+ if p.Y > 0 {
+ u = 1.0
+ }
+ return r2.Point{u, (-u*p.X - p.Z) / p.Y}
+ }
+
+ v := -1.0
+ if p.X < 0 {
+ v = 1.0
+ }
+ return r2.Point{(-v*p.Y - p.Z) / p.X, v}
+}
+
+// clipDestination returns a score indicating whether the clipped edge AB
+// on the given face intersects the face at all. This function returns the score for
+// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores
+// from both of the endpoints is 3 or more, then edge AB does not intersect this face.
+//
+// First, it clips the line segment AB to find the clipped destination B' on a given
+// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w)
+// coordinates of that face.) Second, it partially computes whether the segment AB
+// intersects this face at all. The actual condition is fairly complicated, but it
+// turns out that it can be expressed as a "score" that can be computed independently
+// when clipping the two endpoints A and B.
+func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) {
+ var uv r2.Point
+
+ // Optimization: if B is within the safe region of the face, use it.
+ maxSafeUVCoord := 1 - faceClipErrorUVCoord
+ if b.Z > 0 {
+ uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+ if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord {
+ return uv, 0
+ }
+ }
+
+ // Otherwise find the point B' where the line AB exits the face.
+ uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV)
+
+ p := pointUVW(PointFromCoords(uv.X, uv.Y, 1.0))
+
+ // Determine if the exit point B' is contained within the segment. We do this
+ // by computing the dot products with two inward-facing tangent vectors at A
+ // and B. If either dot product is negative, we say that B' is on the "wrong
+ // side" of that point. As the point B' moves around the great circle AB past
+ // the segment endpoint B, it is initially on the wrong side of B only; as it
+ // moves further it is on the wrong side of both endpoints; and then it is on
+ // the wrong side of A only. If the exit point B' is on the wrong side of
+ // either endpoint, we can't use it; instead the segment is clipped at the
+ // original endpoint B.
+ //
+ // We reject the segment if the sum of the scores of the two endpoints is 3
+ // or more. Here is what that rule encodes:
+ // - If B' is on the wrong side of A, then the other clipped endpoint A'
+ // must be in the interior of AB (otherwise AB' would go the wrong way
+ // around the circle). There is a similar rule for A'.
+ // - If B' is on the wrong side of either endpoint (and therefore we must
+ // use the original endpoint B instead), then it must be possible to
+ // project B onto this face (i.e., its w-coordinate must be positive).
+ // This rule is only necessary to handle certain zero-length edges (A=B).
+ score := 0
+ if p.Sub(a.Vector).Dot(aTan.Vector) < 0 {
+ score = 2 // B' is on wrong side of A.
+ } else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 {
+ score = 1 // B' is on wrong side of B.
+ }
+
+ if score > 0 { // B' is not in the interior of AB.
+ if b.Z <= 0 {
+ score = 3 // B cannot be projected onto this face.
+ } else {
+ uv = r2.Point{b.X / b.Z, b.Y / b.Z}
+ }
+ }
+
+ return uv, score
+}
+
+// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, or false if the edge AB does not intersect.
+// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1]
+// cube face rectangle and are within faceClipErrorUVDist of the line AB, but
+// the results may differ from those produced by faceSegments.
+func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) {
+ return ClipToPaddedFace(a, b, face, 0.0)
+}
+
+// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that
+// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1]
+// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding).
+// Padding must be non-negative.
+func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) {
+ // Fast path: both endpoints are on the given face.
+ if face(a.Vector) == f && face(b.Vector) == f {
+ au, av := validFaceXYZToUV(f, a.Vector)
+ bu, bv := validFaceXYZToUV(f, b.Vector)
+ return r2.Point{au, av}, r2.Point{bu, bv}, true
+ }
+
+ // Convert everything into the (u,v,w) coordinates of the given face. Note
+ // that the cross product *must* be computed in the original (x,y,z)
+ // coordinate system because PointCross (unlike the mathematical cross
+ // product) can produce different results in different coordinate systems
+ // when one argument is a linear multiple of the other, due to the use of
+ // symbolic perturbations.
+ normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b)))
+ aUVW := pointUVW(faceXYZtoUVW(f, a))
+ bUVW := pointUVW(faceXYZtoUVW(f, b))
+
+ // Padding is handled by scaling the u- and v-components of the normal.
+ // Letting R=1+padding, this means that when we compute the dot product of
+ // the normal with a cube face vertex (such as (-1,-1,1)), we will actually
+ // compute the dot product with the scaled vertex (-R,-R,1). This allows
+ // methods such as intersectsFace, exitAxis, etc, to handle padding
+ // with no further modifications.
+ scaleUV := 1 + padding
+ scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}}
+ if !scaledN.intersectsFace() {
+ return aUV, bUV, false
+ }
+
+ // TODO(roberts): This is a workaround for extremely small vectors where some
+ // loss of precision can occur in Normalize causing underflow. When PointCross
+ // is updated to work around this, this can be removed.
+ if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) {
+ normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))}
+ }
+
+ normUVW = pointUVW{normUVW.Normalize()}
+
+ aTan := pointUVW{normUVW.Cross(aUVW.Vector)}
+ bTan := pointUVW{bUVW.Cross(normUVW.Vector)}
+
+ // As described in clipDestination, if the sum of the scores from clipping the two
+ // endpoints is 3 or more, then the segment does not intersect this face.
+ aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV)
+ bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV)
+
+ return aUV, bUV, aScore+bScore < 3
+}
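+
+// exampleFacesIntersectedByEdge is an illustrative sketch (editorial addition,
+// not part of the upstream library; the function name is hypothetical). It
+// uses ClipToFace to determine which of the six cube faces the edge AB
+// intersects.
+func exampleFacesIntersectedByEdge(a, b Point) []int {
+	var faces []int
+	for f := 0; f < 6; f++ {
+		if _, _, intersects := ClipToFace(a, b, f); intersects {
+			faces = append(faces, f)
+		}
+	}
+	return faces
+}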
+
+// interpolateDouble returns a value with the same combination of a1 and b1 as the
+// given value x is of a and b. This function makes the following guarantees:
+// - If x == a, then x1 = a1 (exactly).
+// - If x == b, then x1 = b1 (exactly).
+// - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1).
+// This requires a != b.
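+// For example (illustrative note, editorial addition):
+// interpolateDouble(2, 0, 4, 10, 20) returns 15, since x lies halfway between
+// a and b and the result therefore lies halfway between a1 and b1.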
+func interpolateDouble(x, a, b, a1, b1 float64) float64 {
+ // To get results that are accurate near both A and B, we interpolate
+ // starting from the closer of the two points.
+ if math.Abs(a-x) <= math.Abs(b-x) {
+ return a1 + (b1-a1)*(x-a)/(b-a)
+ }
+ return b1 + (a1-b1)*(x-b)/(a-b)
+}
+
+// updateEndpoint returns the interval with the specified endpoint updated to
+// the given value. If the value lies beyond the opposite endpoint, nothing is
+// changed and false is returned.
+func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) {
+ if !highEndpoint {
+ if bound.Hi < value {
+ return bound, false
+ }
+ if bound.Lo < value {
+ bound.Lo = value
+ }
+ return bound, true
+ }
+
+ if bound.Lo > value {
+ return bound, false
+ }
+ if bound.Hi > value {
+ bound.Hi = value
+ }
+ return bound, true
+}
+
+// clipBoundAxis returns the clipped versions of the bounding intervals for the given
+// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the
+// given clip interval. negSlope is a precomputed helper variable that indicates which
+// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope,
+// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds,
+// false is returned.
+func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval,
+ negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) {
+
+ if bound0.Lo < clip.Lo {
+		// If the upper bound is below the clip's lower bound, there is nothing to do.
+ if bound0.Hi < clip.Lo {
+ return bound0, bound1, false
+ }
+		// Narrow the interval's lower bound to the clip bound.
+ bound0.Lo = clip.Lo
+ if bound1, updated = updateEndpoint(bound1, negSlope, interpolateDouble(clip.Lo, a0, b0, a1, b1)); !updated {
+ return bound0, bound1, false
+ }
+ }
+
+ if bound0.Hi > clip.Hi {
+		// If the lower bound is above the clip's upper bound, there is nothing to do.
+ if bound0.Lo > clip.Hi {
+ return bound0, bound1, false
+ }
+		// Narrow the interval's upper bound to the clip bound.
+ bound0.Hi = clip.Hi
+ if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateDouble(clip.Hi, a0, b0, a1, b1)); !updated {
+ return bound0, bound1, false
+ }
+ }
+ return bound0, bound1, true
+}
+
+// edgeIntersectsRect reports whether the edge defined by AB intersects the
+// given closed rectangle to within the error bound.
+func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool {
+ // First check whether the bounds of a Rect around AB intersects the given rect.
+ if !r.Intersects(r2.RectFromPoints(a, b)) {
+ return false
+ }
+
+ // Otherwise AB intersects the rect if and only if all four vertices of rect
+ // do not lie on the same side of the extended line AB. We test this by finding
+ // the two vertices of rect with minimum and maximum projections onto the normal
+ // of AB, and computing their dot products with the edge normal.
+ n := b.Sub(a).Ortho()
+
+ i := 0
+ if n.X >= 0 {
+ i = 1
+ }
+ j := 0
+ if n.Y >= 0 {
+ j = 1
+ }
+
+ max := n.Dot(r.VertexIJ(i, j).Sub(a))
+ min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a))
+
+ return (max >= 0) && (min <= 0)
+}
+
+// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined
+// by AB intersected by clip. The resulting bound may be empty. This is a convenience
+// function built on top of clipEdgeBound.
+func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect {
+ bound := r2.RectFromPoints(a, b)
+ if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects {
+ return b1
+ }
+ return r2.EmptyRect()
+}
+
+// clipEdgeBound clips an edge AB to a sequence of rectangles efficiently.
+// It represents the clipped edges by their bounding boxes rather than as a pair of
+// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be
+// a tight bound of A'B'. This function returns the bound that is a tight bound
+// of A'B' intersected with a given rectangle. If A'B' does not intersect clip,
+// it returns false and the original bound.
+func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) {
+ // negSlope indicates which diagonal of the bounding box is spanned by AB: it
+ // is false if AB has positive slope, and true if AB has negative slope. This is
+ // used to determine which interval endpoints need to be updated each time
+ // the edge is clipped.
+ negSlope := (a.X > b.X) != (a.Y > b.Y)
+
+ b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X)
+ if !up1 {
+ return bound, false
+ }
+ b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y)
+ if !up2 {
+		return r2.Rect{X: b0x, Y: b0y}, false
+ }
+ return r2.Rect{X: b1x, Y: b1y}, true
+}
+
+// ClipEdge returns the portion of the edge defined by AB that is contained by the
+// given rectangle. If there is no intersection, false is returned and aClip and bClip
+// are undefined.
+func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) {
+ // Compute the bounding rectangle of AB, clip it, and then extract the new
+ // endpoints from the clipped bound.
+ bound := r2.RectFromPoints(a, b)
+ if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects {
+ return aClip, bClip, false
+ }
+ ai := 0
+ if a.X > b.X {
+ ai = 1
+ }
+ aj := 0
+ if a.Y > b.Y {
+ aj = 1
+ }
+
+ return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true
+}
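+
+// exampleClipEdgeToUnitSquare is an illustrative sketch (editorial addition,
+// not part of the upstream library; the function name is hypothetical). It
+// clips an edge given in (u,v) coordinates to the unit square [0,1]x[0,1],
+// e.g. when restricting an edge to a single tile in uv-space.
+func exampleClipEdgeToUnitSquare(a, b r2.Point) (aClip, bClip r2.Point, intersects bool) {
+	clip := r2.RectFromPoints(r2.Point{X: 0, Y: 0}, r2.Point{X: 1, Y: 1})
+	return ClipEdge(a, b, clip)
+}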
+
+// ClosestPoint returns the point along the edge AB that is closest to the point X.
+// The fractional distance of this point along the edge AB can be obtained
+// using DistanceFraction.
+//
+// This requires that all points are unit length.
+func ClosestPoint(x, a, b Point) Point {
+ aXb := a.PointCross(b)
+ // Find the closest point to X along the great circle through AB.
+ p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2()))
+
+ // If this point is on the edge AB, then it's the closest point.
+ if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) {
+ return Point{p.Normalize()}
+ }
+
+ // Otherwise, the closest point is either A or B.
+ if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() {
+ return a
+ }
+ return b
+}
+
+// DistanceFromSegment returns the distance of point x from line segment ab.
+// The points are expected to be normalized.
+func DistanceFromSegment(x, a, b Point) s1.Angle {
+ if d, ok := interiorDist(x, a, b); ok {
+ return d.Angle()
+ }
+ // Chord distance of x to both end points a and b.
+ xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+ return s1.ChordAngle(math.Min(xa2, xb2)).Angle()
+}
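+
+// exampleNearestPointOnEdge is an illustrative sketch (editorial addition, not
+// part of the upstream library; the function name is hypothetical). It returns
+// both the point on edge AB closest to X and the angular distance from X to
+// that point; all inputs must be unit length.
+func exampleNearestPointOnEdge(x, a, b Point) (Point, s1.Angle) {
+	return ClosestPoint(x, a, b), DistanceFromSegment(x, a, b)
+}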
+
+// interiorDist returns the shortest distance from point x to edge ab,
+// assuming that the closest point to x is interior to ab.
+// If the closest point is not interior to ab, interiorDist returns (0, false).
+func interiorDist(x, a, b Point) (s1.ChordAngle, bool) {
+ // Chord distance of x to both end points a and b.
+ xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2()
+
+ // The closest point on AB could either be one of the two vertices (the
+ // vertex case) or in the interior (the interior case). Let C = A x B.
+ // If X is in the spherical wedge extending from A to B around the axis
+ // through C, then we are in the interior case. Otherwise we are in the
+ // vertex case.
+ //
+ // Check whether we might be in the interior case. For this to be true, XAB
+ // and XBA must both be acute angles. Checking this condition exactly is
+ // expensive, so instead we consider the planar triangle ABX (which passes
+ // through the sphere's interior). The planar angles XAB and XBA are always
+ // less than the corresponding spherical angles, so if we are in the
+ // interior case then both of these angles must be acute.
+ //
+ // We check this by computing the squared edge lengths of the planar
+ // triangle ABX, and testing acuteness using the law of cosines:
+ //
+ // max(XA^2, XB^2) < min(XA^2, XB^2) + AB^2
+ if math.Max(xa2, xb2) >= math.Min(xa2, xb2)+(a.Sub(b.Vector)).Norm2() {
+ return 0, false
+ }
+
+ // The minimum distance might be to a point on the edge interior. Let R
+ // be closest point to X that lies on the great circle through AB. Rather
+ // than computing the geodesic distance along the surface of the sphere,
+ // instead we compute the "chord length" through the sphere's interior.
+ //
+ // The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q
+ // is the point X projected onto the plane through the great circle AB.
+ // The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B.
+ // We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it
+ // is faster and the corresponding distance on the Earth's surface is
+ // accurate to within 1% for distances up to about 1800km.
+
+ // Test for the interior case. This test is very likely to succeed because
+ // of the conservative planar test we did initially.
+ c := a.PointCross(b)
+ c2 := c.Norm2()
+ cx := c.Cross(x.Vector)
+ if a.Dot(cx) >= 0 || b.Dot(cx) <= 0 {
+ return 0, false
+ }
+
+ // Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above).
+ // This calculation has good accuracy for all chord lengths since it
+ // is based on both the dot product and cross product (rather than
+ // deriving one from the other). However, note that the chord length
+ // representation itself loses accuracy as the angle approaches π.
+ xDotC := x.Dot(c.Vector)
+ xDotC2 := xDotC * xDotC
+ qr := 1 - math.Sqrt(cx.Norm2()/c2)
+ return s1.ChordAngle((xDotC2 / c2) + (qr * qr)), true
+}
+
+// WedgeRel enumerates the possible relation between two wedges A and B.
+type WedgeRel int
+
+// Define the different possible relationships between two wedges.
+const (
+ WedgeEquals WedgeRel = iota // A and B are equal.
+ WedgeProperlyContains // A is a strict superset of B.
+ WedgeIsProperlyContained // A is a strict subset of B.
+ WedgeProperlyOverlaps // A-B, B-A, and A intersect B are non-empty.
+ WedgeIsDisjoint // A and B are disjoint.
+)
+
+// WedgeRelation reports the relation between two non-empty wedges
+// A=(a0, ab1, a2) and B=(b0, ab1, b2).
+func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel {
+ // There are 6 possible edge orderings at a shared vertex (all
+ // of these orderings are circular, i.e. abcd == bcda):
+ //
+ // (1) a2 b2 b0 a0: A contains B
+ // (2) a2 a0 b0 b2: B contains A
+ // (3) a2 a0 b2 b0: A and B are disjoint
+ // (4) a2 b0 a0 b2: A and B intersect in one wedge
+ // (5) a2 b2 a0 b0: A and B intersect in one wedge
+ // (6) a2 b0 b2 a0: A and B intersect in two wedges
+ //
+ // We do not distinguish between 4, 5, and 6.
+ // We pay extra attention when some of the edges overlap. When edges
+ // overlap, several of these orderings can be satisfied, and we take
+ // the most specific.
+ if a0 == b0 && a2 == b2 {
+ return WedgeEquals
+ }
+
+ // Cases 1, 2, 5, and 6
+ if OrderedCCW(a0, a2, b2, ab1) {
+ // The cases with this vertex ordering are 1, 5, and 6,
+ if OrderedCCW(b2, b0, a0, ab1) {
+ return WedgeProperlyContains
+ }
+
+ // We are in case 5 or 6, or case 2 if a2 == b2.
+ if a2 == b2 {
+ return WedgeIsProperlyContained
+ }
+ return WedgeProperlyOverlaps
+
+ }
+ // We are in case 2, 3, or 4.
+ if OrderedCCW(a0, b0, b2, ab1) {
+ return WedgeIsProperlyContained
+ }
+
+ if OrderedCCW(a0, b0, a2, ab1) {
+ return WedgeIsDisjoint
+ }
+ return WedgeProperlyOverlaps
+}
+
+// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2).
+// Equivalent to WedgeRelation == WedgeProperlyContains || WedgeEquals.
+func WedgeContains(a0, ab1, a2, b0, b2 Point) bool {
+ // For A to contain B (where each loop interior is defined to be its left
+ // side), the CCW edge order around ab1 must be a2 b2 b0 a0. We split
+ // this test into two parts that test three vertices each.
+ return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1)
+}
+
+// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2).
+// Equivalent to WedgeRelation != WedgeIsDisjoint.
+func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool {
+ // For A not to intersect B (where each loop interior is defined to be
+ // its left side), the CCW edge order around ab1 must be a0 b2 b0 a2.
+ // Note that it's important to write these conditions as negatives
+ // (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct
+ // results when two vertices are the same.
+ return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1))
+}
+
+// TODO(roberts): Differences from C++
+// LongitudePruner
+// updateMinDistanceMaxError
+// IsDistanceLess
+// UpdateMinDistance
+// IsInteriorDistanceLess
+// UpdateMinInteriorDistance
+// UpdateEdgePairMinDistance
+// EdgePairClosestPoints
+// IsEdgeBNearEdgeA
+// FaceSegments
+// PointFromExact
+// IntersectionExact
+// intersectionExactError
diff --git a/vendor/github.com/golang/geo/s2/latlng.go b/vendor/github.com/golang/geo/s2/latlng.go
new file mode 100644
index 0000000..55532c7
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/latlng.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+const (
+ northPoleLat = s1.Angle(math.Pi/2) * s1.Radian
+ southPoleLat = -northPoleLat
+)
+
+// LatLng represents a point on the unit sphere as a pair of angles.
+type LatLng struct {
+ Lat, Lng s1.Angle
+}
+
+// LatLngFromDegrees returns a LatLng for the coordinates given in degrees.
+func LatLngFromDegrees(lat, lng float64) LatLng {
+ return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree}
+}
+
+// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π].
+func (ll LatLng) IsValid() bool {
+ return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi
+}
+
+// Normalized returns the normalized version of the LatLng,
+// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π].
+func (ll LatLng) Normalized() LatLng {
+ lat := ll.Lat
+ if lat > northPoleLat {
+ lat = northPoleLat
+ } else if lat < southPoleLat {
+ lat = southPoleLat
+ }
+ lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian
+ return LatLng{lat, lng}
+}
+
+func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) }
+
+// Distance returns the angle between two LatLngs.
+func (ll LatLng) Distance(ll2 LatLng) s1.Angle {
+ // Haversine formula, as used in C++ S2LatLng::GetDistance.
+ lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians()
+ lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians()
+ dlat := math.Sin(0.5 * (lat2 - lat1))
+ dlng := math.Sin(0.5 * (lng2 - lng1))
+ x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2)
+ return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian
+}
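+
+// exampleDistanceKM is an illustrative sketch (editorial addition, not part of
+// the upstream library; the function name and the mean Earth radius constant
+// are assumptions). It converts the angle returned by Distance into an
+// approximate great-circle distance in kilometers.
+func exampleDistanceKM(a, b LatLng) float64 {
+	const earthRadiusKM = 6371.0 // assumed mean Earth radius
+	return a.Distance(b).Radians() * earthRadiusKM
+}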
+
+// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude
+// functions. Let's see if that's really necessary before exposing the same functionality.
+
+func latitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian
+}
+
+func longitude(p Point) s1.Angle {
+ return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian
+}
+
+// PointFromLatLng returns a Point for the given LatLng.
+// The maximum error in the result is 1.5 * dblEpsilon. (This does not
+// include the error of converting degrees, E5, E6, or E7 into radians.)
+func PointFromLatLng(ll LatLng) Point {
+ phi := ll.Lat.Radians()
+ theta := ll.Lng.Radians()
+ cosphi := math.Cos(phi)
+ return PointFromCoords(math.Cos(theta)*cosphi, math.Sin(theta)*cosphi, math.Sin(phi))
+}
+
+// LatLngFromPoint returns a LatLng for the given Point.
+func LatLngFromPoint(p Point) LatLng {
+ return LatLng{latitude(p), longitude(p)}
+}
diff --git a/vendor/github.com/golang/geo/s2/loop.go b/vendor/github.com/golang/geo/s2/loop.go
new file mode 100644
index 0000000..4d54860
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/loop.go
@@ -0,0 +1,282 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Loop represents a simple spherical polygon. It consists of a sequence
+// of vertices where the first vertex is implicitly connected to the
+// last. All loops are defined to have a CCW orientation, i.e. the interior of
+// the loop is on the left side of the edges. This implies that a clockwise
+// loop enclosing a small area is interpreted to be a CCW loop enclosing a
+// very large area.
+//
+// Loops are not allowed to have any duplicate vertices (whether adjacent or
+// not), and non-adjacent edges are not allowed to intersect. Loops must have
+// at least 3 vertices (except for the "empty" and "full" loops discussed
+// below).
+//
+// There are two special loops: the "empty" loop contains no points and the
+// "full" loop contains all points. These loops do not have any edges, but to
+// preserve the invariant that every loop can be represented as a vertex
+// chain, they are defined as having exactly one vertex each (see EmptyLoop
+// and FullLoop).
+type Loop struct {
+ vertices []Point
+
+ // originInside keeps a precomputed value whether this loop contains the origin
+ // versus computing from the set of vertices every time.
+ originInside bool
+
+ // bound is a conservative bound on all points contained by this loop.
+ // If l.ContainsPoint(P), then l.bound.ContainsPoint(P).
+ bound Rect
+
+ // Since "bound" is not exact, it is possible that a loop A contains
+ // another loop B whose bounds are slightly larger. subregionBound
+ // has been expanded sufficiently to account for this error, i.e.
+ // if A.Contains(B), then A.subregionBound.Contains(B.bound).
+ subregionBound Rect
+}
+
+// LoopFromPoints constructs a loop from the given points.
+func LoopFromPoints(pts []Point) *Loop {
+ l := &Loop{
+ vertices: pts,
+ }
+
+ l.initOriginAndBound()
+ return l
+}
+
+// LoopFromCell constructs a loop corresponding to the given cell.
+//
+// Note that the loop and cell *do not* contain exactly the same set of
+// points, because Loop and Cell have slightly different definitions of
+// point containment. For example, a Cell vertex is contained by all
+// four neighboring Cells, but it is contained by exactly one of four
+// Loops constructed from those cells. As another example, the cell
+// coverings of cell and LoopFromCell(cell) will be different, because the
+// loop contains points on its boundary that actually belong to other cells
+// (i.e., the covering will include a layer of neighboring cells).
+func LoopFromCell(c Cell) *Loop {
+ l := &Loop{
+ vertices: []Point{
+ c.Vertex(0),
+ c.Vertex(1),
+ c.Vertex(2),
+ c.Vertex(3),
+ },
+ }
+
+ l.initOriginAndBound()
+ return l
+}
+
+// EmptyLoop returns a special "empty" loop.
+func EmptyLoop() *Loop {
+ return LoopFromPoints([]Point{{r3.Vector{X: 0, Y: 0, Z: 1}}})
+}
+
+// FullLoop returns a special "full" loop.
+func FullLoop() *Loop {
+ return LoopFromPoints([]Point{{r3.Vector{X: 0, Y: 0, Z: -1}}})
+}
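+
+// exampleLoops is an editorial sketch, not part of the upstream library. It
+// constructs the special empty and full loops plus a small CCW triangle
+// covering one octant of the sphere; with this vertex order the interior
+// should be the small octant region, per the orientation rule described in
+// the Loop comment above.
+func exampleLoops() (empty, full, octant *Loop) {
+	empty = EmptyLoop() // contains no points
+	full = FullLoop()   // contains all points
+	octant = LoopFromPoints([]Point{
+		PointFromCoords(1, 0, 0),
+		PointFromCoords(0, 1, 0),
+		PointFromCoords(0, 0, 1),
+	})
+	return empty, full, octant
+}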
+
+// initOriginAndBound sets the origin containment for the given point and then calls
+// the initialization for the bounds objects and the internal index.
+func (l *Loop) initOriginAndBound() {
+ if len(l.vertices) < 3 {
+ // Check for the special "empty" and "full" loops (which have one vertex).
+ if !l.isEmptyOrFull() {
+ l.originInside = false
+ return
+ }
+
+		// This is the special empty or full loop, so whether the origin is
+		// inside depends on whether the single vertex is in the southern hemisphere.
+ l.originInside = l.vertices[0].Z < 0
+ } else {
+ // Point containment testing is done by counting edge crossings starting
+ // at a fixed point on the sphere (OriginPoint). We need to know whether
+ // the reference point (OriginPoint) is inside or outside the loop before
+ // we can construct the ShapeIndex. We do this by first guessing that
+ // it is outside, and then seeing whether we get the correct containment
+ // result for vertex 1. If the result is incorrect, the origin must be
+ // inside the loop.
+ //
+ // A loop with consecutive vertices A,B,C contains vertex B if and only if
+ // the fixed vector R = B.Ortho is contained by the wedge ABC. The
+ // wedge is closed at A and open at C, i.e. the point B is inside the loop
+ // if A = R but not if C = R. This convention is required for compatibility
+ // with VertexCrossing. (Note that we can't use OriginPoint
+ // as the fixed vector because of the possibility that B == OriginPoint.)
+ l.originInside = false
+ v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1])
+ if v1Inside != l.ContainsPoint(l.vertices[1]) {
+ l.originInside = true
+ }
+ }
+
+ // We *must* call initBound before initIndex, because initBound calls
+ // ContainsPoint(s2.Point), and ContainsPoint(s2.Point) does a bounds check whenever the
+ // index is not fresh (i.e., the loop has been added to the index but the
+ // index has not been updated yet).
+ l.initBound()
+
+ // TODO(roberts): Depends on s2shapeindex being implemented.
+ // l.initIndex()
+}
+
+// initBound sets up the approximate bounding Rects for this loop.
+func (l *Loop) initBound() {
+ // Check for the special "empty" and "full" loops.
+ if l.isEmptyOrFull() {
+ if l.IsEmpty() {
+ l.bound = EmptyRect()
+ } else {
+ l.bound = FullRect()
+ }
+ l.subregionBound = l.bound
+ return
+ }
+
+ // The bounding rectangle of a loop is not necessarily the same as the
+ // bounding rectangle of its vertices. First, the maximal latitude may be
+ // attained along the interior of an edge. Second, the loop may wrap
+ // entirely around the sphere (e.g. a loop that defines two revolutions of a
+ // candy-cane stripe). Third, the loop may include one or both poles.
+ // Note that a small clockwise loop near the equator contains both poles.
+ bounder := NewRectBounder()
+ for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice
+ bounder.AddPoint(l.Vertex(i))
+ }
+ b := bounder.RectBound()
+
+ if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) {
+ b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()}
+ }
+ // If a loop contains the south pole, then either it wraps entirely
+ // around the sphere (full longitude range), or it also contains the
+ // north pole in which case b.Lng.IsFull() due to the test above.
+ // Either way, we only need to do the south pole containment test if
+ // b.Lng.IsFull().
+ if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) {
+ b.Lat.Lo = -math.Pi / 2
+ }
+ l.bound = b
+ l.subregionBound = ExpandForSubregions(l.bound)
+}
+
+// ContainsOrigin reports true if this loop contains s2.OriginPoint().
+func (l Loop) ContainsOrigin() bool {
+ return l.originInside
+}
+
+// HasInterior returns true because all loops have an interior.
+func (l Loop) HasInterior() bool {
+ return true
+}
+
+// NumEdges returns the number of edges in this shape.
+func (l Loop) NumEdges() int {
+ if l.isEmptyOrFull() {
+ return 0
+ }
+ return len(l.vertices)
+}
+
+// Edge returns the endpoints for the given edge index.
+func (l Loop) Edge(i int) (a, b Point) {
+ return l.Vertex(i), l.Vertex(i + 1)
+}
+
+// IsEmpty reports true if this is the special "empty" loop that contains no points.
+func (l Loop) IsEmpty() bool {
+ return l.isEmptyOrFull() && !l.ContainsOrigin()
+}
+
+// IsFull reports true if this is the special "full" loop that contains all points.
+func (l Loop) IsFull() bool {
+ return l.isEmptyOrFull() && l.ContainsOrigin()
+}
+
+// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops.
+func (l Loop) isEmptyOrFull() bool {
+ return len(l.vertices) == 1
+}
+
+// RectBound returns a tight bounding rectangle. If the loop contains a point P,
+// then the bound also contains P.
+func (l Loop) RectBound() Rect {
+ return l.bound
+}
+
+// CapBound returns a bounding cap that may have more padding than the corresponding
+// RectBound. The bound is conservative such that if the loop contains a point P,
+// the bound also contains it.
+func (l Loop) CapBound() Cap {
+ return l.bound.CapBound()
+}
+
+// Vertex returns the vertex for the given index. For convenience, the vertex indices
+// wrap automatically for methods that do index math such as Edge.
+// i.e., Vertex(NumEdges() + n) is the same as Vertex(n).
+func (l Loop) Vertex(i int) Point {
+ return l.vertices[i%len(l.vertices)]
+}
+
+// Vertices returns the vertices in the loop.
+func (l Loop) Vertices() []Point {
+ return l.vertices
+}
+
+// ContainsPoint returns true if the loop contains the point.
+func (l Loop) ContainsPoint(p Point) bool {
+ // TODO(sbeckman): Move to bruteForceContains and update with ShapeIndex when available.
+ // Empty and full loops don't need a special case, but invalid loops with
+ // zero vertices do, so we might as well handle them all at once.
+ if len(l.vertices) < 3 {
+ return l.originInside
+ }
+
+ origin := OriginPoint()
+ inside := l.originInside
+ crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0))
+ for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice
+ inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i))
+ }
+ return inside
+}
+
+// RegularLoop creates a loop with the given number of vertices, all
+// located on a circle of the specified radius around the given center.
+func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop {
+ return RegularLoopForFrame(getFrame(center), radius, numVertices)
+}
+
+// RegularLoopForFrame creates a loop centered around the z-axis of the given
+// coordinate frame, with the first vertex in the direction of the positive x-axis.
+func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop {
+ return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices))
+}
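+
+// exampleRegularLoop is an editorial sketch, not part of the upstream library.
+// It builds a 16-vertex loop with a 5 degree angular radius around a
+// hypothetical center and checks that the center is inside it via the
+// edge-crossing parity test implemented by ContainsPoint.
+func exampleRegularLoop() bool {
+	center := PointFromLatLng(LatLngFromDegrees(40.0, -74.0)) // hypothetical center
+	loop := RegularLoop(center, 5*s1.Degree, 16)
+	return loop.ContainsPoint(center) // expected to be true
+}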
diff --git a/vendor/github.com/golang/geo/s2/matrix3x3.go b/vendor/github.com/golang/geo/s2/matrix3x3.go
new file mode 100644
index 0000000..1f78d5d
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/matrix3x3.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "fmt"
+)
+
+// matrix3x3 represents a traditional 3x3 matrix of floating point values.
+// This is not a full fledged matrix. It only contains the pieces needed
+// to satisfy the computations done within the s2 package.
+type matrix3x3 [3][3]float64
+
+// col returns the given column as a Point.
+func (m *matrix3x3) col(col int) Point {
+ return PointFromCoords(m[0][col], m[1][col], m[2][col])
+}
+
+// row returns the given row as a Point.
+func (m *matrix3x3) row(row int) Point {
+ return PointFromCoords(m[row][0], m[row][1], m[row][2])
+}
+
+// setCol sets the specified column to the value in the given Point.
+func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 {
+ m[0][col] = p.X
+ m[1][col] = p.Y
+ m[2][col] = p.Z
+
+ return m
+}
+
+// setRow sets the specified row to the value in the given Point.
+func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 {
+ m[row][0] = p.X
+ m[row][1] = p.Y
+ m[row][2] = p.Z
+
+ return m
+}
+
+// scale multiplies the matrix by the given value.
+func (m *matrix3x3) scale(f float64) *matrix3x3 {
+ return &matrix3x3{
+ [3]float64{f * m[0][0], f * m[0][1], f * m[0][2]},
+ [3]float64{f * m[1][0], f * m[1][1], f * m[1][2]},
+ [3]float64{f * m[2][0], f * m[2][1], f * m[2][2]},
+ }
+}
+
+// mul returns the multiplication of m by the Point p and converts the
+// resulting 1x3 matrix into a Point.
+func (m *matrix3x3) mul(p Point) Point {
+ return PointFromCoords(
+ m[0][0]*p.X+m[0][1]*p.Y+m[0][2]*p.Z,
+ m[1][0]*p.X+m[1][1]*p.Y+m[1][2]*p.Z,
+ m[2][0]*p.X+m[2][1]*p.Y+m[2][2]*p.Z,
+ )
+}
+
+// det returns the determinant of this matrix.
+func (m *matrix3x3) det() float64 {
+ // | a b c |
+ // det | d e f | = aei + bfg + cdh - ceg - bdi - afh
+ // | g h i |
+ return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] -
+ m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1]
+}
+
+// transpose reflects the matrix along its diagonal and returns the result.
+func (m *matrix3x3) transpose() *matrix3x3 {
+ m[0][1], m[1][0] = m[1][0], m[0][1]
+ m[0][2], m[2][0] = m[2][0], m[0][2]
+ m[1][2], m[2][1] = m[2][1], m[1][2]
+
+ return m
+}
+
+// String formats the matrix into an easier to read layout.
+func (m *matrix3x3) String() string {
+ return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]",
+ m[0][0], m[0][1], m[0][2],
+ m[1][0], m[1][1], m[1][2],
+ m[2][0], m[2][1], m[2][2],
+ )
+}
+
+// getFrame returns the orthonormal frame for the given point on the unit sphere.
+func getFrame(p Point) matrix3x3 {
+ // Given the point p on the unit sphere, extend this into a right-handed
+ // coordinate frame of unit-length column vectors m = (x,y,z). Note that
+ // the vectors (x,y) are an orthonormal frame for the tangent space at point p,
+ // while p itself is an orthonormal frame for the normal space at p.
+ m := matrix3x3{}
+ m.setCol(2, p)
+ m.setCol(1, Point{p.Ortho()})
+ m.setCol(0, Point{m.col(1).Cross(p.Vector)})
+ return m
+}
+
+// toFrame returns the coordinates of the given point with respect to its orthonormal basis m.
+// The resulting point q satisfies the identity (m * q == p).
+func toFrame(m matrix3x3, p Point) Point {
+ // The inverse of an orthonormal matrix is its transpose.
+ return m.transpose().mul(p)
+}
+
+// fromFrame returns the coordinates of the given point in standard axis-aligned basis
+// from its orthonormal basis m.
+// The resulting point p satisfies the identity (p == m * q).
+func fromFrame(m matrix3x3, q Point) Point {
+ return m.mul(q)
+}
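+
+// exampleFrameRoundTrip is an editorial sketch, not part of the upstream
+// library. It builds the orthonormal frame at a hypothetical point and shows
+// the identity described above: because the frame is orthonormal, toFrame
+// followed by fromFrame recovers the original point up to floating-point
+// error.
+func exampleFrameRoundTrip() (Point, Point) {
+	p := PointFromCoords(0.3, -0.4, 0.87) // hypothetical direction; PointFromCoords normalizes it
+	m := getFrame(p)
+	q := toFrame(m, p)        // p in frame coordinates; approximately (0, 0, 1)
+	return p, fromFrame(m, q) // the second value approximately equals p
+}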
diff --git a/vendor/github.com/golang/geo/s2/metric.go b/vendor/github.com/golang/geo/s2/metric.go
new file mode 100644
index 0000000..a005f79
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/metric.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+// This file implements functions for various S2 measurements.
+
+import "math"
+
+// A Metric is a measure for cells. It is used to describe the shape and size
+// of cells. They are useful for deciding which cell level to use in order to
+// satisfy a given condition (e.g. that cell vertices must be no further than
+// "x" apart). You can use the Value(level) method to compute the corresponding
+// length or area on the unit sphere for cells at a given level. The minimum
+// and maximum bounds are valid for cells at all levels, but they may be
+// somewhat conservative for very large cells (e.g. face cells).
+type Metric struct {
+ // Dim is either 1 or 2, for a 1D or 2D metric respectively.
+ Dim int
+ // Deriv is the scaling factor for the metric.
+ Deriv float64
+}
+
+// Defined metrics.
+// Of the projection methods defined in C++, Go only supports the quadratic projection.
+
+// Each cell is bounded by four planes passing through its four edges and
+// the center of the sphere. These metrics relate to the angle between each
+// pair of opposite bounding planes, or equivalently, between the planes
+// corresponding to two different s-values or two different t-values.
+var (
+ MinAngleSpanMetric = Metric{1, 4.0 / 3}
+ AvgAngleSpanMetric = Metric{1, math.Pi / 2}
+ MaxAngleSpanMetric = Metric{1, 1.704897179199218452}
+)
+
+// The width of a geometric figure is defined as the distance between two
+// parallel bounding lines in a given direction. For cells, the minimum
+// width is always attained between two opposite edges, and the maximum
+// width is attained between two opposite vertices. However, for our
+// purposes we redefine the width of a cell as the perpendicular distance
+// between a pair of opposite edges. A cell therefore has two widths, one
+// in each direction. The minimum width according to this definition agrees
+// with the classic geometric one, but the maximum width is different. (The
+// maximum geometric width corresponds to MaxDiag defined below.)
+//
+// The average width in both directions for all cells at level k is approximately
+// AvgWidthMetric.Value(k).
+//
+// The width is useful for bounding the minimum or maximum distance from a
+// point on one edge of a cell to the closest point on the opposite edge.
+// For example, this is useful when growing regions by a fixed distance.
+var (
+ MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3}
+ AvgWidthMetric = Metric{1, 1.434523672886099389}
+ MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv}
+)
+
+// The edge length metrics can be used to bound the minimum, maximum,
+// or average distance from the center of one cell to the center of one of
+// its edge neighbors. In particular, it can be used to bound the distance
+// between adjacent cell centers along the space-filling Hilbert curve for
+// cells at any given level.
+var (
+ MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3}
+ AvgEdgeMetric = Metric{1, 1.459213746386106062}
+ MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv}
+
+ // MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level,
+ // where the edge aspect ratio of a cell is defined as the ratio of its longest
+ // edge length to its shortest edge length.
+ MaxEdgeAspect = 1.442615274452682920
+
+ MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9}
+ AvgAreaMetric = Metric{2, 4 * math.Pi / 6}
+ MaxAreaMetric = Metric{2, 2.635799256963161491}
+)
+
+// The maximum diagonal is also the maximum diameter of any cell,
+// and also the maximum geometric width (see the comment for widths). For
+// example, the distance from an arbitrary point to the closest cell center
+// at a given level is at most half the maximum diagonal length.
+var (
+ MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9}
+ AvgDiagMetric = Metric{1, 2.060422738998471683}
+ MaxDiagMetric = Metric{1, 2.438654594434021032}
+
+ // MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any
+ // level, where the diagonal aspect ratio of a cell is defined as the ratio
+ // of its longest diagonal length to its shortest diagonal length.
+ MaxDiagAspect = math.Sqrt(3)
+)
+
+// Value returns the value of the metric at the given level.
+func (m Metric) Value(level int) float64 {
+ return math.Ldexp(m.Deriv, -m.Dim*level)
+}
+
+// MinLevel returns the minimum level such that the metric is at most
+// the given value, or maxLevel (30) if there is no such level.
+//
+// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal
+// lengths are 0.1 or smaller. The returned value is always a valid level.
+//
+// In C++, this is called GetLevelForMaxValue.
+func (m Metric) MinLevel(val float64) int {
+ if val < 0 {
+ return maxLevel
+ }
+
+ level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1))
+ if level > maxLevel {
+ level = maxLevel
+ }
+ if level < 0 {
+ level = 0
+ }
+ return level
+}
+
+// MaxLevel returns the maximum level such that the metric is at least
+// the given value, or zero if there is no such level.
+//
+// For example, MaxLevel(0.1) returns the maximum level such that all cells have a
+// minimum width of 0.1 or larger. The returned value is always a valid level.
+//
+// In C++, this is called GetLevelForMinValue.
+func (m Metric) MaxLevel(val float64) int {
+ if val <= 0 {
+ return maxLevel
+ }
+
+ level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1)
+ if level > maxLevel {
+ level = maxLevel
+ }
+ if level < 0 {
+ level = 0
+ }
+ return level
+}
+
+// ClosestLevel returns the level at which the metric has approximately the given
+// value. The return value is always a valid level. For example,
+// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge
+// length is approximately 0.1.
+func (m Metric) ClosestLevel(val float64) int {
+ x := math.Sqrt2
+ if m.Dim == 2 {
+ x = 2
+ }
+ return m.MinLevel(x * val)
+}
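+
+// exampleMetricLevels is an editorial sketch, not part of the upstream
+// library. It mirrors the examples in the MinLevel and MaxLevel comments for
+// a hypothetical target length of 0.001 radians on the unit sphere.
+func exampleMetricLevels() (int, int) {
+	const target = 0.001 // hypothetical length, in radians
+	// Smallest level at which every cell diagonal is at most target.
+	lo := MaxDiagMetric.MinLevel(target)
+	// Largest level at which every cell is at least target wide.
+	hi := MinWidthMetric.MaxLevel(target)
+	return lo, hi
+}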
diff --git a/vendor/github.com/golang/geo/s2/paddedcell.go b/vendor/github.com/golang/geo/s2/paddedcell.go
new file mode 100644
index 0000000..03d4b35
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/paddedcell.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/r2"
+)
+
+// PaddedCell represents a Cell whose (u,v)-range has been expanded on
+// all sides by a given amount of "padding". Unlike Cell, its methods and
+// representation are optimized for clipping edges against Cell boundaries
+// to determine which cells are intersected by a given set of edges.
+type PaddedCell struct {
+ id CellID
+ padding float64
+ bound r2.Rect
+ middle r2.Rect // A rect in (u, v)-space that belongs to all four children.
+ iLo, jLo int // Minimum (i,j)-coordinates of this cell before padding
+ orientation int // Hilbert curve orientation of this cell.
+ level int
+}
+
+// PaddedCellFromCellID constructs a padded cell with the given padding.
+func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell {
+ p := &PaddedCell{
+ id: id,
+ padding: padding,
+ middle: r2.EmptyRect(),
+ }
+
+ // Fast path for constructing a top-level face (the most common case).
+ if id.isFace() {
+ limit := padding + 1
+ p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}}
+ p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}}
+ p.orientation = id.Face() & 1
+ return p
+ }
+
+ _, p.iLo, p.jLo, p.orientation = id.faceIJOrientation()
+ p.level = id.Level()
+ p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding)
+ ijSize := sizeIJ(p.level)
+ p.iLo &= -ijSize
+ p.jLo &= -ijSize
+
+ return p
+}
+
+// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index.
+// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j
+// indices correspond to increasing u- and v-values respectively.
+func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell {
+ // Compute the position and orientation of the child incrementally from the
+ // orientation of the parent.
+ pos := ijToPos[parent.orientation][2*i+j]
+
+ p := &PaddedCell{
+ id: parent.id.Children()[pos],
+ padding: parent.padding,
+ bound: parent.bound,
+ orientation: parent.orientation ^ posToOrientation[pos],
+ level: parent.level + 1,
+ middle: r2.EmptyRect(),
+ }
+
+ ijSize := sizeIJ(p.level)
+ p.iLo = parent.iLo + i*ijSize
+ p.jLo = parent.jLo + j*ijSize
+
+ // For each child, one corner of the bound is taken directly from the parent
+ // while the diagonally opposite corner is taken from middle().
+ middle := parent.Middle()
+ if i == 1 {
+ p.bound.X.Lo = middle.X.Lo
+ } else {
+ p.bound.X.Hi = middle.X.Hi
+ }
+ if j == 1 {
+ p.bound.Y.Lo = middle.Y.Lo
+ } else {
+ p.bound.Y.Hi = middle.Y.Hi
+ }
+
+ return p
+}
+
+// CellID returns the CellID this padded cell represents.
+func (p PaddedCell) CellID() CellID {
+ return p.id
+}
+
+// Padding returns the amount of padding on this cell.
+func (p PaddedCell) Padding() float64 {
+ return p.padding
+}
+
+// Level returns the level this cell is at.
+func (p PaddedCell) Level() int {
+ return p.level
+}
+
+// Center returns the center of this cell.
+func (p PaddedCell) Center() Point {
+ ijSize := sizeIJ(p.level)
+ si := uint64(2*p.iLo + ijSize)
+ ti := uint64(2*p.jLo + ijSize)
+ return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()}
+}
+
+// Middle returns the rectangle in the middle of this cell that belongs to
+// all four of its children in (u,v)-space.
+func (p *PaddedCell) Middle() r2.Rect {
+ // We compute this field lazily because it is not needed the majority of the
+ // time (i.e., for cells where the recursion terminates).
+ if p.middle.IsEmpty() {
+ ijSize := sizeIJ(p.level)
+ u := stToUV(siTiToST(uint64(2*p.iLo + ijSize)))
+ v := stToUV(siTiToST(uint64(2*p.jLo + ijSize)))
+ p.middle = r2.Rect{
+ r1.Interval{u - p.padding, u + p.padding},
+ r1.Interval{v - p.padding, v + p.padding},
+ }
+ }
+ return p.middle
+}
+
+// Bound returns the bounds for this cell in (u,v)-space including padding.
+func (p PaddedCell) Bound() r2.Rect {
+ return p.bound
+}
+
+// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal
+// position. The traversal position corresponds to the order in which child
+// cells are visited by the Hilbert curve.
+func (p PaddedCell) ChildIJ(pos int) (i, j int) {
+ ij := posToIJ[p.orientation][pos]
+ return ij >> 1, ij & 1
+}
+
+// EntryVertex returns the vertex where the space-filling curve enters this cell.
+func (p PaddedCell) EntryVertex() Point {
+ // The curve enters at the (0,0) vertex unless the axis directions are
+ // reversed, in which case it enters at the (1,1) vertex.
+ i := p.iLo
+ j := p.jLo
+ if p.orientation&invertMask != 0 {
+ ijSize := sizeIJ(p.level)
+ i += ijSize
+ j += ijSize
+ }
+ return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
+}
+
+// ExitVertex returns the vertex where the space-filling curve exits this cell.
+func (p PaddedCell) ExitVertex() Point {
+ // The curve exits at the (1,0) vertex unless the axes are swapped or
+ // inverted but not both, in which case it exits at the (0,1) vertex.
+ i := p.iLo
+ j := p.jLo
+ ijSize := sizeIJ(p.level)
+ if p.orientation == 0 || p.orientation == swapMask+invertMask {
+ i += ijSize
+ } else {
+ j += ijSize
+ }
+ return Point{faceSiTiToXYZ(p.id.Face(), uint64(2*i), uint64(2*j)).Normalize()}
+}
+
+// ShrinkToFit returns the smallest CellID that contains all descendants of this
+// padded cell whose bounds intersect the given rect. For algorithms that use
+// recursive subdivision to find the cells that intersect a particular object, this
+// method can be used to skip all of the initial subdivision steps where only
+// one child needs to be expanded.
+//
+// Note that this method is not the same as returning the smallest cell that contains
+// the intersection of this cell with rect. Because of the padding, even if one child
+// completely contains rect it is still possible that a neighboring child may also
+// intersect the given rect.
+//
+// The provided Rect must intersect the bounds of this cell.
+func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID {
+ // Quick rejection test: if rect contains the center of this cell along
+ // either axis, then no further shrinking is possible.
+ if p.level == 0 {
+ // Fast path (most calls to this function start with a face cell).
+ if rect.X.Contains(0) || rect.Y.Contains(0) {
+ return p.id
+ }
+ }
+
+ ijSize := sizeIJ(p.level)
+ if rect.X.Contains(stToUV(siTiToST(uint64(2*p.iLo+ijSize)))) ||
+ rect.Y.Contains(stToUV(siTiToST(uint64(2*p.jLo+ijSize)))) {
+ return p.id
+ }
+
+ // Otherwise we expand rect by the given padding on all sides and find
+ // the range of coordinates that it spans along the i- and j-axes. We then
+ // compute the highest bit position at which the min and max coordinates
+ // differ. This corresponds to the first cell level at which at least two
+ // children intersect rect.
+
+ // Increase the padding to compensate for the error in uvToST.
+ // (The constant below is a provable upper bound on the additional error.)
+ padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon)
+ iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded
+ var iXor, jXor int // XOR of the min and max i- or j-coordinates
+
+ if iMin < stToIJ(uvToST(padded.X.Lo)) {
+ iMin = stToIJ(uvToST(padded.X.Lo))
+ }
+ if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b {
+ iXor = iMin ^ a
+ } else {
+ iXor = iMin ^ b
+ }
+
+ if jMin < stToIJ(uvToST(padded.Y.Lo)) {
+ jMin = stToIJ(uvToST(padded.Y.Lo))
+ }
+ if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b {
+ jXor = jMin ^ a
+ } else {
+ jXor = jMin ^ b
+ }
+
+ // Compute the highest bit position where the two i- or j-endpoints differ,
+ // and then choose the cell level that includes both of these endpoints. So
+ // if both pairs of endpoints are equal we choose maxLevel; if they differ
+ // only at bit 0, we choose (maxLevel - 1), and so on.
+ levelMSB := uint64(((iXor | jXor) << 1) + 1)
+ level := maxLevel - int(findMSBSetNonZero64(levelMSB))
+ if level <= p.level {
+ return p.id
+ }
+
+ return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level)
+}
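+
+// examplePaddedCellChildren is an editorial sketch, not part of the upstream
+// library. Given a CellID obtained elsewhere, it pads the cell and visits its
+// four children in Hilbert-curve traversal order, which is how edge-clipping
+// code typically descends the cell hierarchy.
+func examplePaddedCellChildren(id CellID, padding float64) []*PaddedCell {
+	parent := PaddedCellFromCellID(id, padding)
+	children := make([]*PaddedCell, 0, 4)
+	for pos := 0; pos < 4; pos++ {
+		i, j := parent.ChildIJ(pos) // (i,j) index of the child at traversal position pos
+		children = append(children, PaddedCellFromParentIJ(parent, i, j))
+	}
+	return children
+}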
diff --git a/vendor/github.com/golang/geo/s2/point.go b/vendor/github.com/golang/geo/s2/point.go
new file mode 100644
index 0000000..2b300cd
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/point.go
@@ -0,0 +1,291 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r3"
+ "github.com/golang/geo/s1"
+)
+
+// Point represents a point on the unit sphere as a normalized 3D vector.
+// Points are guaranteed to be close to normalized.
+// Fields should be treated as read-only. Use one of the factory methods for creation.
+type Point struct {
+ r3.Vector
+}
+
+// PointFromCoords creates a new normalized point from coordinates.
+//
+// This always returns a valid point. If the given coordinates can not be normalized
+// the origin point will be returned.
+//
+// This behavior differs from the C++ construction of an S2Point from coordinates
+// (i.e. S2Point(x, y, z)), which does not normalize the result.
+func PointFromCoords(x, y, z float64) Point {
+ if x == 0 && y == 0 && z == 0 {
+ return OriginPoint()
+ }
+ return Point{r3.Vector{x, y, z}.Normalize()}
+}
+
+// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed
+// reference point. In particular, this is the "point at infinity" used for
+// point-in-polygon testing (by counting the number of edge crossings).
+//
+// It should *not* be a point that is commonly used in edge tests in order
+// to avoid triggering code to handle degenerate cases (this rules out the
+// north and south poles). It should also not be on the boundary of any
+// low-level S2Cell for the same reason.
+func OriginPoint() Point {
+ return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}}
+}
+
+// PointCross returns a Point that is orthogonal to both p and op. This is similar to
+// p.Cross(op) (the true cross product) except that it does a better job of
+// ensuring orthogonality when the Point is nearly parallel to op; it returns
+// a non-zero result even when p == op or p == -op; and the result is a
+// unit-length Point.
+//
+// It satisfies the following properties (f == PointCross):
+//
+// (1) f(p, op) != 0 for all p, op
+// (2) f(op,p) == -f(p,op) unless p == op or p == -op
+// (3) f(-p,op) == -f(p,op) unless p == op or p == -op
+// (4) f(p,-op) == -f(p,op) unless p == op or p == -op
+func (p Point) PointCross(op Point) Point {
+ // NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd",
+ // but PointCross more accurately describes how this method is used.
+ x := p.Add(op.Vector).Cross(op.Sub(p.Vector))
+
+ if x.ApproxEqual(r3.Vector{0, 0, 0}) {
+ // The only result that makes sense mathematically is to return zero, but
+ // we find it more convenient to return an arbitrary orthogonal vector.
+ return Point{p.Ortho()}
+ }
+
+ return Point{x.Normalize()}
+}
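+
+// examplePointCross is an editorial sketch, not part of the upstream library.
+// Unlike the plain cross product, PointCross of a point with itself is still
+// a unit-length vector orthogonal to it (property (1) above), which is what
+// degenerate edge cases rely on.
+func examplePointCross() Point {
+	p := PointFromCoords(0, 0, 1)
+	return p.PointCross(p) // a unit vector orthogonal to p; p.Cross(p.Vector) would be zero
+}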
+
+// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that
+// order while sweeping CCW around the point O.
+//
+// You can think of this as testing whether A <= B <= C with respect to the
+// CCW ordering around O that starts at A, or equivalently, whether B is
+// contained in the range of angles (inclusive) that starts at A and extends
+// CCW to C. Properties:
+//
+// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b
+// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c
+// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c
+// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true
+// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false
+func OrderedCCW(a, b, c, o Point) bool {
+ sum := 0
+ if RobustSign(b, o, a) != Clockwise {
+ sum++
+ }
+ if RobustSign(c, o, b) != Clockwise {
+ sum++
+ }
+ if RobustSign(a, o, c) == CounterClockwise {
+ sum++
+ }
+ return sum >= 2
+}
+
+// Distance returns the angle between two points.
+func (p Point) Distance(b Point) s1.Angle {
+ return p.Vector.Angle(b.Vector)
+}
+
+// ApproxEqual reports whether the two points are similar enough to be equal.
+func (p Point) ApproxEqual(other Point) bool {
+ return p.Vector.Angle(other.Vector) <= s1.Angle(epsilon)
+}
+
+// PointArea returns the area on the unit sphere for the triangle defined by the
+// given points.
+//
+// This method is based on l'Huilier's theorem,
+//
+// tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2))
+//
+// where E is the spherical excess of the triangle (i.e. its area),
+// a, b, c are the side lengths, and
+// s is the semiperimeter (a + b + c) / 2.
+//
+// The only significant source of error using l'Huilier's method is the
+// cancellation error of the terms (s-a), (s-b), (s-c). This leads to a
+// *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares
+// to a relative error of about 1e-15 / E using Girard's formula, where E is
+// the true area of the triangle. Girard's formula can be even worse than
+// this for very small triangles, e.g. a triangle with a true area of 1e-30
+// might evaluate to 1e-5.
+//
+// So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where
+// dmin = min(s-a, s-b, s-c). This basically includes all triangles
+// except for extremely long and skinny ones.
+//
+// Since we don't know E, we would like a conservative upper bound on
+// the triangle area in terms of s and dmin. It's possible to show that
+// E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1).
+// Using this, it's easy to show that we should always use l'Huilier's
+// method if dmin >= k2 * s^5, where k2 is about 1e-2. Furthermore,
+// if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where
+// k3 is about 0.1. Since the best case error using Girard's formula
+// is about 1e-15, this means that we shouldn't even consider it unless
+// s >= 3e-4 or so.
+func PointArea(a, b, c Point) float64 {
+ sa := float64(b.Angle(c.Vector))
+ sb := float64(c.Angle(a.Vector))
+ sc := float64(a.Angle(b.Vector))
+ s := 0.5 * (sa + sb + sc)
+ if s >= 3e-4 {
+ // Consider whether Girard's formula might be more accurate.
+ dmin := s - math.Max(sa, math.Max(sb, sc))
+ if dmin < 1e-2*s*s*s*s*s {
+ // This triangle is skinny enough to use Girard's formula.
+ ab := a.PointCross(b)
+ bc := b.PointCross(c)
+ ac := a.PointCross(c)
+ area := math.Max(0.0, float64(ab.Angle(ac.Vector)-ab.Angle(bc.Vector)+bc.Angle(ac.Vector)))
+
+ if dmin < s*0.1*area {
+ return area
+ }
+ }
+ }
+
+ // Use l'Huilier's formula.
+ return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))*
+ math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc)))))
+}
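+
+// examplePointArea is an editorial sketch, not part of the upstream library.
+// The triangle whose vertices are the three positive coordinate axes covers
+// one eighth of the sphere, so PointArea returns approximately 4*Pi/8 = Pi/2
+// steradians.
+func examplePointArea() float64 {
+	a := PointFromCoords(1, 0, 0)
+	b := PointFromCoords(0, 1, 0)
+	c := PointFromCoords(0, 0, 1)
+	return PointArea(a, b, c) // approximately math.Pi / 2
+}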
+
+// TrueCentroid returns the true centroid of the spherical triangle ABC multiplied by the
+// signed area of spherical triangle ABC. The result is not normalized.
+// The reasons for multiplying by the signed area are (1) this is the quantity
+// that needs to be summed to compute the centroid of a union or difference of triangles,
+// and (2) it's actually easier to calculate this way. All points must have unit length.
+//
+// The true centroid (mass centroid) is defined as the surface integral
+// over the spherical triangle of (x,y,z) divided by the triangle area.
+// This is the point that the triangle would rotate around if it was
+// spinning in empty space.
+//
+// The best centroid for most purposes is the true centroid. Unlike the
+// planar and surface centroids, the true centroid behaves linearly as
+// regions are added or subtracted. That is, if you split a triangle into
+// pieces and compute the average of their centroids (weighted by triangle
+// area), the result equals the centroid of the original triangle. This is
+// not true of the other centroids.
+func TrueCentroid(a, b, c Point) Point {
+ ra := float64(1)
+ if sa := float64(b.Distance(c)); sa != 0 {
+ ra = sa / math.Sin(sa)
+ }
+ rb := float64(1)
+ if sb := float64(c.Distance(a)); sb != 0 {
+ rb = sb / math.Sin(sb)
+ }
+ rc := float64(1)
+ if sc := float64(a.Distance(b)); sc != 0 {
+ rc = sc / math.Sin(sc)
+ }
+
+ // Now compute a point M such that:
+ //
+ // [Ax Ay Az] [Mx] [ra]
+ // [Bx By Bz] [My] = 0.5 * det(A,B,C) * [rb]
+ // [Cx Cy Cz] [Mz] [rc]
+ //
+ // To improve the numerical stability we subtract the first row (A) from the
+ // other two rows; this reduces the cancellation error when A, B, and C are
+ // very close together. Then we solve it using Cramer's rule.
+ //
+ // This code still isn't as numerically stable as it could be.
+ // The biggest potential improvement is to compute B-A and C-A more
+ // accurately so that (B-A)x(C-A) is always inside triangle ABC.
+ x := r3.Vector{a.X, b.X - a.X, c.X - a.X}
+ y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y}
+ z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z}
+ r := r3.Vector{ra, rb - ra, rc - ra}
+
+ return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)}
+}
+
+// PlanarCentroid returns the centroid of the planar triangle ABC, which is not normalized.
+// It can be normalized to unit length to obtain the "surface centroid" of the corresponding
+// spherical triangle, i.e. the intersection of the three medians. However,
+// note that for large spherical triangles the surface centroid may be
+// nowhere near the intuitive "center" (see the example below).
+//
+// Note that the surface centroid may be nowhere near the intuitive
+// "center" of a spherical triangle. For example, consider the triangle
+// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere).
+// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is
+// within a distance of 2*eps of the vertex B. Note that the median from A
+// (the segment connecting A to the midpoint of BC) passes through S, since
+// this is the shortest path connecting the two endpoints. On the other
+// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto
+// the surface is a much more reasonable interpretation of the "center" of
+// this triangle.
+func PlanarCentroid(a, b, c Point) Point {
+ return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. / 3)}
+}
+
+// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance
+// between the two given points. The points must be unit length.
+func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle {
+ return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2()))
+}
+
+// regularPoints generates a slice of points shaped as a regular polygon with
+// the numVertices vertices, all located on a circle of the specified angular radius
+// around the center. The radius is the actual distance from center to each vertex.
+func regularPoints(center Point, radius s1.Angle, numVertices int) []Point {
+ return regularPointsForFrame(getFrame(center), radius, numVertices)
+}
+
+// regularPointsForFrame generates a slice of points shaped as a regular polygon
+// with numVertices vertices, all on a circle of the specified angular radius around
+// the center. The radius is the actual distance from the center to each vertex.
+func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point {
+ // We construct the loop in the given frame coordinates, with the center at
+ // (0, 0, 1). For a loop of radius r, the loop vertices have the form
+ // (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the
+	// (x, y, z) where x^2 + y^2 = sin^2(r) and z = cos(r). The distance on the
+ z := math.Cos(radius.Radians())
+ r := math.Sin(radius.Radians())
+ radianStep := 2 * math.Pi / float64(numVertices)
+ var vertices []Point
+
+ for i := 0; i < numVertices; i++ {
+ angle := float64(i) * radianStep
+ p := PointFromCoords(r*math.Cos(angle), r*math.Sin(angle), z)
+ vertices = append(vertices, Point{fromFrame(frame, p).Normalize()})
+ }
+
+ return vertices
+}
+
+// TODO: Differences from C++
+// Rotate
+// Angle
+// TurnAngle
+// SignedArea
diff --git a/vendor/github.com/golang/geo/s2/polygon.go b/vendor/github.com/golang/geo/s2/polygon.go
new file mode 100644
index 0000000..352cc23
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polygon.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+// Polygon represents a sequence of zero or more loops; recall that the
+// interior of a loop is defined to be its left-hand side (see Loop).
+//
+// When the polygon is initialized, the given loops are automatically converted
+// into a canonical form consisting of "shells" and "holes". Shells and holes
+// are both oriented CCW, and are nested hierarchically. The loops are
+// reordered to correspond to a preorder traversal of the nesting hierarchy.
+//
+// Polygons may represent any region of the sphere with a polygonal boundary,
+// including the entire sphere (known as the "full" polygon). The full polygon
+// consists of a single full loop (see Loop), whereas the empty polygon has no
+// loops at all.
+//
+// Use FullPolygon() to construct a full polygon. The zero value of Polygon is
+// treated as the empty polygon.
+//
+// Polygons have the following restrictions:
+//
+// - Loops may not cross, i.e. the boundary of a loop may not intersect
+// both the interior and exterior of any other loop.
+//
+// - Loops may not share edges, i.e. if a loop contains an edge AB, then
+// no other loop may contain AB or BA.
+//
+// - Loops may share vertices, however no vertex may appear twice in a
+// single loop (see Loop).
+//
+// - No loop may be empty. The full loop may appear only in the full polygon.
+type Polygon struct {
+ loops []*Loop
+
+ // loopDepths keeps track of how deep a given loop is in this polygon.
+ // The depths tracked in this slice are kept in 1:1 lockstep with the
+ // elements in the loops list.
+	// Holes inside a polygon have odd depths, and shells have even depths.
+ loopDepths []int
+
+ // index is a spatial index of all the polygon loops.
+ index ShapeIndex
+
+ // hasHoles tracks if this polygon has at least one hole.
+ hasHoles bool
+
+ // numVertices keeps the running total of all of the vertices of the contained loops.
+ numVertices int
+
+	// bound is a conservative bound on all points contained by this polygon.
+	// If the polygon contains a point P, then bound.ContainsPoint(P) is true.
+ bound Rect
+
+	// Since bound is not exact, it is possible that a polygon A contains
+	// another polygon B whose bounds are slightly larger. subregionBound
+ // has been expanded sufficiently to account for this error, i.e.
+ // if A.Contains(B), then A.subregionBound.Contains(B.bound).
+ subregionBound Rect
+}
+
+// PolygonFromLoops constructs a polygon from the given hierarchically nested
+// loops. The polygon interior consists of the points contained by an odd
+// number of loops. (Recall that a loop contains the set of points on its
+// left-hand side.)
+//
+// This method figures out the loop nesting hierarchy and assigns every loop a
+// depth. Shells have even depths, and holes have odd depths.
+//
+// NOTE: this function is NOT YET IMPLEMENTED for more than one loop and will
+// panic if given a slice of length > 1.
+func PolygonFromLoops(loops []*Loop) *Polygon {
+ if len(loops) > 1 {
+ panic("s2.PolygonFromLoops for multiple loops is not yet implemented")
+ }
+ return &Polygon{
+ loops: loops,
+ // TODO(roberts): This is explicitly set as depth of 0 for the one loop in
+ // the polygon. When multiple loops are supported, fix this to set the depths.
+ loopDepths: []int{0},
+ numVertices: len(loops[0].Vertices()), // TODO(roberts): Once multi-loop is supported, fix this.
+ bound: EmptyRect(),
+ subregionBound: EmptyRect(),
+ }
+}
+
+// FullPolygon returns a special "full" polygon.
+func FullPolygon() *Polygon {
+ return &Polygon{
+ loops: []*Loop{
+ FullLoop(),
+ },
+ loopDepths: []int{0},
+ numVertices: len(FullLoop().Vertices()),
+ bound: FullRect(),
+ subregionBound: FullRect(),
+ }
+}
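+
+// examplePolygon is an editorial sketch, not part of the upstream library.
+// Since PolygonFromLoops currently supports only a single loop, it wraps one
+// CCW shell (an octant of the sphere) into a Polygon with no holes.
+func examplePolygon() *Polygon {
+	shell := LoopFromPoints([]Point{
+		PointFromCoords(1, 0, 0),
+		PointFromCoords(0, 1, 0),
+		PointFromCoords(0, 0, 1),
+	})
+	return PolygonFromLoops([]*Loop{shell}) // one shell at depth 0
+}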
+
+// IsEmpty reports whether this is the special "empty" polygon (consisting of no loops).
+func (p *Polygon) IsEmpty() bool {
+ return len(p.loops) == 0
+}
+
+// IsFull reports whether this is the special "full" polygon (consisting of a
+// single loop that encompasses the entire sphere).
+func (p *Polygon) IsFull() bool {
+ return len(p.loops) == 1 && p.loops[0].IsFull()
+}
+
+// NumLoops returns the number of loops in this polygon.
+func (p *Polygon) NumLoops() int {
+ return len(p.loops)
+}
+
+// Loops returns the loops in this polygon.
+func (p *Polygon) Loops() []*Loop {
+ return p.loops
+}
+
+// Loop returns the loop at the given index. Note that during initialization,
+// the given loops are reordered according to a preorder traversal of the loop
+// nesting hierarchy. This implies that every loop is immediately followed by
+// its descendants. This hierarchy can be traversed using the methods Parent,
+// LastDescendant, and the loop depths tracked in loopDepths.
+func (p *Polygon) Loop(k int) *Loop {
+ return p.loops[k]
+}
+
+// Parent returns the index of the parent of loop k.
+// If the loop does not have a parent, ok=false is returned.
+func (p *Polygon) Parent(k int) (index int, ok bool) {
+	// See where we are in the depth hierarchy.
+ depth := p.loopDepths[k]
+ if depth == 0 {
+ return -1, false
+ }
+
+ // There may be several loops at the same nesting level as us that share a
+	// parent loop with us. (Imagine a slice of Swiss cheese, of which we are one loop.
+	// We don't know how many may be next to us before we get back to our parent loop.)
+ // Move up one position from us, and then begin traversing back through the set of loops
+ // until we find the one that is our parent or we get to the top of the polygon.
+	for k--; k >= 0 && p.loopDepths[k] >= depth; k-- {
+ }
+ return k, true
+}
+
+// LastDescendant returns the index of the last loop that is contained within loop k.
+// If k is negative, it returns the last loop in the polygon.
+// Note that loops are indexed according to a preorder traversal of the nesting
+// hierarchy, so the immediate children of loop k can be found by iterating over
+// the loops (k+1)..LastDescendant(k) and selecting those whose depth is equal
+// to the depth of loop k plus one.
+func (p *Polygon) LastDescendant(k int) int {
+ if k < 0 {
+ return len(p.loops) - 1
+ }
+
+ depth := p.loopDepths[k]
+
+ // Find the next loop immediately past us in the set of loops, and then start
+ // moving down the list until we either get to the end or find the next loop
+	// that is higher up the hierarchy than we are.
+ for k++; k < len(p.loops) && p.loopDepths[k] > depth; k++ {
+ }
+ return k - 1
+}
+
+// loopIsHole reports whether the given loop represents a hole in this polygon.
+func (p *Polygon) loopIsHole(k int) bool {
+ return p.loopDepths[k]&1 != 0
+}
+
+// loopSign returns -1 if this loop represents a hole in this polygon.
+// Otherwise, it returns +1. This is used when computing the area of a polygon.
+// (holes are subtracted from the total area).
+func (p *Polygon) loopSign(k int) int {
+ if p.loopIsHole(k) {
+ return -1
+ }
+ return 1
+}
+
+// CapBound returns a bounding spherical cap.
+func (p *Polygon) CapBound() Cap { return p.bound.CapBound() }
+
+// RectBound returns a bounding latitude-longitude rectangle.
+func (p *Polygon) RectBound() Rect { return p.bound }
+
+// ContainsCell reports whether the polygon contains the given cell.
+// TODO(roberts)
+//func (p *Polygon) ContainsCell(c Cell) bool { ... }
+
+// IntersectsCell reports whether the polygon intersects the given cell.
+// TODO(roberts)
+//func (p *Polygon) IntersectsCell(c Cell) bool { ... }
diff --git a/vendor/github.com/golang/geo/s2/polyline.go b/vendor/github.com/golang/geo/s2/polyline.go
new file mode 100644
index 0000000..6535ede
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/polyline.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/s1"
+)
+
+// Polyline represents a sequence of zero or more vertices connected by
+// straight edges (geodesics). Edges of length 0 and 180 degrees are not
+// allowed, i.e. adjacent vertices should not be identical or antipodal.
+type Polyline []Point
+
+// PolylineFromLatLngs creates a new Polyline from the given LatLngs.
+func PolylineFromLatLngs(points []LatLng) Polyline {
+ p := make(Polyline, len(points))
+ for k, v := range points {
+ p[k] = PointFromLatLng(v)
+ }
+ return p
+}
+
+// Reverse reverses the order of the Polyline vertices.
+func (p Polyline) Reverse() {
+ for i := 0; i < len(p)/2; i++ {
+ p[i], p[len(p)-i-1] = p[len(p)-i-1], p[i]
+ }
+}
+
+// Length returns the length of this Polyline.
+func (p Polyline) Length() s1.Angle {
+ var length s1.Angle
+
+ for i := 1; i < len(p); i++ {
+ length += p[i-1].Distance(p[i])
+ }
+ return length
+}
+
+// Centroid returns the true centroid of the polyline multiplied by the length of the
+// polyline. The result is not unit length, so you may wish to normalize it.
+//
+// Scaling by the Polyline length makes it easy to compute the centroid
+// of several Polylines (by simply adding up their centroids).
+func (p Polyline) Centroid() Point {
+ var centroid Point
+ for i := 1; i < len(p); i++ {
+ // The centroid (multiplied by length) is a vector toward the midpoint
+ // of the edge, whose length is twice the sin of half the angle between
+ // the two vertices. Defining theta to be this angle, we have:
+ vSum := p[i-1].Add(p[i].Vector) // Length == 2*cos(theta)
+ vDiff := p[i-1].Sub(p[i].Vector) // Length == 2*sin(theta)
+
+		// The term added here points along vSum and has length 2*sin(theta).
+ centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))}
+ }
+ return centroid
+}
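+
+// examplePolylineLength is an editorial sketch, not part of the upstream
+// library. It builds a short polyline from hypothetical coordinates and sums
+// its geodesic edge lengths; the result is an angle, so scale by a sphere
+// radius to obtain a distance.
+func examplePolylineLength() s1.Angle {
+	p := PolylineFromLatLngs([]LatLng{
+		LatLngFromDegrees(0, 0),
+		LatLngFromDegrees(0, 1),
+		LatLngFromDegrees(1, 1),
+	})
+	return p.Length() // roughly two degrees of arc
+}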
+
+// Equals reports whether the given Polyline is exactly the same as this one.
+func (p Polyline) Equals(b Polyline) bool {
+ if len(p) != len(b) {
+ return false
+ }
+ for i, v := range p {
+ if v != b[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// CapBound returns the bounding Cap for this Polyline.
+func (p Polyline) CapBound() Cap {
+ return p.RectBound().CapBound()
+}
+
+// RectBound returns the bounding Rect for this Polyline.
+func (p Polyline) RectBound() Rect {
+ rb := NewRectBounder()
+ for _, v := range p {
+ rb.AddPoint(v)
+ }
+ return rb.RectBound()
+}
+
+// ContainsCell reports whether this Polyline contains the given Cell. Always returns false
+// because "containment" is not numerically well-defined except at the Polyline vertices.
+func (p Polyline) ContainsCell(cell Cell) bool {
+ return false
+}
+
+// IntersectsCell reports whether this Polyline intersects the given Cell.
+func (p Polyline) IntersectsCell(cell Cell) bool {
+ if len(p) == 0 {
+ return false
+ }
+
+ // We only need to check whether the cell contains vertex 0 for correctness,
+ // but these tests are cheap compared to edge crossings so we might as well
+ // check all the vertices.
+ for _, v := range p {
+ if cell.ContainsPoint(v) {
+ return true
+ }
+ }
+
+ cellVertices := []Point{
+ cell.Vertex(0),
+ cell.Vertex(1),
+ cell.Vertex(2),
+ cell.Vertex(3),
+ }
+
+ for j := 0; j < 4; j++ {
+ crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], p[0])
+ for i := 1; i < len(p); i++ {
+ if crosser.ChainCrossingSign(p[i]) != DoNotCross {
+ // There is a proper crossing, or two vertices were the same.
+ return true
+ }
+ }
+ }
+ return false
+}
+
+// NumEdges returns the number of edges in this shape.
+func (p Polyline) NumEdges() int {
+ if len(p) == 0 {
+ return 0
+ }
+ return len(p) - 1
+}
+
+// Edge returns endpoints for the given edge index.
+func (p Polyline) Edge(i int) (a, b Point) {
+ return p[i], p[i+1]
+}
+
+// HasInterior returns false as Polylines are not closed.
+func (p Polyline) HasInterior() bool {
+ return false
+}
+
+// ContainsOrigin returns false because there is no interior to contain s2.Origin.
+func (p Polyline) ContainsOrigin() bool {
+ return false
+}
+
+// TODO(roberts): Differences from C++.
+// IsValid
+// Suffix
+// Interpolate/UnInterpolate
+// Project
+// IsPointOnRight
+// Intersects
+// Reverse
+// SubsampleVertices
+// ApproxEqual
+// NearlyCoversPolyline
diff --git a/vendor/github.com/golang/geo/s2/predicates.go b/vendor/github.com/golang/geo/s2/predicates.go
new file mode 100644
index 0000000..700604e
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/predicates.go
@@ -0,0 +1,238 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+// This file contains various predicates that are guaranteed to produce
+// correct, consistent results. They are also relatively efficient. This is
+// achieved by computing conservative error bounds and falling back to high
+// precision or even exact arithmetic when the result is uncertain. Such
+// predicates are useful in implementing robust algorithms.
+//
+// See also EdgeCrosser, which implements various exact
+// edge-crossing predicates more efficiently than can be done here.
+
+import (
+ "math"
+
+ "github.com/golang/geo/r3"
+)
+
+const (
+ // epsilon is a small number that represents a reasonable level of noise between two
+ // values that can be considered to be equal.
+ epsilon = 1e-15
+ // dblEpsilon is a smaller number for values that require more precision.
+ dblEpsilon = 2.220446049250313e-16
+
+ // maxDeterminantError is the maximum error in computing (AxB).C where all vectors
+ // are unit length. Using standard inequalities, it can be shown that
+ //
+ // fl(AxB) = AxB + D where |D| <= (|AxB| + (2/sqrt(3))*|A|*|B|) * e
+ //
+ // where "fl()" denotes a calculation done in floating-point arithmetic,
+ // |x| denotes either absolute value or the L2-norm as appropriate, and
+ // e is a reasonably small value near the noise level of floating point
+ // number accuracy. Similarly,
+ //
+ // fl(B.C) = B.C + d where |d| <= (|B.C| + 2*|B|*|C|) * e .
+ //
+ // Applying these bounds to the unit-length vectors A,B,C and neglecting
+ // relative error (which does not affect the sign of the result), we get
+ //
+ // fl((AxB).C) = (AxB).C + d where |d| <= (3 + 2/sqrt(3)) * e
+ maxDeterminantError = 1.8274 * dblEpsilon
+
+ // detErrorMultiplier is the factor to scale the magnitudes by when checking
+ // for the sign of a set of points with certainty. Using a similar technique to
+ // the one used for maxDeterminantError, the error is at most:
+ //
+ // |d| <= (3 + 6/sqrt(3)) * |A-C| * |B-C| * e
+ //
+ // If the determinant magnitude is larger than this value then we know
+ // its sign with certainty.
+ detErrorMultiplier = 3.2321 * dblEpsilon
+)
+
+// Direction is an indication of the ordering of a set of points.
+type Direction int
+
+// These are the three options for the direction of a set of points.
+const (
+ Clockwise Direction = -1
+ Indeterminate Direction = 0
+ CounterClockwise Direction = 1
+)
+
+// Sign returns true if the points A, B, C are strictly counterclockwise,
+// and returns false if the points are clockwise or collinear (i.e. if they are all
+// contained on some great circle).
+//
+// Due to numerical errors, situations may arise that are mathematically
+// impossible, e.g. ABC may be considered strictly CCW while BCA is not.
+// However, the implementation guarantees the following:
+//
+// If Sign(a,b,c), then !Sign(c,b,a) for all a,b,c.
+func Sign(a, b, c Point) bool {
+ // NOTE(dnadasi): In the C++ API the equivalent method here was known as "SimpleSign".
+
+ // We compute the signed volume of the parallelepiped ABC. The usual
+ // formula for this is (A ⨯ B) · C, but we compute it here using (C ⨯ A) · B
+ // in order to ensure that ABC and CBA are not both CCW. This follows
+ // from the following identities (which are true numerically, not just
+ // mathematically):
+ //
+ // (1) x ⨯ y == -(y ⨯ x)
+ // (2) -x · y == -(x · y)
+ return c.Cross(a.Vector).Dot(b.Vector) > 0
+}
+
+// RobustSign returns a Direction representing the ordering of the points.
+// CounterClockwise is returned if the points are in counter-clockwise order,
+// Clockwise for clockwise, and Indeterminate if any two points are the same (collinear)
+// or the sign could not be determined.
+//
+// This function has additional logic to make sure that the above properties hold even
+// when the three points are coplanar, and to deal with the limitations of
+// floating-point arithmetic.
+//
+// RobustSign satisfies the following conditions:
+//
+// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a
+// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c
+// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c
+//
+// In other words:
+//
+// (1) The result is Indeterminate if and only if two points are the same.
+// (2) Rotating the order of the arguments does not affect the result.
+// (3) Exchanging any two arguments inverts the result.
+//
+// On the other hand, note that it is not true in general that
+// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities
+// involving antipodal points.
+func RobustSign(a, b, c Point) Direction {
+ sign := triageSign(a, b, c)
+ if sign == Indeterminate {
+ sign = expensiveSign(a, b, c)
+ }
+ return sign
+}
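+
+// For illustration, a sketch of these properties using PointFromCoords (as used
+// elsewhere in this package):
+//
+//   a := PointFromCoords(1, 0, 0)
+//   b := PointFromCoords(0, 1, 0)
+//   c := PointFromCoords(0, 0, 1)
+//   RobustSign(a, b, c) // CounterClockwise
+//   RobustSign(b, c, a) // CounterClockwise (rotation preserves the result)
+//   RobustSign(c, b, a) // Clockwise (swapping two arguments inverts it)
+//   RobustSign(a, a, c) // Indeterminate (two points are equal)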
+
+// stableSign reports the direction sign of the points in a numerically stable way.
+// Unlike triageSign, this method can usually compute the correct determinant sign
+// even when all three points are as collinear as possible. For example if three
+// points are spaced 1km apart along a random line on the Earth's surface using
+// the nearest representable points, there is only a 0.4% chance that this method
+// will not be able to find the determinant sign. The probability of failure
+// decreases as the points get closer together; if the collinear points are 1 meter
+// apart, the failure rate drops to 0.0004%.
+//
+// This method could be extended to also handle nearly-antipodal points, but antipodal
+// points are rare in practice so it seems better to simply fall back to
+// exact arithmetic in that case.
+func stableSign(a, b, c Point) Direction {
+ ab := b.Sub(a.Vector)
+ ab2 := ab.Norm2()
+ bc := c.Sub(b.Vector)
+ bc2 := bc.Norm2()
+ ca := a.Sub(c.Vector)
+ ca2 := ca.Norm2()
+
+ // Now compute the determinant ((A-C)x(B-C)).C, where the vertices have been
+ // cyclically permuted if necessary so that AB is the longest edge. (This
+ // minimizes the magnitude of cross product.) At the same time we also
+ // compute the maximum error in the determinant.
+
+ // The two shortest edges, pointing away from their common point.
+ var e1, e2, op r3.Vector
+ if ab2 >= bc2 && ab2 >= ca2 {
+ // AB is the longest edge.
+ e1, e2, op = ca, bc, c.Vector
+ } else if bc2 >= ca2 {
+ // BC is the longest edge.
+ e1, e2, op = ab, ca, a.Vector
+ } else {
+ // CA is the longest edge.
+ e1, e2, op = bc, ab, b.Vector
+ }
+
+ det := -e1.Cross(e2).Dot(op)
+ maxErr := detErrorMultiplier * math.Sqrt(e1.Norm2()*e2.Norm2())
+
+ // If the determinant isn't zero, within maxErr, we know definitively the point ordering.
+ if det > maxErr {
+ return CounterClockwise
+ }
+ if det < -maxErr {
+ return Clockwise
+ }
+ return Indeterminate
+}
+
+// triageSign returns the direction sign of the points. It returns Indeterminate if two
+// points are identical or the result is uncertain. Uncertain cases can be resolved, if
+// desired, by calling expensiveSign.
+//
+// The purpose of this method is to allow additional cheap tests to be done without
+// calling expensiveSign.
+func triageSign(a, b, c Point) Direction {
+ det := a.Cross(b.Vector).Dot(c.Vector)
+ if det > maxDeterminantError {
+ return CounterClockwise
+ }
+ if det < -maxDeterminantError {
+ return Clockwise
+ }
+ return Indeterminate
+}
+
+// expensiveSign reports the direction sign of the points. It returns Indeterminate
+// if two of the input points are the same. It uses multiple-precision arithmetic
+// to ensure that its results are always self-consistent.
+func expensiveSign(a, b, c Point) Direction {
+ // Return Indeterminate if and only if two points are the same.
+ // This ensures RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a.
+ // ie. Property 1 of RobustSign.
+ if a == b || b == c || c == a {
+ return Indeterminate
+ }
+
+ // Next we try recomputing the determinant still using floating-point
+ // arithmetic but in a more precise way. This is more expensive than the
+ // simple calculation done by triageSign, but it is still *much* cheaper
+ // than using arbitrary-precision arithmetic. This optimization is able to
+ // compute the correct determinant sign in virtually all cases except when
+ // the three points are truly collinear (e.g., three points on the equator).
+ detSign := stableSign(a, b, c)
+ if detSign != Indeterminate {
+ return detSign
+ }
+
+ // Otherwise fall back to exact arithmetic and symbolic permutations.
+ return exactSign(a, b, c, false)
+}
+
+// exactSign reports the direction sign of the points using exact precision arithmetic.
+func exactSign(a, b, c Point, perturb bool) Direction {
+ // In the C++ version, the final computation is performed using OpenSSL's
+ // Bignum exact precision math library. The existence of an equivalent
+ // library in Go is indeterminate. In C++, using the exact precision library
+ // to solve this stage is ~300x slower than the above checks.
+ // TODO(roberts): Select and incorporate an appropriate Go exact precision
+ // floating point library for the remaining calculations.
+ return Indeterminate
+}
diff --git a/vendor/github.com/golang/geo/s2/rect.go b/vendor/github.com/golang/geo/s2/rect.go
new file mode 100644
index 0000000..134dc7e
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/rect.go
@@ -0,0 +1,426 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/golang/geo/r1"
+ "github.com/golang/geo/s1"
+)
+
+// Rect represents a closed latitude-longitude rectangle.
+type Rect struct {
+ Lat r1.Interval
+ Lng s1.Interval
+}
+
+var (
+ validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2}
+ validRectLngRange = s1.FullInterval()
+)
+
+// EmptyRect returns the empty rectangle.
+func EmptyRect() Rect { return Rect{r1.EmptyInterval(), s1.EmptyInterval()} }
+
+// FullRect returns the full rectangle.
+func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} }
+
+// RectFromLatLng constructs a rectangle containing a single point p.
+func RectFromLatLng(p LatLng) Rect {
+ return Rect{
+ Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()},
+ Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()},
+ }
+}
+
+// RectFromCenterSize constructs a rectangle with the given size and center.
+// center needs to be normalized, but size does not. The latitude
+// interval of the result is clamped to [-90,90] degrees, and the longitude
+// interval of the result is FullRect() if and only if the longitude size is
+// 360 degrees or more.
+//
+// Examples of clamping (in degrees):
+// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160]
+// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180]
+// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155]
+func RectFromCenterSize(center, size LatLng) Rect {
+ half := LatLng{size.Lat / 2, size.Lng / 2}
+ return RectFromLatLng(center).expanded(half)
+}
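+
+// For instance, the first clamping example above can be written as (a sketch;
+// LatLngFromDegrees is assumed from this package's latlng.go):
+//
+//   r := RectFromCenterSize(LatLngFromDegrees(80, 170), LatLngFromDegrees(40, 60))
+//   // r spans lat [60, 90] and lng [140, -160] in degrees.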
+
+// IsValid returns true iff the rectangle is valid.
+// This requires Lat ⊆ [-π/2,π/2] and Lng ⊆ [-π,π], and Lat = ∅ iff Lng = ∅.
+func (r Rect) IsValid() bool {
+ return math.Abs(r.Lat.Lo) <= math.Pi/2 &&
+ math.Abs(r.Lat.Hi) <= math.Pi/2 &&
+ r.Lng.IsValid() &&
+ r.Lat.IsEmpty() == r.Lng.IsEmpty()
+}
+
+// IsEmpty reports whether the rectangle is empty.
+func (r Rect) IsEmpty() bool { return r.Lat.IsEmpty() }
+
+// IsFull reports whether the rectangle is full.
+func (r Rect) IsFull() bool { return r.Lat.Equal(validRectLatRange) && r.Lng.IsFull() }
+
+// IsPoint reports whether the rectangle is a single point.
+func (r Rect) IsPoint() bool { return r.Lat.Lo == r.Lat.Hi && r.Lng.Lo == r.Lng.Hi }
+
+// Vertex returns the i-th vertex of the rectangle (i = 0,1,2,3) in CCW order
+// (lower left, lower right, upper right, upper left).
+func (r Rect) Vertex(i int) LatLng {
+ var lat, lng float64
+
+ switch i {
+ case 0:
+ lat = r.Lat.Lo
+ lng = r.Lng.Lo
+ case 1:
+ lat = r.Lat.Lo
+ lng = r.Lng.Hi
+ case 2:
+ lat = r.Lat.Hi
+ lng = r.Lng.Hi
+ case 3:
+ lat = r.Lat.Hi
+ lng = r.Lng.Lo
+ }
+ return LatLng{s1.Angle(lat) * s1.Radian, s1.Angle(lng) * s1.Radian}
+}
+
+// Lo returns one corner of the rectangle.
+func (r Rect) Lo() LatLng {
+ return LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(r.Lng.Lo) * s1.Radian}
+}
+
+// Hi returns the other corner of the rectangle.
+func (r Rect) Hi() LatLng {
+ return LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(r.Lng.Hi) * s1.Radian}
+}
+
+// Center returns the center of the rectangle.
+func (r Rect) Center() LatLng {
+ return LatLng{s1.Angle(r.Lat.Center()) * s1.Radian, s1.Angle(r.Lng.Center()) * s1.Radian}
+}
+
+// Size returns the size of the Rect.
+func (r Rect) Size() LatLng {
+ return LatLng{s1.Angle(r.Lat.Length()) * s1.Radian, s1.Angle(r.Lng.Length()) * s1.Radian}
+}
+
+// Area returns the surface area of the Rect.
+func (r Rect) Area() float64 {
+ if r.IsEmpty() {
+ return 0
+ }
+ capDiff := math.Abs(math.Sin(r.Lat.Hi) - math.Sin(r.Lat.Lo))
+ return r.Lng.Length() * capDiff
+}
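+
+// As a quick sanity check of the formula above: FullRect().Area() is
+// 2π * |sin(π/2) - sin(-π/2)| = 4π, the surface area of the unit sphere,
+// while EmptyRect().Area() is 0.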
+
+// AddPoint increases the size of the rectangle to include the given point.
+func (r Rect) AddPoint(ll LatLng) Rect {
+ if !ll.IsValid() {
+ return r
+ }
+ return Rect{
+ Lat: r.Lat.AddPoint(ll.Lat.Radians()),
+ Lng: r.Lng.AddPoint(ll.Lng.Radians()),
+ }
+}
+
+// expanded returns a rectangle that has been expanded by margin.Lat on each side
+// in the latitude direction, and by margin.Lng on each side in the longitude
+// direction. If either margin is negative, then it shrinks the rectangle on
+// the corresponding sides instead. The resulting rectangle may be empty.
+//
+// The latitude-longitude space has the topology of a cylinder. Longitudes
+// "wrap around" at +/-180 degrees, while latitudes are clamped to range [-90, 90].
+// This means that any expansion (positive or negative) of the full longitude range
+// remains full (since the "rectangle" is actually a continuous band around the
+// cylinder), while expansion of the full latitude range remains full only if the
+// margin is positive.
+//
+// If either the latitude or longitude interval becomes empty after
+// expansion by a negative margin, the result is empty.
+//
+// Note that if an expanded rectangle contains a pole, it may not contain
+// all possible lat/lng representations of that pole, e.g., both points [π/2,0]
+// and [π/2,1] represent the same pole, but they might not be contained by the
+// same Rect.
+//
+// If you are trying to grow a rectangle by a certain distance on the
+// sphere (e.g. 5km), refer to the ExpandedByDistance() C++ method implementation
+// instead.
+func (r Rect) expanded(margin LatLng) Rect {
+ lat := r.Lat.Expanded(margin.Lat.Radians())
+ lng := r.Lng.Expanded(margin.Lng.Radians())
+
+ if lat.IsEmpty() || lng.IsEmpty() {
+ return EmptyRect()
+ }
+
+ return Rect{
+ Lat: lat.Intersection(validRectLatRange),
+ Lng: lng,
+ }
+}
+
+func (r Rect) String() string { return fmt.Sprintf("[Lo%v, Hi%v]", r.Lo(), r.Hi()) }
+
+// PolarClosure returns the rectangle unmodified if it does not include either pole.
+// If it includes either pole, PolarClosure returns an expansion of the rectangle along
+// the longitudinal range to include all possible representations of the contained poles.
+func (r Rect) PolarClosure() Rect {
+ if r.Lat.Lo == -math.Pi/2 || r.Lat.Hi == math.Pi/2 {
+ return Rect{r.Lat, s1.FullInterval()}
+ }
+ return r
+}
+
+// Union returns the smallest Rect containing the union of this rectangle and the given rectangle.
+func (r Rect) Union(other Rect) Rect {
+ return Rect{
+ Lat: r.Lat.Union(other.Lat),
+ Lng: r.Lng.Union(other.Lng),
+ }
+}
+
+// Intersection returns the smallest rectangle containing the intersection of
+// this rectangle and the given rectangle. Note that the region of intersection
+// may consist of two disjoint rectangles, in which case a single rectangle
+// spanning both of them is returned.
+func (r Rect) Intersection(other Rect) Rect {
+ lat := r.Lat.Intersection(other.Lat)
+ lng := r.Lng.Intersection(other.Lng)
+
+ if lat.IsEmpty() || lng.IsEmpty() {
+ return EmptyRect()
+ }
+ return Rect{lat, lng}
+}
+
+// Intersects reports whether this rectangle and the other have any points in common.
+func (r Rect) Intersects(other Rect) bool {
+ return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng)
+}
+
+// CapBound returns a cap that contains Rect.
+func (r Rect) CapBound() Cap {
+ // We consider two possible bounding caps, one whose axis passes
+ // through the center of the lat-long rectangle and one whose axis
+ // is the north or south pole. We return the smaller of the two caps.
+
+ if r.IsEmpty() {
+ return EmptyCap()
+ }
+
+ var poleZ, poleAngle float64
+ if r.Lat.Hi+r.Lat.Lo < 0 {
+ // South pole axis yields smaller cap.
+ poleZ = -1
+ poleAngle = math.Pi/2 + r.Lat.Hi
+ } else {
+ poleZ = 1
+ poleAngle = math.Pi/2 - r.Lat.Lo
+ }
+ poleCap := CapFromCenterAngle(PointFromCoords(0, 0, poleZ), s1.Angle(poleAngle)*s1.Radian)
+
+ // For bounding rectangles that span 180 degrees or less in longitude, the
+ // maximum cap size is achieved at one of the rectangle vertices. For
+ // rectangles that are larger than 180 degrees, we punt and always return a
+ // bounding cap centered at one of the two poles.
+ if math.Remainder(r.Lng.Hi-r.Lng.Lo, 2*math.Pi) >= 0 && r.Lng.Hi-r.Lng.Lo < 2*math.Pi {
+ midCap := CapFromPoint(PointFromLatLng(r.Center())).AddPoint(PointFromLatLng(r.Lo())).AddPoint(PointFromLatLng(r.Hi()))
+ if midCap.Height() < poleCap.Height() {
+ return midCap
+ }
+ }
+ return poleCap
+}
+
+// RectBound returns itself.
+func (r Rect) RectBound() Rect {
+ return r
+}
+
+// Contains reports whether this Rect contains the other Rect.
+func (r Rect) Contains(other Rect) bool {
+ return r.Lat.ContainsInterval(other.Lat) && r.Lng.ContainsInterval(other.Lng)
+}
+
+// ContainsCell reports whether the given Cell is contained by this Rect.
+func (r Rect) ContainsCell(c Cell) bool {
+ // A latitude-longitude rectangle contains a cell if and only if it contains
+ // the cell's bounding rectangle. This test is exact from a mathematical
+ // point of view, assuming that the bounds returned by Cell.RectBound()
+ // are tight. However, note that there can be a loss of precision when
+ // converting between representations -- for example, if an s2.Cell is
+ // converted to a polygon, the polygon's bounding rectangle may not contain
+ // the cell's bounding rectangle. This has some slightly unexpected side
+ // effects; for instance, if one creates an s2.Polygon from an s2.Cell, the
+ // polygon will contain the cell, but the polygon's bounding box will not.
+ return r.Contains(c.RectBound())
+}
+
+// ContainsLatLng reports whether the given LatLng is within the Rect.
+func (r Rect) ContainsLatLng(ll LatLng) bool {
+ if !ll.IsValid() {
+ return false
+ }
+ return r.Lat.Contains(ll.Lat.Radians()) && r.Lng.Contains(ll.Lng.Radians())
+}
+
+// ContainsPoint reports whether the given Point is within the Rect.
+func (r Rect) ContainsPoint(p Point) bool {
+ return r.ContainsLatLng(LatLngFromPoint(p))
+}
+
+// intersectsLatEdge reports whether the edge AB intersects the given edge of constant
+// latitude. Requires the points to have unit length.
+func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool {
+ // Unfortunately, lines of constant latitude are curves on
+ // the sphere. They can intersect a straight edge in 0, 1, or 2 points.
+
+ // First, compute the normal to the plane AB that points vaguely north.
+ z := a.PointCross(b)
+ if z.Z < 0 {
+ z = Point{z.Mul(-1)}
+ }
+
+ // Extend this to an orthonormal frame (x,y,z) where x is the direction
+ // where the great circle through AB achieves its maximum latitude.
+ y := z.PointCross(PointFromCoords(0, 0, 1))
+ x := y.Cross(z.Vector)
+
+ // Compute the angle "theta" from the x-axis (in the x-y plane defined
+ // above) where the great circle intersects the given line of latitude.
+ sinLat := math.Sin(float64(lat))
+ if math.Abs(sinLat) >= x.Z {
+ // The great circle does not reach the given latitude.
+ return false
+ }
+
+ cosTheta := sinLat / x.Z
+ sinTheta := math.Sqrt(1 - cosTheta*cosTheta)
+ theta := math.Atan2(sinTheta, cosTheta)
+
+ // The candidate intersection points are located +/- theta in the x-y
+ // plane. For an intersection to be valid, we need to check that the
+ // intersection point is contained in the interior of the edge AB and
+ // also that it is contained within the given longitude interval "lng".
+
+ // Compute the range of theta values spanned by the edge AB.
+ abTheta := s1.IntervalFromPointPair(
+ math.Atan2(a.Dot(y.Vector), a.Dot(x)),
+ math.Atan2(b.Dot(y.Vector), b.Dot(x)))
+
+ if abTheta.Contains(theta) {
+ // Check if the intersection point is also in the given lng interval.
+ isect := x.Mul(cosTheta).Add(y.Mul(sinTheta))
+ if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+ return true
+ }
+ }
+
+ if abTheta.Contains(-theta) {
+ // Check if the other intersection point is also in the given lng interval.
+ isect := x.Mul(cosTheta).Sub(y.Mul(sinTheta))
+ if lng.Contains(math.Atan2(isect.Y, isect.X)) {
+ return true
+ }
+ }
+ return false
+}
+
+// intersectsLngEdge reports whether the edge AB intersects the given edge of constant
+// longitude. Requires the points to have unit length.
+func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool {
+ // The nice thing about edges of constant longitude is that
+ // they are straight lines on the sphere (geodesics).
+ return SimpleCrossing(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}),
+ PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng}))
+}
+
+// IntersectsCell reports whether this rectangle intersects the given cell. This is an
+// exact test and may be fairly expensive.
+func (r Rect) IntersectsCell(c Cell) bool {
+ // First we eliminate the cases where one region completely contains the
+ // other. Once these are disposed of, then the regions will intersect
+ // if and only if their boundaries intersect.
+ if r.IsEmpty() {
+ return false
+ }
+ if r.ContainsPoint(Point{c.id.rawPoint()}) {
+ return true
+ }
+ if c.ContainsPoint(PointFromLatLng(r.Center())) {
+ return true
+ }
+
+ // Quick rejection test (not required for correctness).
+ if !r.Intersects(c.RectBound()) {
+ return false
+ }
+
+ // Precompute the cell vertices as points and latitude-longitudes. We also
+ // check whether the Cell contains any corner of the rectangle, or
+ // vice-versa, since the edge-crossing tests only check the edge interiors.
+ vertices := [4]Point{}
+ latlngs := [4]LatLng{}
+
+ for i := range vertices {
+ vertices[i] = c.Vertex(i)
+ latlngs[i] = LatLngFromPoint(vertices[i])
+ if r.ContainsLatLng(latlngs[i]) {
+ return true
+ }
+ if c.ContainsPoint(PointFromLatLng(r.Vertex(i))) {
+ return true
+ }
+ }
+
+ // Now check whether the boundaries intersect. Unfortunately, a
+ // latitude-longitude rectangle does not have straight edges: two edges
+ // are curved, and at least one of them is concave.
+ for i := range vertices {
+ edgeLng := s1.IntervalFromEndpoints(latlngs[i].Lng.Radians(), latlngs[(i+1)&3].Lng.Radians())
+ if !r.Lng.Intersects(edgeLng) {
+ continue
+ }
+
+ a := vertices[i]
+ b := vertices[(i+1)&3]
+ if edgeLng.Contains(r.Lng.Lo) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Lo)) {
+ return true
+ }
+ if edgeLng.Contains(r.Lng.Hi) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Hi)) {
+ return true
+ }
+ if intersectsLatEdge(a, b, s1.Angle(r.Lat.Lo), r.Lng) {
+ return true
+ }
+ if intersectsLatEdge(a, b, s1.Angle(r.Lat.Hi), r.Lng) {
+ return true
+ }
+ }
+ return false
+}
+
+// BUG: The major differences from the C++ version are:
+// - GetCentroid, Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point)
diff --git a/vendor/github.com/golang/geo/s2/region.go b/vendor/github.com/golang/geo/s2/region.go
new file mode 100644
index 0000000..f1e127e
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/region.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+// A Region represents a two-dimensional region on the unit sphere.
+//
+// The purpose of this interface is to allow complex regions to be
+// approximated as simpler regions. The interface is restricted to methods
+// that are useful for computing approximations.
+type Region interface {
+ // CapBound returns a bounding spherical cap. This is not guaranteed to be exact.
+ CapBound() Cap
+
+ // RectBound returns a bounding latitude-longitude rectangle that contains
+ // the region. The bounds are not guaranteed to be tight.
+ RectBound() Rect
+
+ // ContainsCell reports whether the region completely contains the given cell.
+ // It returns false if containment could not be determined.
+ ContainsCell(c Cell) bool
+
+ // IntersectsCell reports whether the region intersects the given cell. It
+ // returns true if the region intersects the cell or if intersection could not
+ // be determined; it returns false only if the region is known not to intersect.
+ IntersectsCell(c Cell) bool
+}
+
+// Enforce interface satisfaction.
+var (
+ _ Region = Cap{}
+ _ Region = Cell{}
+ _ Region = (*CellUnion)(nil)
+ //_ Region = (*Polygon)(nil)
+ _ Region = Polyline{}
+ _ Region = Rect{}
+)
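+
+// As a sketch of how the interface composes with the rest of the package, any
+// Region can be handed to a RegionCoverer (see regioncoverer.go), e.g.:
+//
+//   func coveringSize(r Region, rc *RegionCoverer) int {
+//       return len(rc.Covering(r))
+//   }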
diff --git a/vendor/github.com/golang/geo/s2/regioncoverer.go b/vendor/github.com/golang/geo/s2/regioncoverer.go
new file mode 100644
index 0000000..4a44f28
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/regioncoverer.go
@@ -0,0 +1,465 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "container/heap"
+)
+
+// RegionCoverer allows arbitrary regions to be approximated as unions of cells (CellUnion).
+// This is useful for implementing various sorts of search and precomputation operations.
+//
+// Typical usage:
+//
+// rc := &s2.RegionCoverer{MaxLevel: 30, MaxCells: 5}
+// r := s2.Region(CapFromCenterArea(center, area))
+// covering := rc.Covering(r)
+//
+// This yields a CellUnion of at most 5 cells that is guaranteed to cover the
+// given region (a disc-shaped region on the sphere).
+//
+// For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used.
+// This effectively allows the branching factor of the S2 CellID hierarchy to be increased.
+// Currently the only parameter values allowed are 0/1, 2, or 3, corresponding to
+// branching factors of 4, 16, and 64 respectively.
+//
+// Note the following:
+//
+// - MinLevel takes priority over MaxCells, i.e. cells below the given level will
+// never be used even if this causes a large number of cells to be returned.
+//
+// - For any setting of MaxCells, up to 6 cells may be returned if that
+// is the minimum number of cells required (e.g. if the region intersects
+// all six face cells). Up to 3 cells may be returned even for very tiny
+// convex regions if they happen to be located at the intersection of
+// three cube faces.
+//
+// - For any setting of MaxCells, an arbitrary number of cells may be
+// returned if MinLevel is too high for the region being approximated.
+//
+// - If MaxCells is less than 4, the area of the covering may be
+// arbitrarily large compared to the area of the original region even if
+// the region is convex (e.g. a Cap or Rect).
+//
+// The approximation algorithm is not optimal but does a pretty good job in
+// practice. The output does not always use the maximum number of cells
+// allowed, both because this would not always yield a better approximation,
+// and because MaxCells is a limit on how much work is done exploring the
+// possible covering as well as a limit on the final output size.
+//
+// Because it is an approximation algorithm, one should not rely on the
+// stability of the output. In particular, the output of the covering algorithm
+// may change across different versions of the library.
+//
+// One can also generate interior coverings, which are sets of cells which
+// are entirely contained within a region. Interior coverings can be
+// empty, even for non-empty regions, if there are no cells that satisfy
+// the provided constraints and are contained by the region. Note that for
+// performance reasons, it is wise to specify a MaxLevel when computing
+// interior coverings - otherwise for regions with small or zero area, the
+// algorithm may spend a lot of time subdividing cells all the way to leaf
+// level to try to find contained cells.
+type RegionCoverer struct {
+ MinLevel int // the minimum cell level to be used.
+ MaxLevel int // the maximum cell level to be used.
+ LevelMod int // the LevelMod to be used.
+ MaxCells int // the maximum desired number of cells in the approximation.
+}
+
+type coverer struct {
+ minLevel int // the minimum cell level to be used.
+ maxLevel int // the maximum cell level to be used.
+ levelMod int // the LevelMod to be used.
+ maxCells int // the maximum desired number of cells in the approximation.
+ region Region
+ result CellUnion
+ pq priorityQueue
+ interiorCovering bool
+}
+
+type candidate struct {
+ cell Cell
+ terminal bool // Cell should not be expanded further.
+ numChildren int // Number of children that intersect the region.
+ children []*candidate // Actual size may be 0, 4, 16, or 64 elements.
+ priority int // Priority of the candidate.
+}
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+type priorityQueue []*candidate
+
+func (pq priorityQueue) Len() int {
+ return len(pq)
+}
+
+func (pq priorityQueue) Less(i, j int) bool {
+ // We want Pop to give us the highest, not lowest, priority so we use greater than here.
+ return pq[i].priority > pq[j].priority
+}
+
+func (pq priorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *priorityQueue) Push(x interface{}) {
+ item := x.(*candidate)
+ *pq = append(*pq, item)
+}
+
+func (pq *priorityQueue) Pop() interface{} {
+ item := (*pq)[len(*pq)-1]
+ *pq = (*pq)[:len(*pq)-1]
+ return item
+}
+
+func (pq *priorityQueue) Reset() {
+ *pq = (*pq)[:0]
+}
+
+// newCandidate returns a new candidate with no children if the cell intersects the given region.
+// The candidate is marked as terminal if it should not be expanded further.
+func (c *coverer) newCandidate(cell Cell) *candidate {
+ if !c.region.IntersectsCell(cell) {
+ return nil
+ }
+ cand := &candidate{cell: cell}
+ level := int(cell.level)
+ if level >= c.minLevel {
+ if c.interiorCovering {
+ if c.region.ContainsCell(cell) {
+ cand.terminal = true
+ } else if level+c.levelMod > c.maxLevel {
+ return nil
+ }
+ } else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) {
+ cand.terminal = true
+ }
+ }
+ return cand
+}
+
+// expandChildren populates the children of the candidate by expanding the given number of
+// levels from the given cell. Returns the number of children that were marked "terminal".
+func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int {
+ numLevels--
+ var numTerminals int
+ last := cell.id.ChildEnd()
+ for ci := cell.id.ChildBegin(); ci != last; ci = ci.Next() {
+ childCell := CellFromCellID(ci)
+ if numLevels > 0 {
+ if c.region.IntersectsCell(childCell) {
+ numTerminals += c.expandChildren(cand, childCell, numLevels)
+ }
+ continue
+ }
+ if child := c.newCandidate(childCell); child != nil {
+ cand.children = append(cand.children, child)
+ cand.numChildren++
+ if child.terminal {
+ numTerminals++
+ }
+ }
+ }
+ return numTerminals
+}
+
+// addCandidate adds the given candidate to the result if it is marked as "terminal",
+// otherwise expands its children and inserts it into the priority queue.
+// Passing an argument of nil does nothing.
+func (c *coverer) addCandidate(cand *candidate) {
+ if cand.terminal {
+ c.result = append(c.result, cand.cell.id)
+ return
+ }
+
+ // Expand one level at a time until we hit minLevel to ensure that we don't skip over it.
+ numLevels := c.levelMod
+ level := int(cand.cell.level)
+ if level < c.minLevel {
+ numLevels = 1
+ }
+
+ numTerminals := c.expandChildren(cand, cand.cell, numLevels)
+ maxChildrenShift := uint(2 * c.levelMod)
+ if cand.numChildren == 0 {
+ return
+ } else if !c.interiorCovering && numTerminals == 1<<maxChildrenShift && level >= c.minLevel {
+ // Optimization: add the parent cell rather than all of its children.
+ // We can't do this for interior coverings, since the children just
+ // intersect the region, but may not be contained by it - we need to
+ // subdivide them further.
+ cand.terminal = true
+ c.addCandidate(cand)
+ } else {
+ // We negate the priority so that smaller absolute priorities are returned
+ // first. The heuristic is designed to refine the largest cells first,
+ // since those are where we have the largest potential gain. Among cells
+ // of the same size, we prefer the cells with the fewest children.
+ // Finally, among cells with equal numbers of children we prefer those
+ // with the smallest number of children that cannot be refined further.
+ cand.priority = -(((level<<maxChildrenShift)+cand.numChildren)<<maxChildrenShift + numTerminals)
+ heap.Push(&c.pq, cand)
+ }
+}
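+
+// As a worked example of the priority above: with levelMod = 1 the shift is 2,
+// so a level-3 candidate with 2 intersecting children, 1 of them terminal, gets
+// priority -(((3<<2)+2)<<2 + 1) = -57, while a level-2 candidate with the same
+// children gets -41 and is therefore popped from the queue (refined) first.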
+
+// adjustLevel returns the reduced "level" so that it satisfies levelMod. Levels smaller than minLevel
+// are not affected (since cells at these levels are eventually expanded).
+func (c *coverer) adjustLevel(level int) int {
+ if c.levelMod > 1 && level > c.minLevel {
+ level -= (level - c.minLevel) % c.levelMod
+ }
+ return level
+}
+
+// adjustCellLevels ensures that all cells with level > minLevel also satisfy levelMod,
+// by replacing them with an ancestor if necessary. Cell levels smaller
+// than minLevel are not modified (see adjustLevel). The output is
+// then normalized to ensure that no redundant cells are present.
+func (c *coverer) adjustCellLevels(cells *CellUnion) {
+ if c.levelMod == 1 {
+ return
+ }
+
+ var out int
+ for _, ci := range *cells {
+ level := ci.Level()
+ newLevel := c.adjustLevel(level)
+ if newLevel != level {
+ ci = ci.Parent(newLevel)
+ }
+ if out > 0 && (*cells)[out-1].Contains(ci) {
+ continue
+ }
+ for out > 0 && ci.Contains((*cells)[out-1]) {
+ out--
+ }
+ (*cells)[out] = ci
+ out++
+ }
+ *cells = (*cells)[:out]
+}
+
+// initialCandidates computes a set of initial candidates that cover the given region.
+func (c *coverer) initialCandidates() {
+ // Optimization: start with a small (usually 4 cell) covering of the region's bounding cap.
+ temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: min(4, c.maxCells)}
+
+ cells := temp.FastCovering(c.region.CapBound())
+ c.adjustCellLevels(&cells)
+ for _, ci := range cells {
+ if cand := c.newCandidate(CellFromCellID(ci)); cand != nil {
+ c.addCandidate(cand)
+ }
+ }
+}
+
+// coveringInternal generates a covering and stores it in result.
+// Strategy: Start with the 6 faces of the cube. Discard any
+// that do not intersect the shape. Then repeatedly choose the
+// largest cell that intersects the shape and subdivide it.
+//
+// result contains the cells that will be part of the output, while pq
+// contains cells that we may still subdivide further. Cells that are
+// entirely contained within the region are immediately added to the output,
+// while cells that do not intersect the region are immediately discarded.
+// Therefore pq only contains cells that partially intersect the region.
+// Candidates are prioritized first according to cell size (larger cells
+// first), then by the number of intersecting children they have (fewest
+// children first), and then by the number of fully contained children
+// (fewest children first).
+func (c *coverer) coveringInternal(region Region) {
+ c.region = region
+
+ c.initialCandidates()
+ for c.pq.Len() > 0 && (!c.interiorCovering || len(c.result) < c.maxCells) {
+ cand := heap.Pop(&c.pq).(*candidate)
+
+ // For interior covering we keep subdividing no matter how many children
+ // candidate has. If we reach MaxCells before expanding all children,
+ // we will just use some of them.
+ // For exterior covering we cannot do this, because result has to cover the
+ // whole region, so all children have to be used.
+ // candidate.numChildren == 1 case takes care of the situation when we
+ // already have more than MaxCells in result (minLevel is too high).
+ // Subdividing of the candidate with one child does no harm in this case.
+ if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells {
+ for _, child := range cand.children {
+ if !c.interiorCovering || len(c.result) < c.maxCells {
+ c.addCandidate(child)
+ }
+ }
+ } else {
+ cand.terminal = true
+ c.addCandidate(cand)
+ }
+ }
+ c.pq.Reset()
+ c.region = nil
+}
+
+// newCoverer returns an instance of coverer.
+func (rc *RegionCoverer) newCoverer() *coverer {
+ return &coverer{
+ minLevel: max(0, min(maxLevel, rc.MinLevel)),
+ maxLevel: max(0, min(maxLevel, rc.MaxLevel)),
+ levelMod: max(1, min(3, rc.LevelMod)),
+ maxCells: rc.MaxCells,
+ }
+}
+
+// Covering returns a CellUnion that covers the given region and satisfies the various restrictions.
+func (rc *RegionCoverer) Covering(region Region) CellUnion {
+ covering := rc.CellUnion(region)
+ covering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
+ return covering
+}
+
+// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions.
+func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion {
+ intCovering := rc.InteriorCellUnion(region)
+ intCovering.Denormalize(max(0, min(maxLevel, rc.MinLevel)), max(1, min(3, rc.LevelMod)))
+ return intCovering
+}
+
+// CellUnion returns a normalized CellUnion that covers the given region and
+// satisfies the restrictions except for minLevel and levelMod. These criteria
+// cannot be satisfied using a cell union because cell unions are
+// automatically normalized by replacing four child cells with their parent
+// whenever possible. (Note that the list of cell ids passed to the CellUnion
+// constructor does in fact satisfy all the given restrictions.)
+func (rc *RegionCoverer) CellUnion(region Region) CellUnion {
+ c := rc.newCoverer()
+ c.coveringInternal(region)
+ cu := c.result
+ cu.Normalize()
+ return cu
+}
+
+// InteriorCellUnion returns a normalized CellUnion that is contained within the given region and
+// satisfies the restrictions except for minLevel and levelMod. These criteria
+// cannot be satisfied using a cell union because cell unions are
+// automatically normalized by replacing four child cells with their parent
+// whenever possible. (Note that the list of cell ids passed to the CellUnion
+// constructor does in fact satisfy all the given restrictions.)
+func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion {
+ c := rc.newCoverer()
+ c.interiorCovering = true
+ c.coveringInternal(region)
+ cu := c.result
+ cu.Normalize()
+ return cu
+}
+
+// FastCovering returns a CellUnion that covers the given region similar to Covering,
+// except that this method is much faster and the coverings are not as tight.
+// All of the usual parameters are respected (MaxCells, MinLevel, MaxLevel, and LevelMod),
+// except that the implementation makes no attempt to take advantage of large values of
+// MaxCells. (A small number of cells will always be returned.)
+//
+// This function is useful as a starting point for algorithms that
+// recursively subdivide cells.
+func (rc *RegionCoverer) FastCovering(cap Cap) CellUnion {
+ c := rc.newCoverer()
+ cu := c.rawFastCovering(cap)
+ c.normalizeCovering(&cu)
+ return cu
+}
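+
+// A usage sketch (CapFromCenterAngle and PointFromCoords are used elsewhere in
+// this package; the cap radius here is arbitrary):
+//
+//   rc := &RegionCoverer{MaxLevel: 12, MaxCells: 8}
+//   cu := rc.FastCovering(CapFromCenterAngle(PointFromCoords(0, 0, 1), s1.Angle(0.1)*s1.Radian))
+//   // cu covers the cap with a small number of cells, regardless of MaxCells.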
+
+// rawFastCovering computes a covering of the given cap. In general the covering consists of
+// at most 4 cells (except for very large caps, which may need up to 6 cells).
+// The output is not sorted.
+func (c *coverer) rawFastCovering(cap Cap) CellUnion {
+ var covering CellUnion
+ // Find the maximum level such that the cap contains at most one cell vertex
+ // and such that CellID.VertexNeighbors() can be called.
+ level := min(MinWidthMetric.MaxLevel(2*cap.Radius().Radians()), maxLevel-1)
+ if level == 0 {
+ for face := 0; face < 6; face++ {
+ covering = append(covering, CellIDFromFace(face))
+ }
+ } else {
+ covering = append(covering, cellIDFromPoint(cap.center).VertexNeighbors(level)...)
+ }
+ return covering
+}
+
+// normalizeCovering normalizes the "covering" so that it conforms to the current covering
+// parameters (MaxCells, minLevel, maxLevel, and levelMod).
+// This method makes no attempt to be optimal. In particular, if
+// minLevel > 0 or levelMod > 1 then it may return more than the
+// desired number of cells even when this isn't necessary.
+//
+// Note that when the covering parameters have their default values, almost
+// all of the code in this function is skipped.
+func (c *coverer) normalizeCovering(covering *CellUnion) {
+ // If any cells are too small, or don't satisfy levelMod, then replace them with ancestors.
+ if c.maxLevel < maxLevel || c.levelMod > 1 {
+ for i, ci := range *covering {
+ level := ci.Level()
+ newLevel := c.adjustLevel(min(level, c.maxLevel))
+ if newLevel != level {
+ (*covering)[i] = ci.Parent(newLevel)
+ }
+ }
+ }
+ // Sort the cells and simplify them.
+ covering.Normalize()
+
+ // If there are still too many cells, then repeatedly replace two adjacent
+ // cells in CellID order by their lowest common ancestor.
+ for len(*covering) > c.maxCells {
+ bestIndex := -1
+ bestLevel := -1
+ for i := 0; i+1 < len(*covering); i++ {
+ level, ok := (*covering)[i].CommonAncestorLevel((*covering)[i+1])
+ if !ok {
+ continue
+ }
+ level = c.adjustLevel(level)
+ if level > bestLevel {
+ bestLevel = level
+ bestIndex = i
+ }
+ }
+
+ if bestLevel < c.minLevel {
+ break
+ }
+ (*covering)[bestIndex] = (*covering)[bestIndex].Parent(bestLevel)
+ covering.Normalize()
+ }
+ // Make sure that the covering satisfies minLevel and levelMod,
+ // possibly at the expense of satisfying MaxCells.
+ if c.minLevel > 0 || c.levelMod > 1 {
+ covering.Denormalize(c.minLevel, c.levelMod)
+ }
+}
+
+// BUG(akashagrawal): The differences from the C++ version are: FloodFill, SimpleCovering
diff --git a/vendor/github.com/golang/geo/s2/shapeindex.go b/vendor/github.com/golang/geo/s2/shapeindex.go
new file mode 100644
index 0000000..4bf15e5
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/shapeindex.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2016 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "github.com/golang/geo/r2"
+)
+
+// Shape defines an interface for any s2 type that needs to be indexable.
+type Shape interface {
+ // NumEdges returns the number of edges in this shape.
+ NumEdges() int
+
+ // Edge returns endpoints for the given edge index.
+ Edge(i int) (a, b Point)
+
+ // HasInterior returns true if this shape has an interior.
+ // i.e., the Shape consists of one or more closed non-intersecting loops.
+ HasInterior() bool
+
+ // ContainsOrigin returns true if this shape contains s2.Origin.
+ // Shapes that do not have an interior will return false.
+ ContainsOrigin() bool
+}
+
+// A minimal check for types that should satisfy the Shape interface.
+var (
+ _ Shape = Loop{}
+ _ Shape = Polyline{}
+)
+
+// CellRelation describes the possible relationships between a target cell
+// and the cells of the ShapeIndex. If the target is an index cell or is
+// contained by an index cell, it is Indexed. If the target is subdivided
+// into one or more index cells, it is Subdivided. Otherwise it is Disjoint.
+type CellRelation int
+
+// The possible CellRelations for a ShapeIndex.
+const (
+ Indexed CellRelation = iota
+ Subdivided
+ Disjoint
+)
+
+var (
+ // cellPadding defines the total error when clipping an edge which comes
+ // from two sources:
+ // (1) Clipping the original spherical edge to a cube face (the face edge).
+ // The maximum error in this step is faceClipErrorUVCoord.
+ // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary.
+ // The maximum error in this step is edgeClipErrorUVCoord.
+ // Finally, since we encounter the same errors when clipping query edges, we
+ // double the total error so that we only need to pad edges during indexing
+ // and not at query time.
+ cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord)
+)
+
+type clippedShape struct {
+ // shapeID is the index of the shape this clipped shape is a part of.
+ shapeID int32
+
+ // containsCenter indicates if the center of the CellID this shape has been
+ // clipped to falls inside this shape. This is false for shapes that do not
+ // have an interior.
+ containsCenter bool
+
+ // edges is the ordered set of ShapeIndex original edge ids. Edges
+ // are stored in increasing order of edge id.
+ edges []int
+}
+
+// newClippedShape returns a new clippedShape for the given shapeID, with space
+// for the expected number of edges.
+func newClippedShape(id int32, numEdges int) *clippedShape {
+ return &clippedShape{
+ shapeID: id,
+ edges: make([]int, numEdges),
+ }
+}
+
+// shapeIndexCell stores the index contents for a particular CellID.
+type shapeIndexCell struct {
+ shapes []*clippedShape
+}
+
+// add adds the given clipped shape to this index cell.
+func (s *shapeIndexCell) add(c *clippedShape) {
+ s.shapes = append(s.shapes, c)
+}
+
+// findByID returns the clipped shape that contains the given shapeID,
+// or nil if none of the clipped shapes contain it.
+func (s *shapeIndexCell) findByID(shapeID int32) *clippedShape {
+ // Linear search is fine because the number of shapes per cell is typically
+ // very small (most often 1), and is large only for pathological inputs
+ // (e.g. very deeply nested loops).
+ for _, clipped := range s.shapes {
+ if clipped.shapeID == shapeID {
+ return clipped
+ }
+ }
+ return nil
+}
+
+// faceEdge and clippedEdge store temporary edge data while the index is being
+// updated.
+//
+// While it would be possible to combine all the edge information into one
+// structure, there are two good reasons for separating it:
+//
+// - Memory usage. Separating the two means that we only need to
+// store one copy of the per-face data no matter how many times an edge is
+// subdivided, and it also lets us delay computing bounding boxes until
+// they are needed for processing each face (when the dataset spans
+// multiple faces).
+//
+// - Performance. UpdateEdges is significantly faster on large polygons when
+// the data is separated, because it often only needs to access the data in
+// clippedEdge and this data is cached more successfully.
+
+// faceEdge represents an edge that has been projected onto a given face.
+type faceEdge struct {
+ shapeID int32 // The ID of shape that this edge belongs to
+ edgeID int // Edge ID within that shape
+ maxLevel int // Not desirable to subdivide this edge beyond this level
+ hasInterior bool // Belongs to a shape that has an interior
+ a, b r2.Point // The edge endpoints, clipped to a given face
+ va, vb Point // The original Loop vertices of this edge.
+}
+
+// clippedEdge represents the portion of a faceEdge that has been clipped to a given Cell.
+type clippedEdge struct {
+ faceEdge *faceEdge // The original unclipped edge
+ bound r2.Rect // Bounding box for the clipped portion
+}
+
+// ShapeIndex indexes a set of Shapes, where a Shape is some collection of
+// edges. A shape can be as simple as a single edge, or as complex as a set of loops.
+// For Shapes that have interiors, the index makes it very fast to determine which
+// Shape(s) contain a given point or region.
+type ShapeIndex struct {
+ // shapes maps all shapes to their index.
+ shapes map[Shape]int32
+
+ maxEdgesPerCell int
+
+ // nextID tracks the next ID to hand out. IDs are not reused when shapes
+ // are removed from the index.
+ nextID int32
+}
+
+// NewShapeIndex creates a new ShapeIndex.
+func NewShapeIndex() *ShapeIndex {
+ return &ShapeIndex{
+ maxEdgesPerCell: 10,
+ shapes: make(map[Shape]int32),
+ }
+}
+
+// Add adds the given shape to the index and assigns an ID to it.
+func (s *ShapeIndex) Add(shape Shape) {
+ s.shapes[shape] = s.nextID
+ s.nextID++
+}
+
+// Remove removes the given shape from the index.
+func (s *ShapeIndex) Remove(shape Shape) {
+ delete(s.shapes, shape)
+}
+
+// Len reports the number of Shapes in this index.
+func (s *ShapeIndex) Len() int {
+ return len(s.shapes)
+}
+
+// Reset clears the contents of the index and resets it to its original state.
+func (s *ShapeIndex) Reset() {
+ s.shapes = make(map[Shape]int32)
+ s.nextID = 0
+}
+
+// NumEdges returns the number of edges in this index.
+func (s *ShapeIndex) NumEdges() int {
+ numEdges := 0
+ for shape := range s.shapes {
+ numEdges += shape.NumEdges()
+ }
+ return numEdges
+}
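+
+// A usage sketch (Polyline satisfies Shape, as asserted above; a pointer is
+// used so the map key stays comparable, since Polyline is a slice type):
+//
+//   line := Polyline{PointFromCoords(1, 0, 0), PointFromCoords(0, 1, 0)}
+//   idx := NewShapeIndex()
+//   idx.Add(&line)
+//   _ = idx.Len()      // 1
+//   _ = idx.NumEdges() // 1 (a two-vertex polyline has a single edge)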
diff --git a/vendor/github.com/golang/geo/s2/stuv.go b/vendor/github.com/golang/geo/s2/stuv.go
new file mode 100644
index 0000000..e669b7c
--- /dev/null
+++ b/vendor/github.com/golang/geo/s2/stuv.go
@@ -0,0 +1,310 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s2
+
+import (
+ "math"
+
+ "github.com/golang/geo/r3"
+)
+
+const (
+ // maxSiTi is the maximum value of an si- or ti-coordinate.
+ // It is one shift more than maxSize.
+ maxSiTi = maxSize << 1
+)
+
+// siTiToST converts an si- or ti-value to the corresponding s- or t-value.
+// Value is capped at 1.0 because there is no DCHECK in Go.
+func siTiToST(si uint64) float64 {
+ if si > maxSiTi {
+ return 1.0
+ }
+ return float64(si) / float64(maxSiTi)
+}
+
+// stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate.
+// The result may be outside the range of valid (si,ti)-values. Value of
+// 0.49999999999999994 (math.Nextafter(0.5, -1)) will be incorrectly rounded up.
+func stToSiTi(s float64) uint64 {
+ if s < 0 {
+ return uint64(s*maxSiTi - 0.5)
+ }
+ return uint64(s*maxSiTi + 0.5)
+}
+
+// stToUV converts an s or t value to the corresponding u or v value.
+// This is a non-linear transformation from [-1,1] to [-1,1] that
+// attempts to make the cell sizes more uniform.
+// This uses what the C++ version calls 'the quadratic transform'.
+func stToUV(s float64) float64 {
+ if s >= 0.5 {
+ return (1 / 3.) * (4*s*s - 1)
+ }
+ return (1 / 3.) * (1 - 4*(1-s)*(1-s))
+}
+
+// uvToST is the inverse of the stToUV transformation. Note that it
+// is not always true that uvToST(stToUV(x)) == x due to numerical
+// errors.
+func uvToST(u float64) float64 {
+ if u >= 0 {
+ return 0.5 * math.Sqrt(1+3*u)
+ }
+ return 1 - 0.5*math.Sqrt(1-3*u)
+}
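+
+// For example, stToUV maps 0, 0.5 and 1 to (approximately) -1, 0 and 1, and the
+// round trip uvToST(stToUV(0.25)) recovers 0.25 up to rounding, since
+// stToUV(0.25) = (1/3)*(1 - 4*(0.75)^2) = -5/12 and 1 - 0.5*sqrt(1 + 3*(5/12)) = 0.25.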
+
+// face returns the face ID from 0 to 5 containing r. For points on the
+// boundary between faces, the result is arbitrary but deterministic.
+func face(r r3.Vector) int {
+ abs := r.Abs()
+ id := 0
+ value := r.X
+ if abs.Y > abs.X {
+ id = 1
+ value = r.Y
+ }
+ if abs.Z > math.Abs(value) {
+ id = 2
+ value = r.Z
+ }
+ if value < 0 {
+ id += 3
+ }
+ return id
+}
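+
+// For example, face(r3.Vector{1, 0, 0}) == 0 (the +x face) and
+// face(r3.Vector{0, 0, -1}) == 5 (the -z face). A boundary point such as
+// (1, 1, 0) resolves deterministically to face 0, since abs.Y is not strictly
+// greater than abs.X.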
+
+// validFaceXYZToUV, given a valid face for the point r (meaning that the
+// dot product of r with the face normal is positive), returns the
+// corresponding u and v values, which may lie outside the range [-1,1].
+func validFaceXYZToUV(face int, r r3.Vector) (float64, float64) {
+ switch face {
+ case 0:
+ return r.Y / r.X, r.Z / r.X
+ case 1:
+ return -r.X / r.Y, r.Z / r.Y
+ case 2:
+ return -r.X / r.Z, -r.Y / r.Z
+ case 3:
+ return r.Z / r.X, r.Y / r.X
+ case 4:
+ return r.Z / r.Y, -r.X / r.Y
+ }
+ return -r.Y / r.Z, -r.X / r.Z
+}
+
+// xyzToFaceUV converts a direction vector (not necessarily unit length) to
+// (face, u, v) coordinates.
+func xyzToFaceUV(r r3.Vector) (f int, u, v float64) {
+ f = face(r)
+ u, v = validFaceXYZToUV(f, r)
+ return f, u, v
+}
+
+// faceUVToXYZ turns face and UV coordinates into an unnormalized 3-vector.
+func faceUVToXYZ(face int, u, v float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{1, u, v}
+ case 1:
+ return r3.Vector{-u, 1, v}
+ case 2:
+ return r3.Vector{-u, -v, 1}
+ case 3:
+ return r3.Vector{-1, -v, -u}
+ case 4:
+ return r3.Vector{v, -1, -u}
+ default:
+ return r3.Vector{v, u, -1}
+ }
+}
+
+// faceXYZToUV returns the u and v values (which may lie outside the range
+// [-1, 1]) if the dot product of the point p with the given face normal is
+// positive; otherwise ok is false.
+func faceXYZToUV(face int, p Point) (u, v float64, ok bool) {
+ switch face {
+ case 0:
+ if p.X <= 0 {
+ return 0, 0, false
+ }
+ case 1:
+ if p.Y <= 0 {
+ return 0, 0, false
+ }
+ case 2:
+ if p.Z <= 0 {
+ return 0, 0, false
+ }
+ case 3:
+ if p.X >= 0 {
+ return 0, 0, false
+ }
+ case 4:
+ if p.Y >= 0 {
+ return 0, 0, false
+ }
+ default:
+ if p.Z >= 0 {
+ return 0, 0, false
+ }
+ }
+
+ u, v = validFaceXYZToUV(face, p.Vector)
+ return u, v, true
+}
+
+// faceXYZtoUVW transforms the given point P to the (u,v,w) coordinate frame of the given
+// face where the w-axis represents the face normal.
+func faceXYZtoUVW(face int, p Point) Point {
+ // The result coordinates are simply the dot products of P with the (u,v,w)
+ // axes for the given face (see faceUVWAxes).
+ switch face {
+ case 0:
+ return Point{r3.Vector{p.Y, p.Z, p.X}}
+ case 1:
+ return Point{r3.Vector{-p.X, p.Z, p.Y}}
+ case 2:
+ return Point{r3.Vector{-p.X, -p.Y, p.Z}}
+ case 3:
+ return Point{r3.Vector{-p.Z, -p.Y, -p.X}}
+ case 4:
+ return Point{r3.Vector{-p.Z, p.X, -p.Y}}
+ default:
+ return Point{r3.Vector{p.Y, p.X, -p.Z}}
+ }
+}
+
+// faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily
+// unit length) Point on the given face.
+func faceSiTiToXYZ(face int, si, ti uint64) Point {
+ return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))}
+}
+
+// xyzToFaceSiTi transforms the (not necessarily unit length) Point to
+// (face, si, ti) coordinates and the level the Point is at.
+func xyzToFaceSiTi(p Point) (face int, si, ti uint64, level int) {
+ face, u, v := xyzToFaceUV(p.Vector)
+ si = stToSiTi(uvToST(u))
+ ti = stToSiTi(uvToST(v))
+
+ // If the levels corresponding to si,ti are not equal, then p is not a cell
+ // center. The si,ti values of 0 and maxSiTi need to be handled specially
+ // because they do not correspond to cell centers at any valid level; they
+ // are mapped to level -1 by the code at the end.
+ level = maxLevel - findLSBSetNonZero64(si|maxSiTi)
+ if level < 0 || level != maxLevel-findLSBSetNonZero64(ti|maxSiTi) {
+ return face, si, ti, -1
+ }
+
+ // In infinite precision, this test could be changed to ST == SiTi. However,
+ // due to rounding errors, uvToST(xyzToFaceUV(faceUVToXYZ(stToUV(...)))) is
+ // not idempotent. On the other hand, the center is computed exactly the same
+ // way p was originally computed (if it is indeed the center of a Cell);
+ // the comparison can be exact.
+ if p.Vector == faceSiTiToXYZ(face, si, ti).Normalize() {
+ return face, si, ti, level
+ }
+
+ return face, si, ti, -1
+}
+
+// uNorm returns the right-handed normal (not necessarily unit length) for an
+// edge in the direction of the positive v-axis at the given u-value on
+// the given face. (This vector is perpendicular to the plane through
+// the sphere origin that contains the given edge.)
+func uNorm(face int, u float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{u, -1, 0}
+ case 1:
+ return r3.Vector{1, u, 0}
+ case 2:
+ return r3.Vector{1, 0, u}
+ case 3:
+ return r3.Vector{-u, 0, 1}
+ case 4:
+ return r3.Vector{0, -u, 1}
+ default:
+ return r3.Vector{0, -1, -u}
+ }
+}
+
+// vNorm returns the right-handed normal (not necessarily unit length) for an
+// edge in the direction of the positive u-axis at the given v-value on
+// the given face.
+func vNorm(face int, v float64) r3.Vector {
+ switch face {
+ case 0:
+ return r3.Vector{-v, 0, 1}
+ case 1:
+ return r3.Vector{0, -v, 1}
+ case 2:
+ return r3.Vector{0, -1, -v}
+ case 3:
+ return r3.Vector{v, -1, 0}
+ case 4:
+ return r3.Vector{1, v, 0}
+ default:
+ return r3.Vector{1, 0, v}
+ }
+}
+
+// faceUVWAxes are the U, V, and W axes for each face.
+var faceUVWAxes = [6][3]Point{
+ {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{1, 0, 0}}},
+ {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{0, 1, 0}}},
+ {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{0, 0, 1}}},
+ {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{-1, 0, 0}}},
+ {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, -1, 0}}},
+ {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 0, -1}}},
+}
+
+// faceUVWFaces are the precomputed neighbors of each face.
+var faceUVWFaces = [6][3][2]int{
+ {{4, 1}, {5, 2}, {3, 0}},
+ {{0, 3}, {5, 2}, {4, 1}},
+ {{0, 3}, {1, 4}, {5, 2}},
+ {{2, 5}, {1, 4}, {0, 3}},
+ {{2, 5}, {3, 0}, {1, 4}},
+ {{4, 1}, {3, 0}, {2, 5}},
+}
+
+// uvwAxis returns the given axis of the given face.
+func uvwAxis(face, axis int) Point {
+ return faceUVWAxes[face][axis]
+}
+
+// uvwFace returns the face in the (u,v,w) coordinate system on the given axis
+// in the given direction.
+func uvwFace(face, axis, direction int) int {
+ return faceUVWFaces[face][axis][direction]
+}
+
+// uAxis returns the u-axis for the given face.
+func uAxis(face int) Point {
+ return uvwAxis(face, 0)
+}
+
+// vAxis returns the v-axis for the given face.
+func vAxis(face int) Point {
+ return uvwAxis(face, 1)
+}
+
+// unitNorm returns the unit-length normal for the given face.
+func unitNorm(face int) Point {
+ return uvwAxis(face, 2)
+}