Diffstat (limited to 'vendor/github.com/zenazn/goji/graceful')
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/clone.go                 11
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/clone16.go               34
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/einhorn.go               21
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/graceful.go              62
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/listener/conn.go        151
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/listener/listener.go    178
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/listener/shard.go        98
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/middleware.go           103
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/serve.go                 33
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/serve13.go               76
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/server.go               108
-rw-r--r--   vendor/github.com/zenazn/goji/graceful/signal.go               197
12 files changed, 1072 insertions, 0 deletions
diff --git a/vendor/github.com/zenazn/goji/graceful/clone.go b/vendor/github.com/zenazn/goji/graceful/clone.go
new file mode 100644
index 0000000..a9027e5
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/clone.go
@@ -0,0 +1,11 @@
+// +build !go1.6
+
+package graceful
+
+import "crypto/tls"
+
+// see clone16.go
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	c := *cfg
+	return &c
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/clone16.go b/vendor/github.com/zenazn/goji/graceful/clone16.go
new file mode 100644
index 0000000..810b3a2
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/clone16.go
@@ -0,0 +1,34 @@
+// +build go1.6
+
+package graceful
+
+import "crypto/tls"
+
+// cloneTLSConfig was taken from the Go standard library's net/http package. We
+// need it because tls.Config objects now contain a sync.Once.
+func cloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+	return &tls.Config{
+		Rand:                     cfg.Rand,
+		Time:                     cfg.Time,
+		Certificates:             cfg.Certificates,
+		NameToCertificate:        cfg.NameToCertificate,
+		GetCertificate:           cfg.GetCertificate,
+		RootCAs:                  cfg.RootCAs,
+		NextProtos:               cfg.NextProtos,
+		ServerName:               cfg.ServerName,
+		ClientAuth:               cfg.ClientAuth,
+		ClientCAs:                cfg.ClientCAs,
+		InsecureSkipVerify:       cfg.InsecureSkipVerify,
+		CipherSuites:             cfg.CipherSuites,
+		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
+		SessionTicketsDisabled:   cfg.SessionTicketsDisabled,
+		SessionTicketKey:         cfg.SessionTicketKey,
+		ClientSessionCache:       cfg.ClientSessionCache,
+		MinVersion:               cfg.MinVersion,
+		MaxVersion:               cfg.MaxVersion,
+		CurvePreferences:         cfg.CurvePreferences,
+	}
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/einhorn.go b/vendor/github.com/zenazn/goji/graceful/einhorn.go
new file mode 100644
index 0000000..082d1c4
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/einhorn.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package graceful
+
+import (
+	"os"
+	"strconv"
+	"syscall"
+)
+
+func init() {
+	// This is a little unfortunate: goji/bind already knows whether we're
+	// running under einhorn, but we don't want to introduce a dependency
+	// between the two packages. Since the check is short enough, inlining
+	// it here seems "fine."
+	mpid, err := strconv.Atoi(os.Getenv("EINHORN_MASTER_PID"))
+	if err != nil || mpid != os.Getppid() {
+		return
+	}
+	stdSignals = append(stdSignals, syscall.SIGUSR2)
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/graceful.go b/vendor/github.com/zenazn/goji/graceful/graceful.go
new file mode 100644
index 0000000..ff9b186
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/graceful.go
@@ -0,0 +1,62 @@
+/*
+Package graceful implements graceful shutdown for HTTP servers by closing idle
+connections after receiving a signal. By default, this package listens for
+interrupts (i.e., SIGINT), but when it detects that it is running under Einhorn
+it will additionally listen for SIGUSR2 as well, giving your application
+automatic support for graceful restarts/code upgrades.
+*/
+package graceful
+
+import (
+	"net"
+	"runtime"
+	"sync/atomic"
+
+	"github.com/zenazn/goji/graceful/listener"
+)
+
+// WrapListener wraps an arbitrary net.Listener for use with graceful shutdowns.
+// In the background, it uses the listener sub-package to Wrap the listener in
+// Deadline mode. If another mode of operation is desired, you should call
+// listener.Wrap yourself: this function is smart enough to not double-wrap
+// listeners.
+func WrapListener(l net.Listener) net.Listener {
+	if lt, ok := l.(*listener.T); ok {
+		appendListener(lt)
+		return lt
+	}
+
+	lt := listener.Wrap(l, listener.Deadline)
+	appendListener(lt)
+	return lt
+}
+
+func appendListener(l *listener.T) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	listeners = append(listeners, l)
+}
+
+const errClosing = "use of closed network connection"
+
+// During graceful shutdown, calls to Accept will start returning errors. This
+// is inconvenient, since we know these sorts of errors are peaceful, so we
+// silently swallow them.
+func peacefulError(err error) error {
+	if atomic.LoadInt32(&closing) == 0 {
+		return err
+	}
+	// Unfortunately Go doesn't really give us a better way to select errors
+	// than this, so *shrug*.
+	if oe, ok := err.(*net.OpError); ok {
+		errOp := "accept"
+		if runtime.GOOS == "windows" {
+			errOp = "AcceptEx"
+		}
+		if oe.Op == errOp && oe.Err.Error() == errClosing {
+			return nil
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/listener/conn.go b/vendor/github.com/zenazn/goji/graceful/listener/conn.go
new file mode 100644
index 0000000..7b34b47
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/listener/conn.go
@@ -0,0 +1,151 @@
+package listener
+
+import (
+	"errors"
+	"io"
+	"net"
+	"sync"
+	"time"
+)
+
+type conn struct {
+	net.Conn
+
+	shard *shard
+	mode  mode
+
+	mu       sync.Mutex // Protects the state machine below
+	busy     bool       // connection is in use (i.e., not idle)
+	closed   bool       // connection is closed
+	disowned bool       // if true, this connection is no longer under our management
+}
+
+// This intentionally looks a lot like the one in package net.
+var errClosing = errors.New("use of closed network connection")
+
+func (c *conn) init() error {
+	c.shard.wg.Add(1)
+	if shouldExit := c.shard.track(c); shouldExit {
+		c.Close()
+		return errClosing
+	}
+	return nil
+}
+
+func (c *conn) Read(b []byte) (n int, err error) {
+	defer func() {
+		c.mu.Lock()
+		defer c.mu.Unlock()
+
+		if c.disowned {
+			return
+		}
+
+		// This protects against a Close/Read race. We're not really
+		// concerned about the general case (it's fundamentally racy),
+		// but are mostly trying to prevent a race between a new request
+		// getting read off the wire in one thread while the connection
+		// is being gracefully shut down in another.
+		if c.closed && err == nil {
+			n = 0
+			err = errClosing
+			return
+		}
+
+		if c.mode != Manual && !c.busy && !c.closed {
+			c.busy = true
+			c.shard.markInUse(c)
+		}
+	}()
+
+	return c.Conn.Read(b)
+}
+
+func (c *conn) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.disowned {
+		return c.Conn.Close()
+	} else if c.closed {
+		return errClosing
+	}
+
+	c.closed = true
+	c.shard.disown(c)
+	defer c.shard.wg.Done()
+
+	return c.Conn.Close()
+}
+
+func (c *conn) SetReadDeadline(t time.Time) error {
+	c.mu.Lock()
+	if !c.disowned && c.mode == Deadline {
+		defer c.markIdle()
+	}
+	c.mu.Unlock()
+	return c.Conn.SetReadDeadline(t)
+}
+
+func (c *conn) ReadFrom(r io.Reader) (int64, error) {
+	return io.Copy(c.Conn, r)
+}
+
+func (c *conn) markIdle() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.busy {
+		return
+	}
+	c.busy = false
+
+	if exit := c.shard.markIdle(c); exit && !c.closed && !c.disowned {
+		c.closed = true
+		c.shard.disown(c)
+		defer c.shard.wg.Done()
+		c.Conn.Close()
+		return
+	}
+}
+
+func (c *conn) markInUse() {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.busy && !c.closed && !c.disowned {
+		c.busy = true
+		c.shard.markInUse(c)
+	}
+}
+
+func (c *conn) closeIfIdle() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.busy && !c.closed && !c.disowned {
+		c.closed = true
+		c.shard.disown(c)
+		defer c.shard.wg.Done()
+		return c.Conn.Close()
+	}
+
+	return nil
+}
+
+var errAlreadyDisowned = errors.New("listener: conn already disowned")
+
+func (c *conn) disown() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.disowned {
+		return errAlreadyDisowned
+	}
+
+	c.shard.disown(c)
+	c.disowned = true
+	c.shard.wg.Done()
+
+	return nil
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/listener/listener.go b/vendor/github.com/zenazn/goji/graceful/listener/listener.go
new file mode 100644
index 0000000..6c9c477
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/listener/listener.go
@@ -0,0 +1,178 @@
+/*
+Package listener provides a way to incorporate graceful shutdown to any
+net.Listener.
+
+This package provides low-level primitives, not a high-level API. If you're
+looking for a package that provides graceful shutdown for HTTP servers, I
+recommend this package's parent package, github.com/zenazn/goji/graceful.
+*/
+package listener
+
+import (
+	"errors"
+	"net"
+	"runtime"
+	"sync"
+	"sync/atomic"
+)
+
+type mode int8
+
+const (
+	// Manual mode is completely manual: users must use MarkIdle and
+	// MarkInUse to indicate when connections are busy servicing requests or
+	// are eligible for termination.
+	Manual mode = iota
+	// Automatic mode is what most users probably want: calling Read on a
+	// connection will mark it as in use, but users must manually call
+	// MarkIdle to indicate when connections may be safely closed.
+	Automatic
+	// Deadline mode is like automatic mode, except that calling
+	// SetReadDeadline on a connection will also mark it as being idle. This
+	// is useful for many servers like net/http, where SetReadDeadline is
+	// used to implement read timeouts on new requests.
+	Deadline
+)
+
+// Wrap a net.Listener, returning a net.Listener which supports idle connection
+// tracking and shutdown. Listeners can be placed in to one of three modes,
+// exported as variables from this package: most users will probably want the
+// "Automatic" mode.
+func Wrap(l net.Listener, m mode) *T {
+	t := &T{
+		l:    l,
+		mode: m,
+		// To keep the expected contention rate constant we'd have to
+		// grow this as numcpu**2. In practice, CPU counts don't
+		// generally grow without bound, and contention is probably
+		// going to be small enough that nobody cares anyways.
+		shards: make([]shard, 2*runtime.NumCPU()),
+	}
+	for i := range t.shards {
+		t.shards[i].init(t)
+	}
+	return t
+}
+
+// T is the type of this package's graceful listeners.
+type T struct {
+	mu sync.Mutex
+	l  net.Listener
+
+	// TODO(carl): a count of currently outstanding connections.
+	connCount uint64
+	shards    []shard
+
+	mode mode
+}
+
+var _ net.Listener = &T{}
+
+// Accept waits for and returns the next connection to the listener. The
+// returned net.Conn's idleness is tracked, and idle connections can be closed
+// from the associated T.
+func (t *T) Accept() (net.Conn, error) {
+	c, err := t.l.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	connID := atomic.AddUint64(&t.connCount, 1)
+	shard := &t.shards[int(connID)%len(t.shards)]
+	wc := &conn{
+		Conn:  c,
+		shard: shard,
+		mode:  t.mode,
+	}
+
+	if err = wc.init(); err != nil {
+		return nil, err
+	}
+	return wc, nil
+}
+
+// Addr returns the wrapped listener's network address.
+func (t *T) Addr() net.Addr {
+	return t.l.Addr()
+}
+
+// Close closes the wrapped listener.
+func (t *T) Close() error {
+	return t.l.Close()
+}
+
+// CloseIdle closes all connections that are currently marked as being idle. It,
+// however, makes no attempt to wait for in-use connections to die, or to close
+// connections which become idle in the future. Call this function if you're
+// interested in shedding useless connections, but otherwise wish to continue
+// serving requests.
+func (t *T) CloseIdle() error {
+	for i := range t.shards {
+		t.shards[i].closeConns(false, false)
+	}
+	// Not sure if returning errors is actually useful here :/
+	return nil
+}
+
+// Drain immediately closes all idle connections, prevents new connections from
+// being accepted, and waits for all outstanding connections to finish.
+//
+// Once a listener has been drained, there is no way to re-enable it. You
+// probably want to Close the listener before draining it, otherwise new
+// connections will be accepted and immediately closed.
+func (t *T) Drain() error {
+	for i := range t.shards {
+		t.shards[i].closeConns(false, true)
+	}
+	for i := range t.shards {
+		t.shards[i].wait()
+	}
+	return nil
+}
+
+// DrainAll closes all connections currently tracked by this listener (both idle
+// and in-use connections), and prevents new connections from being accepted.
+// Disowned connections are not closed.
+func (t *T) DrainAll() error {
+	for i := range t.shards {
+		t.shards[i].closeConns(true, true)
+	}
+	for i := range t.shards {
+		t.shards[i].wait()
+	}
+	return nil
+}
+
+var errNotManaged = errors.New("listener: passed net.Conn is not managed by this package")
+
+// Disown causes a connection to no longer be tracked by the listener. The
+// passed connection must have been returned by a call to Accept from this
+// listener.
+func Disown(c net.Conn) error {
+	if cn, ok := c.(*conn); ok {
+		return cn.disown()
+	}
+	return errNotManaged
+}
+
+// MarkIdle marks the given connection as being idle, and therefore eligible for
+// closing at any time. The passed connection must have been returned by a call
+// to Accept from this listener.
+func MarkIdle(c net.Conn) error {
+	if cn, ok := c.(*conn); ok {
+		cn.markIdle()
+		return nil
+	}
+	return errNotManaged
+}
+
+// MarkInUse marks this connection as being in use, removing it from the set of
+// connections which are eligible for closing. The passed connection must have
+// been returned by a call to Accept from this listener.
+func MarkInUse(c net.Conn) error {
+	if cn, ok := c.(*conn); ok {
+		cn.markInUse()
+		return nil
+	}
+	return errNotManaged
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/listener/shard.go b/vendor/github.com/zenazn/goji/graceful/listener/shard.go
new file mode 100644
index 0000000..a9addad
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/listener/shard.go
@@ -0,0 +1,98 @@
+package listener
+
+import "sync"
+
+type shard struct {
+	l *T
+
+	mu    sync.Mutex
+	idle  map[*conn]struct{}
+	all   map[*conn]struct{}
+	wg    sync.WaitGroup
+	drain bool
+}
+
+// We pretty aggressively preallocate set entries in the hopes that we never
+// have to allocate memory with the lock held. This is definitely a premature
+// optimization and is probably misguided, but luckily it costs us essentially
+// nothing.
+const prealloc = 2048
+
+func (s *shard) init(l *T) {
+	s.l = l
+	s.idle = make(map[*conn]struct{}, prealloc)
+	s.all = make(map[*conn]struct{}, prealloc)
+}
+
+func (s *shard) track(c *conn) (shouldClose bool) {
+	s.mu.Lock()
+	if s.drain {
+		s.mu.Unlock()
+		return true
+	}
+	s.all[c] = struct{}{}
+	s.idle[c] = struct{}{}
+	s.mu.Unlock()
+	return false
+}
+
+func (s *shard) disown(c *conn) {
+	s.mu.Lock()
+	delete(s.all, c)
+	delete(s.idle, c)
+	s.mu.Unlock()
+}
+
+func (s *shard) markIdle(c *conn) (shouldClose bool) {
+	s.mu.Lock()
+	if s.drain {
+		s.mu.Unlock()
+		return true
+	}
+	s.idle[c] = struct{}{}
+	s.mu.Unlock()
+	return false
+}
+
+func (s *shard) markInUse(c *conn) {
+	s.mu.Lock()
+	delete(s.idle, c)
+	s.mu.Unlock()
+}
+
+func (s *shard) closeConns(all, drain bool) {
+	s.mu.Lock()
+	if drain {
+		s.drain = true
+	}
+	set := make(map[*conn]struct{}, len(s.all))
+	if all {
+		for c := range s.all {
+			set[c] = struct{}{}
+		}
+	} else {
+		for c := range s.idle {
+			set[c] = struct{}{}
+		}
+	}
+	// We have to drop the shard lock here to avoid deadlock: we cannot
+	// acquire the shard lock after the connection lock, and the closeIfIdle
+	// call below will grab a connection lock.
+	s.mu.Unlock()
+
+	for c := range set {
+		// This might return an error (from Close), but I don't think we
+		// can do anything about it, so let's just pretend it didn't
+		// happen. (I also expect that most errors returned in this way
+		// are going to be pretty boring)
+		if all {
+			c.Close()
+		} else {
+			c.closeIfIdle()
+		}
+	}
+}
+
+func (s *shard) wait() {
+	s.wg.Wait()
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/middleware.go b/vendor/github.com/zenazn/goji/graceful/middleware.go
new file mode 100644
index 0000000..94edfe3
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/middleware.go
@@ -0,0 +1,103 @@
+// +build !go1.3
+
+package graceful
+
+import (
+	"bufio"
+	"io"
+	"net"
+	"net/http"
+	"sync/atomic"
+
+	"github.com/zenazn/goji/graceful/listener"
+)
+
+// Middleware provides functionality similar to net/http.Server's
+// SetKeepAlivesEnabled in Go 1.3, but in Go 1.2.
+func middleware(h http.Handler) http.Handler {
+	if h == nil {
+		return nil
+	}
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		_, cn := w.(http.CloseNotifier)
+		_, fl := w.(http.Flusher)
+		_, hj := w.(http.Hijacker)
+		_, rf := w.(io.ReaderFrom)
+
+		bw := basicWriter{ResponseWriter: w}
+
+		if cn && fl && hj && rf {
+			h.ServeHTTP(&fancyWriter{bw}, r)
+		} else {
+			h.ServeHTTP(&bw, r)
+		}
+		if !bw.headerWritten {
+			bw.maybeClose()
+		}
+	})
+}
+
+type basicWriter struct {
+	http.ResponseWriter
+	headerWritten bool
+}
+
+func (b *basicWriter) maybeClose() {
+	b.headerWritten = true
+	if atomic.LoadInt32(&closing) != 0 {
+		b.ResponseWriter.Header().Set("Connection", "close")
+	}
+}
+
+func (b *basicWriter) WriteHeader(code int) {
+	b.maybeClose()
+	b.ResponseWriter.WriteHeader(code)
+}
+
+func (b *basicWriter) Write(buf []byte) (int, error) {
+	if !b.headerWritten {
+		b.maybeClose()
+	}
+	return b.ResponseWriter.Write(buf)
+}
+
+func (b *basicWriter) Unwrap() http.ResponseWriter {
+	return b.ResponseWriter
+}
+
+// Optimize for the common case of a ResponseWriter that supports all three of
+// CloseNotifier, Flusher, and Hijacker.
+type fancyWriter struct {
+	basicWriter
+}
+
+func (f *fancyWriter) CloseNotify() <-chan bool {
+	cn := f.basicWriter.ResponseWriter.(http.CloseNotifier)
+	return cn.CloseNotify()
+}
+func (f *fancyWriter) Flush() {
+	fl := f.basicWriter.ResponseWriter.(http.Flusher)
+	fl.Flush()
+}
+func (f *fancyWriter) Hijack() (c net.Conn, b *bufio.ReadWriter, e error) {
+	hj := f.basicWriter.ResponseWriter.(http.Hijacker)
+	c, b, e = hj.Hijack()
+
+	if e == nil {
+		e = listener.Disown(c)
+	}
+
+	return
+}
+func (f *fancyWriter) ReadFrom(r io.Reader) (int64, error) {
+	rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
+	if !f.basicWriter.headerWritten {
+		f.basicWriter.maybeClose()
+	}
+	return rf.ReadFrom(r)
+}
+
+var _ http.CloseNotifier = &fancyWriter{}
+var _ http.Flusher = &fancyWriter{}
+var _ http.Hijacker = &fancyWriter{}
+var _ io.ReaderFrom = &fancyWriter{}
diff --git a/vendor/github.com/zenazn/goji/graceful/serve.go b/vendor/github.com/zenazn/goji/graceful/serve.go
new file mode 100644
index 0000000..edb2a53
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/serve.go
@@ -0,0 +1,33 @@
+// +build !go1.3
+
+package graceful
+
+import (
+	"net"
+	"net/http"
+	"time"
+
+	"github.com/zenazn/goji/graceful/listener"
+)
+
+// About 200 years, also known as "forever"
+const forever time.Duration = 200 * 365 * 24 * time.Hour
+
+// Serve behaves like the method on net/http.Server with the same name.
+func (srv *Server) Serve(l net.Listener) error {
+	// Spawn a shadow http.Server to do the actual servering. We do this
+	// because we need to sketch on some of the parameters you passed in,
+	// and it's nice to keep our sketching to ourselves.
+	shadow := *(*http.Server)(srv)
+
+	if shadow.ReadTimeout == 0 {
+		shadow.ReadTimeout = forever
+	}
+	shadow.Handler = middleware(shadow.Handler)
+
+	wrap := listener.Wrap(l, listener.Deadline)
+	appendListener(wrap)
+
+	err := shadow.Serve(wrap)
+	return peacefulError(err)
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/serve13.go b/vendor/github.com/zenazn/goji/graceful/serve13.go
new file mode 100644
index 0000000..68cac04
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/serve13.go
@@ -0,0 +1,76 @@
+// +build go1.3
+
+package graceful
+
+import (
+	"log"
+	"net"
+	"net/http"
+
+	"github.com/zenazn/goji/graceful/listener"
+)
+
+// This is a slightly hacky shim to disable keepalives when shutting a server
+// down. We could have added extra functionality in listener or signal.go to
+// deal with this case, but this seems simpler.
+type gracefulServer struct {
+	net.Listener
+	s *http.Server
+}
+
+func (g gracefulServer) Close() error {
+	g.s.SetKeepAlivesEnabled(false)
+	return g.Listener.Close()
+}
+
+// A chaining http.ConnState wrapper
+type connState func(net.Conn, http.ConnState)
+
+func (c connState) Wrap(nc net.Conn, s http.ConnState) {
+	// There are a few other states defined, most notably StateActive.
+	// Unfortunately it doesn't look like it's possible to make use of
+	// StateActive to implement graceful shutdown, since StateActive is set
+	// after a complete request has been read off the wire with an intent to
+	// process it. If we were to race a graceful shutdown against a
+	// connection that was just read off the wire (but not yet in
+	// StateActive), we would accidentally close the connection out from
+	// underneath an active request.
+	//
+	// We already needed to work around this for Go 1.2 by shimming out a
+	// full net.Conn object, so we can just fall back to the old behavior
+	// there.
+	//
+	// I started a golang-nuts thread about this here:
+	// https://groups.google.com/forum/#!topic/golang-nuts/Xi8yjBGWfCQ
+	// I'd be very eager to find a better way to do this, so reach out to me
+	// if you have any ideas.
+	switch s {
+	case http.StateIdle:
+		if err := listener.MarkIdle(nc); err != nil {
+			log.Printf("error marking conn as idle: %v", err)
+		}
+	case http.StateHijacked:
+		if err := listener.Disown(nc); err != nil {
+			log.Printf("error disowning hijacked conn: %v", err)
+		}
+	}
+	if c != nil {
+		c(nc, s)
+	}
+}
+
+// Serve behaves like the method on net/http.Server with the same name.
+func (srv *Server) Serve(l net.Listener) error {
+	// Spawn a shadow http.Server to do the actual servering. We do this
+	// because we need to sketch on some of the parameters you passed in,
+	// and it's nice to keep our sketching to ourselves.
+	shadow := *(*http.Server)(srv)
+	shadow.ConnState = connState(shadow.ConnState).Wrap
+
+	l = gracefulServer{l, &shadow}
+	wrap := listener.Wrap(l, listener.Automatic)
+	appendListener(wrap)
+
+	err := shadow.Serve(wrap)
+	return peacefulError(err)
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/server.go b/vendor/github.com/zenazn/goji/graceful/server.go
new file mode 100644
index 0000000..ae9a5fb
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/server.go
@@ -0,0 +1,108 @@
+package graceful
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"time"
+)
+
+// Most of the code here is lifted straight from net/http
+
+// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
+// connections. It's used by ListenAndServe and ListenAndServeTLS so
+// dead TCP connections (e.g. closing laptop mid-download) eventually
+// go away.
+type tcpKeepAliveListener struct {
+	*net.TCPListener
+}
+
+func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
+	tc, err := ln.AcceptTCP()
+	if err != nil {
+		return
+	}
+	tc.SetKeepAlive(true)
+	tc.SetKeepAlivePeriod(3 * time.Minute)
+	return tc, nil
+}
+
+// A Server is exactly the same as an http.Server, but provides more graceful
+// implementations of its methods.
+type Server http.Server
+
+// ListenAndServe behaves like the method on net/http.Server with the same name.
+func (srv *Server) ListenAndServe() error {
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":http"
+	}
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+	return srv.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)})
+}
+
+// ListenAndServeTLS behaves like the method on net/http.Server with the same
+// name. Unlike the method of the same name on http.Server, this function
+// defaults to enforcing TLS 1.0 or higher in order to address the POODLE
+// vulnerability. Users who wish to enable SSLv3 must do so by supplying a
+// TLSConfig explicitly.
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error {
+	addr := srv.Addr
+	if addr == "" {
+		addr = ":https"
+	}
+	config := &tls.Config{
+		MinVersion: tls.VersionTLS10,
+	}
+	if srv.TLSConfig != nil {
+		config = cloneTLSConfig(srv.TLSConfig)
+	}
+	if config.NextProtos == nil {
+		config.NextProtos = []string{"http/1.1"}
+	}
+
+	var err error
+	config.Certificates = make([]tls.Certificate, 1)
+	config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return err
+	}
+
+	ln, err := net.Listen("tcp", addr)
+	if err != nil {
+		return err
+	}
+
+	tlsListener := tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, config)
+	return srv.Serve(tlsListener)
+}
+
+// ListenAndServe behaves exactly like the net/http function of the same name.
+func ListenAndServe(addr string, handler http.Handler) error {
+	server := &Server{Addr: addr, Handler: handler}
+	return server.ListenAndServe()
+}
+
+// ListenAndServeTLS behaves almost exactly like the net/http function of the
+// same name. Unlike net/http, however, this function defaults to enforcing TLS
+// 1.0 or higher in order to address the POODLE vulnerability. Users who wish to
+// enable SSLv3 must do so by explicitly instantiating a server with an
+// appropriately configured TLSConfig property.
+func ListenAndServeTLS(addr, certfile, keyfile string, handler http.Handler) error {
+	server := &Server{Addr: addr, Handler: handler}
+	return server.ListenAndServeTLS(certfile, keyfile)
+}
+
+// Serve mostly behaves like the net/http function of the same name, except that
+// if the passed listener is a net.TCPListener, TCP keep-alives are enabled on
+// accepted connections.
+func Serve(l net.Listener, handler http.Handler) error {
+	if tl, ok := l.(*net.TCPListener); ok {
+		l = tcpKeepAliveListener{tl}
+	}
+	server := &Server{Handler: handler}
+	return server.Serve(l)
+}
diff --git a/vendor/github.com/zenazn/goji/graceful/signal.go b/vendor/github.com/zenazn/goji/graceful/signal.go
new file mode 100644
index 0000000..60612b8
--- /dev/null
+++ b/vendor/github.com/zenazn/goji/graceful/signal.go
@@ -0,0 +1,197 @@
+package graceful
+
+import (
+	"os"
+	"os/signal"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/zenazn/goji/graceful/listener"
+)
+
+var mu sync.Mutex // protects everything that follows
+var listeners = make([]*listener.T, 0)
+var prehooks = make([]func(), 0)
+var posthooks = make([]func(), 0)
+var closing int32
+var doubleKick, timeout time.Duration
+
+var wait = make(chan struct{})
+var stdSignals = []os.Signal{os.Interrupt}
+var sigchan = make(chan os.Signal, 1)
+
+// HandleSignals installs signal handlers for a set of standard signals. By
+// default, this set only includes keyboard interrupts, however when the package
+// detects that it is running under Einhorn, a SIGUSR2 handler is installed as
+// well.
+func HandleSignals() {
+	AddSignal(stdSignals...)
+}
+
+// AddSignal adds the given signal to the set of signals that trigger a graceful
+// shutdown.
+func AddSignal(sig ...os.Signal) {
+	signal.Notify(sigchan, sig...)
+}
+
+// ResetSignals resets the list of signals that trigger a graceful shutdown.
+func ResetSignals() {
+	signal.Stop(sigchan)
+}
+
+// PreHook registers a function to be called before any of this package's normal
+// shutdown actions. All listeners will be called in the order they were added,
+// from a single goroutine.
+func PreHook(f func()) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	prehooks = append(prehooks, f)
+}
+
+// PostHook registers a function to be called after all of this package's normal
+// shutdown actions. All listeners will be called in the order they were added,
+// from a single goroutine, and are guaranteed to be called after all listening
+// connections have been closed, but before Wait() returns.
+//
+// If you've Hijacked any connections that must be gracefully shut down in some
+// other way (since this library disowns all hijacked connections), it's
+// reasonable to use a PostHook to signal and wait for them.
+func PostHook(f func()) {
+	mu.Lock()
+	defer mu.Unlock()
+
+	posthooks = append(posthooks, f)
+}
+
+// Shutdown manually triggers a shutdown from your application. Like Wait,
+// blocks until all connections have gracefully shut down.
+func Shutdown() {
+	shutdown(false)
+}
+
+// ShutdownNow triggers an immediate shutdown from your application. All
+// connections (not just those that are idle) are immediately closed, even if
+// they are in the middle of serving a request.
+func ShutdownNow() {
+	shutdown(true)
+}
+
+// DoubleKickWindow sets the length of the window during which two back-to-back
+// signals are treated as an especially urgent or forceful request to exit
+// (i.e., ShutdownNow instead of Shutdown). Signals delivered more than this
+// duration apart are treated as separate requests to exit gracefully as usual.
+//
+// Setting DoubleKickWindow to 0 disables the feature.
+func DoubleKickWindow(d time.Duration) {
+	if d < 0 {
+		return
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	doubleKick = d
+}
+
+// Timeout sets the maximum amount of time package graceful will wait for
+// connections to gracefully shut down after receiving a signal. After this
+// timeout, connections will be forcefully shut down (similar to calling
+// ShutdownNow).
+//
+// Setting Timeout to 0 disables the feature.
+func Timeout(d time.Duration) {
+	if d < 0 {
+		return
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	timeout = d
+}
+
+// Wait for all connections to gracefully shut down. This is commonly called at
+// the bottom of the main() function to prevent the program from exiting
+// prematurely.
+func Wait() {
+	<-wait
+}
+
+func init() {
+	go sigLoop()
+}
+func sigLoop() {
+	var last time.Time
+	for {
+		<-sigchan
+		now := time.Now()
+		mu.Lock()
+		force := doubleKick != 0 && now.Sub(last) < doubleKick
+		if t := timeout; t != 0 && !force {
+			go func() {
+				time.Sleep(t)
+				shutdown(true)
+			}()
+		}
+		mu.Unlock()
+		go shutdown(force)
+		last = now
+	}
+}
+
+var preOnce, closeOnce, forceOnce, postOnce, notifyOnce sync.Once
+
+func shutdown(force bool) {
+	preOnce.Do(func() {
+		mu.Lock()
+		defer mu.Unlock()
+		for _, f := range prehooks {
+			f()
+		}
+	})
+
+	if force {
+		forceOnce.Do(func() {
+			closeListeners(force)
+		})
+	} else {
+		closeOnce.Do(func() {
+			closeListeners(force)
+		})
+	}
+
+	postOnce.Do(func() {
+		mu.Lock()
+		defer mu.Unlock()
+		for _, f := range posthooks {
+			f()
+		}
+	})
+
+	notifyOnce.Do(func() {
+		close(wait)
+	})
+}
+
+func closeListeners(force bool) {
+	atomic.StoreInt32(&closing, 1)
+
+	var wg sync.WaitGroup
+	defer wg.Wait()
+
+	mu.Lock()
+	defer mu.Unlock()
+	wg.Add(len(listeners))
+
+	for _, l := range listeners {
+		go func(l *listener.T) {
+			defer wg.Done()
+			l.Close()
+			if force {
+				l.DrainAll()
+			} else {
+				l.Drain()
+			}
+		}(l)
+	}
+}
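
For reference, a minimal sketch of how an application might drive this vendored package end to end. The mux, handler, log messages, and the ":8080" address are illustrative and are not part of the vendored code:

package main

import (
	"log"
	"net/http"

	"github.com/zenazn/goji/graceful"
)

func main() {
	// Begin a graceful shutdown on SIGINT (and on SIGUSR2 when running
	// under Einhorn, per einhorn.go above).
	graceful.HandleSignals()

	// Optional hooks around the shutdown sequence.
	graceful.PreHook(func() { log.Println("received signal, shutting down") })
	graceful.PostHook(func() { log.Println("all connections drained") })

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello\n"))
	})

	// Mirrors net/http.ListenAndServe, but tracks connection idleness so
	// idle connections can be closed during shutdown.
	if err := graceful.ListenAndServe(":8080", mux); err != nil {
		log.Fatal(err)
	}

	// Block until outstanding connections finish and post-hooks have run.
	graceful.Wait()
}

Note that graceful.ListenAndServe returns once the wrapped listener is closed during shutdown (peacefulError swallows the expected accept error), after which Wait blocks until draining completes.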
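A second sketch shows the lower-level listener sub-package used directly in Manual mode, where the caller marks connections busy or idle itself. The echo handler and the ":9090" address are made up for illustration:

package main

import (
	"log"
	"net"
	"os"
	"os/signal"

	"github.com/zenazn/goji/graceful/listener"
)

func main() {
	raw, err := net.Listen("tcp", ":9090")
	if err != nil {
		log.Fatal(err)
	}
	// Manual mode: we tell the listener when each connection is busy and
	// when it may safely be closed.
	l := listener.Wrap(raw, listener.Manual)

	go func() {
		for {
			c, err := l.Accept()
			if err != nil {
				return // listener closed
			}
			go echo(c)
		}
	}()

	// On SIGINT: stop accepting, close idle connections, and wait for
	// busy ones to finish.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
	l.Close()
	l.Drain()
}

func echo(c net.Conn) {
	defer c.Close()
	buf := make([]byte, 1024)
	for {
		listener.MarkIdle(c) // safe to close while waiting for input
		n, err := c.Read(buf)
		if err != nil {
			return
		}
		listener.MarkInUse(c) // now servicing a request
		c.Write(buf[:n])
	}
}

Closing the wrapped listener stops new accepts; Drain then closes idle connections and waits for busy ones, which mirrors what closeListeners in signal.go does for the HTTP path.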