Просмотр исходного кода

Log irregular tunnels

- Certain obfuscation failure cases are reasonable indicators
  that the client is irregular or invalid

- Add obfuscation seed history to detect duplicate seeds
Rod Hynes 6 лет назад
Родитель
Commit
a105d2aad7

+ 59 - 0
psiphon/common/obfuscator/history.go

@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2020, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package obfuscator
+
+import (
+	"time"
+
+	lrucache "github.com/cognusion/go-cache-lru"
+)
+
+const (
+	OBFUSCATE_SEED_HISTORY_TTL         = 24 * time.Hour
+	OBFUSCATE_SEED_HISTORY_MAX_ENTRIES = 1000000
+)
+
+// SeedHistory maintains a history of recently observed obfuscation seed values.
+type SeedHistory struct {
+	history *lrucache.Cache
+}
+
+// NewSeedHistory creates a new SeedHistory.
+func NewSeedHistory() *SeedHistory {
+
+	// TTL and MAX_ENTRIES are tuned to provide an effective history size while
+	// bounding the amount of memory that will be used. While a probabilistic
+	// data structure such as a Bloom filter would provide a smaller memory
+	// footprint, we wish to avoid the associated risk of false positives.
+
+	return &SeedHistory{
+		history: lrucache.NewWithLRU(
+			OBFUSCATE_SEED_HISTORY_TTL,
+			1*time.Minute,
+			OBFUSCATE_SEED_HISTORY_MAX_ENTRIES),
+	}
+}
+
+// AddNew adds a new seed value to the history. If the seed value is already
+// in the history, AddNew returns false.
+func (s *SeedHistory) AddNew(seed []byte) bool {
+	err := s.history.Add(string(seed), true, 0)
+	return err == nil
+}

+ 45 - 2
psiphon/common/obfuscator/obfuscatedSshConn.go

@@ -117,12 +117,17 @@ const (
 // sequence. In OBFUSCATION_CONN_MODE_SERVER mode, the server obtains its PRNG
 // seed from the client's initial obfuscator message, resulting in the server
 // replaying its padding as well.
+//
+// seedHistory and irregularLogger are optional ObfuscatorConfig parameters
+// used only in OBFUSCATION_CONN_MODE_SERVER.
 func NewObfuscatedSSHConn(
 	mode ObfuscatedSSHConnMode,
 	conn net.Conn,
 	obfuscationKeyword string,
 	obfuscationPaddingPRNGSeed *prng.Seed,
-	minPadding, maxPadding *int) (*ObfuscatedSSHConn, error) {
+	minPadding, maxPadding *int,
+	seedHistory *SeedHistory,
+	irregularLogger func(error)) (*ObfuscatedSSHConn, error) {
 
 	var err error
 	var obfuscator *Obfuscator
@@ -147,7 +152,9 @@ func NewObfuscatedSSHConn(
 		// NewServerObfuscator reads a seed message from conn
 		obfuscator, err = NewServerObfuscator(
 			conn, &ObfuscatorConfig{
-				Keyword: obfuscationKeyword,
+				Keyword:         obfuscationKeyword,
+				SeedHistory:     seedHistory,
+				IrregularLogger: irregularLogger,
 			})
 		if err != nil {
 
@@ -186,6 +193,42 @@ func NewObfuscatedSSHConn(
 	}, nil
 }
 
+// NewClientObfuscatedSSHConn creates a client ObfuscatedSSHConn. See
+// documentation in NewObfuscatedSSHConn.
+func NewClientObfuscatedSSHConn(
+	conn net.Conn,
+	obfuscationKeyword string,
+	obfuscationPaddingPRNGSeed *prng.Seed,
+	minPadding, maxPadding *int) (*ObfuscatedSSHConn, error) {
+
+	return NewObfuscatedSSHConn(
+		OBFUSCATION_CONN_MODE_CLIENT,
+		conn,
+		obfuscationKeyword,
+		obfuscationPaddingPRNGSeed,
+		minPadding, maxPadding,
+		nil,
+		nil)
+}
+
+// NewServerObfuscatedSSHConn creates a server ObfuscatedSSHConn. See
+// documentation in NewObfuscatedSSHConn.
+func NewServerObfuscatedSSHConn(
+	conn net.Conn,
+	obfuscationKeyword string,
+	seedHistory *SeedHistory,
+	irregularLogger func(error)) (*ObfuscatedSSHConn, error) {
+
+	return NewObfuscatedSSHConn(
+		OBFUSCATION_CONN_MODE_SERVER,
+		conn,
+		obfuscationKeyword,
+		nil,
+		nil, nil,
+		seedHistory,
+		irregularLogger)
+}
+
 // GetDerivedPRNG creates a new PRNG with a seed derived from the
 // ObfuscatedSSHConn padding seed and distinguished by the salt, which should
 // be a unique identifier for each usage context.

+ 37 - 2
psiphon/common/obfuscator/obfuscator.go

@@ -59,11 +59,18 @@ type Obfuscator struct {
 	paddingPRNG          *prng.PRNG
 }
 
+// ObfuscatorConfig specifies an Obfuscator configuration.
 type ObfuscatorConfig struct {
 	Keyword         string
 	PaddingPRNGSeed *prng.Seed
 	MinPadding      *int
 	MaxPadding      *int
+
+	// SeedHistory and IrregularLogger are optional parameters used only by
+	// server obfuscators.
+
+	SeedHistory     *SeedHistory
+	IrregularLogger func(error)
 }
 
 // NewClientObfuscator creates a new Obfuscator, staging a seed message to be
@@ -268,6 +275,26 @@ func readSeedMessage(
 		return nil, nil, nil, errors.Trace(err)
 	}
 
+	// Irregular events that indicate an invalid client are logged via
+	// IrregularLogger. Note that event detection isn't infallible. For example,
+	// a man-in-the-middle may have manipulated the seed message sent by a valid
+	// client; or with a very small probability a valid client may generate a
+	// duplicate seed message.
+	//
+	//  Network I/O failures (e.g., failure to read the expected number of seed
+	//  message bytes) are not considered a reliable indicator of irregular
+	//  events.
+
+	if config.SeedHistory != nil {
+		if !config.SeedHistory.AddNew(seed) {
+			err := errors.TraceNew("duplicate obfuscation seed")
+			if config.IrregularLogger != nil {
+				config.IrregularLogger(err)
+			}
+			return nil, nil, nil, err
+		}
+	}
+
 	clientToServerCipher, serverToClientCipher, err := initObfuscatorCiphers(seed, config)
 	if err != nil {
 		return nil, nil, nil, errors.Trace(err)
@@ -299,11 +326,19 @@ func readSeedMessage(
 	}
 
 	if magicValue != OBFUSCATE_MAGIC_VALUE {
-		return nil, nil, nil, errors.TraceNew("invalid magic value")
+		err := errors.TraceNew("invalid magic value")
+		if config.IrregularLogger != nil {
+			config.IrregularLogger(err)
+		}
+		return nil, nil, nil, err
 	}
 
 	if paddingLength < 0 || paddingLength > OBFUSCATE_MAX_PADDING {
-		return nil, nil, nil, errors.TraceNew("invalid padding length")
+		err := errors.TraceNew("invalid padding length")
+		if config.IrregularLogger != nil {
+			config.IrregularLogger(err)
+		}
+		return nil, nil, nil, err
 	}
 
 	padding := make([]byte, paddingLength)

+ 19 - 4
psiphon/common/obfuscator/obfuscator_test.go

@@ -47,6 +47,8 @@ func TestObfuscator(t *testing.T) {
 		Keyword:         keyword,
 		MaxPadding:      &maxPadding,
 		PaddingPRNGSeed: paddingPRNGSeed,
+		SeedHistory:     NewSeedHistory(),
+		IrregularLogger: func(err error) { t.Logf("IrregularLogger: %s", err) },
 	}
 
 	client, err := NewClientObfuscator(config)
@@ -80,6 +82,13 @@ func TestObfuscator(t *testing.T) {
 	if !bytes.Equal(serverMessage, b) {
 		t.Fatalf("unexpected client message")
 	}
+
+	// Test: duplicate obfuscation seed
+
+	server, err = NewServerObfuscator(bytes.NewReader(seedMessage), config)
+	if err == nil {
+		t.Fatalf("NewServerObfuscator unexpectedly succeeded")
+	}
 }
 
 func TestObfuscatedSSHConn(t *testing.T) {
@@ -119,8 +128,11 @@ func TestObfuscatedSSHConn(t *testing.T) {
 		conn, err := listener.Accept()
 
 		if err == nil {
-			conn, err = NewObfuscatedSSHConn(
-				OBFUSCATION_CONN_MODE_SERVER, conn, keyword, nil, nil, nil)
+			conn, err = NewServerObfuscatedSSHConn(
+				conn,
+				keyword,
+				NewSeedHistory(),
+				func(err error) { t.Fatalf("IrregularLogger: %s", err) })
 		}
 
 		if err == nil {
@@ -150,8 +162,11 @@ func TestObfuscatedSSHConn(t *testing.T) {
 		}
 
 		if err == nil {
-			conn, err = NewObfuscatedSSHConn(
-				OBFUSCATION_CONN_MODE_CLIENT, conn, keyword, paddingPRNGSeed, nil, nil)
+			conn, err = NewClientObfuscatedSSHConn(
+				conn,
+				keyword,
+				paddingPRNGSeed,
+				nil, nil)
 		}
 
 		var KEXPRNGSeed *prng.Seed

+ 89 - 78
psiphon/server/meek.go

@@ -97,20 +97,21 @@ const (
 // HTTP payload traffic for a given session into net.Conn conforming Read()s and Write()s via
 // the meekConn struct.
 type MeekServer struct {
-	support           *SupportServices
-	listener          net.Listener
-	tlsConfig         *tris.Config
-	clientHandler     func(clientTunnelProtocol string, clientConn net.Conn)
-	openConns         *common.Conns
-	stopBroadcast     <-chan struct{}
-	sessionsLock      sync.RWMutex
-	sessions          map[string]*meekSession
-	checksumTable     *crc64.Table
-	bufferPool        *CachedResponseBufferPool
-	rateLimitLock     sync.Mutex
-	rateLimitHistory  map[string][]time.Time
-	rateLimitCount    int
-	rateLimitSignalGC chan struct{}
+	support               *SupportServices
+	listener              net.Listener
+	tlsConfig             *tris.Config
+	obfuscatorSeedHistory *obfuscator.SeedHistory
+	clientHandler         func(clientTunnelProtocol string, clientConn net.Conn)
+	openConns             *common.Conns
+	stopBroadcast         <-chan struct{}
+	sessionsLock          sync.RWMutex
+	sessions              map[string]*meekSession
+	checksumTable         *crc64.Table
+	bufferPool            *CachedResponseBufferPool
+	rateLimitLock         sync.Mutex
+	rateLimitHistory      map[string][]time.Time
+	rateLimitCount        int
+	rateLimitSignalGC     chan struct{}
 }
 
 // NewMeekServer initializes a new meek server.
@@ -136,16 +137,17 @@ func NewMeekServer(
 	bufferPool := NewCachedResponseBufferPool(bufferLength, bufferCount)
 
 	meekServer := &MeekServer{
-		support:           support,
-		listener:          listener,
-		clientHandler:     clientHandler,
-		openConns:         common.NewConns(),
-		stopBroadcast:     stopBroadcast,
-		sessions:          make(map[string]*meekSession),
-		checksumTable:     checksumTable,
-		bufferPool:        bufferPool,
-		rateLimitHistory:  make(map[string][]time.Time),
-		rateLimitSignalGC: make(chan struct{}, 1),
+		support:               support,
+		listener:              listener,
+		obfuscatorSeedHistory: obfuscator.NewSeedHistory(),
+		clientHandler:         clientHandler,
+		openConns:             common.NewConns(),
+		stopBroadcast:         stopBroadcast,
+		sessions:              make(map[string]*meekSession),
+		checksumTable:         checksumTable,
+		bufferPool:            bufferPool,
+		rateLimitHistory:      make(map[string][]time.Time),
+		rateLimitSignalGC:     make(chan struct{}, 1),
 	}
 
 	if useTLS {
@@ -563,7 +565,7 @@ func (server *MeekServer) getSessionOrEndpoint(
 	// The session is new (or expired). Treat the cookie value as a new meek
 	// cookie, extract the payload, and create a new session.
 
-	payloadJSON, err := getMeekCookiePayload(server.support, meekCookie.Value)
+	payloadJSON, err := server.getMeekCookiePayload(clientIP, meekCookie.Value)
 	if err != nil {
 		return "", nil, "", "", errors.Trace(err)
 	}
@@ -850,6 +852,68 @@ func (server *MeekServer) httpConnStateCallback(conn net.Conn, connState http.Co
 	}
 }
 
+// getMeekCookiePayload extracts the payload from a meek cookie. The cookie
+// payload is base64 encoded, obfuscated, and NaCl encrypted.
+func (server *MeekServer) getMeekCookiePayload(
+	clientIP string, cookieValue string) ([]byte, error) {
+
+	decodedValue, err := base64.StdEncoding.DecodeString(cookieValue)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// The data consists of an obfuscated seed message prepended
+	// to the obfuscated, encrypted payload. The server obfuscator
+	// will read the seed message, leaving the remaining encrypted
+	// data in the reader.
+
+	reader := bytes.NewReader(decodedValue[:])
+
+	obfuscator, err := obfuscator.NewServerObfuscator(
+		reader,
+		&obfuscator.ObfuscatorConfig{
+			Keyword:     server.support.Config.MeekObfuscatedKey,
+			SeedHistory: server.obfuscatorSeedHistory,
+			IrregularLogger: func(err error) {
+				logIrregularTunnel(
+					server.support.GeoIPService.Lookup(clientIP), err)
+			},
+		})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	offset, err := reader.Seek(0, 1)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	encryptedPayload := decodedValue[offset:]
+
+	obfuscator.ObfuscateClientToServer(encryptedPayload)
+
+	var nonce [24]byte
+	var privateKey, ephemeralPublicKey [32]byte
+
+	decodedPrivateKey, err := base64.StdEncoding.DecodeString(
+		server.support.Config.MeekCookieEncryptionPrivateKey)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	copy(privateKey[:], decodedPrivateKey)
+
+	if len(encryptedPayload) < 32 {
+		return nil, errors.TraceNew("unexpected encrypted payload size")
+	}
+	copy(ephemeralPublicKey[0:32], encryptedPayload[0:32])
+
+	payload, ok := box.Open(nil, encryptedPayload[32:], &nonce, &ephemeralPublicKey, &privateKey)
+	if !ok {
+		return nil, errors.TraceNew("open box failed")
+	}
+
+	return payload, nil
+}
+
 type meekSession struct {
 	// Note: 64-bit ints used with atomic operations are placed
 	// at the start of struct to ensure 64-bit alignment.
@@ -1024,59 +1088,6 @@ func makeMeekTLSConfig(
 	return config, nil
 }
 
-// getMeekCookiePayload extracts the payload from a meek cookie. The cookie
-// payload is base64 encoded, obfuscated, and NaCl encrypted.
-func getMeekCookiePayload(support *SupportServices, cookieValue string) ([]byte, error) {
-	decodedValue, err := base64.StdEncoding.DecodeString(cookieValue)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-
-	// The data consists of an obfuscated seed message prepended
-	// to the obfuscated, encrypted payload. The server obfuscator
-	// will read the seed message, leaving the remaining encrypted
-	// data in the reader.
-
-	reader := bytes.NewReader(decodedValue[:])
-
-	obfuscator, err := obfuscator.NewServerObfuscator(
-		reader,
-		&obfuscator.ObfuscatorConfig{Keyword: support.Config.MeekObfuscatedKey})
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-
-	offset, err := reader.Seek(0, 1)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	encryptedPayload := decodedValue[offset:]
-
-	obfuscator.ObfuscateClientToServer(encryptedPayload)
-
-	var nonce [24]byte
-	var privateKey, ephemeralPublicKey [32]byte
-
-	decodedPrivateKey, err := base64.StdEncoding.DecodeString(
-		support.Config.MeekCookieEncryptionPrivateKey)
-	if err != nil {
-		return nil, errors.Trace(err)
-	}
-	copy(privateKey[:], decodedPrivateKey)
-
-	if len(encryptedPayload) < 32 {
-		return nil, errors.TraceNew("unexpected encrypted payload size")
-	}
-	copy(ephemeralPublicKey[0:32], encryptedPayload[0:32])
-
-	payload, ok := box.Open(nil, encryptedPayload[32:], &nonce, &ephemeralPublicKey, &privateKey)
-	if !ok {
-		return nil, errors.TraceNew("open box failed")
-	}
-
-	return payload, nil
-}
-
 // makeMeekSessionID creates a new session ID. The variable size is intended to
 // frustrate traffic analysis of both plaintext and TLS meek traffic.
 func makeMeekSessionID() (string, error) {

+ 6 - 0
psiphon/server/services.go

@@ -358,6 +358,12 @@ func logServerLoad(server *TunnelServer) {
 	}
 }
 
+func logIrregularTunnel(geoIPData GeoIPData, tunnelError error) {
+	irregularTunnel := getRequestLogFields("irregular_tunnel", geoIPData, nil, nil, nil)
+	irregularTunnel["tunnel_error"] = tunnelError.Error()
+	log.LogRawFieldsWithTimestamp(irregularTunnel)
+}
+
 // SupportServices carries common and shared data components
 // across different server components. SupportServices implements a
 // hot reload of traffic rules, psinet database, and geo IP database

+ 8 - 4
psiphon/server/tunnelServer.go

@@ -352,6 +352,7 @@ type sshServer struct {
 	oslSessionCache              *cache.Cache
 	authorizationSessionIDsMutex sync.Mutex
 	authorizationSessionIDs      map[string]string
+	obfuscatorSeedHistory        *obfuscator.SeedHistory
 }
 
 func newSSHServer(
@@ -397,6 +398,7 @@ func newSSHServer(
 		clients:                 make(map[string]*sshClient),
 		oslSessionCache:         oslSessionCache,
 		authorizationSessionIDs: make(map[string]string),
+		obfuscatorSeedHistory:   obfuscator.NewSeedHistory(),
 	}, nil
 }
 
@@ -1225,13 +1227,15 @@ func (sshClient *sshClient) run(
 		// Wrap the connection in an SSH deobfuscator when required.
 
 		if err == nil && protocol.TunnelProtocolUsesObfuscatedSSH(sshClient.tunnelProtocol) {
-			// Note: NewObfuscatedSSHConn blocks on network I/O
+
+			// Note: NewServerObfuscatedSSHConn blocks on network I/O
 			// TODO: ensure this won't block shutdown
-			result.obfuscatedSSHConn, err = obfuscator.NewObfuscatedSSHConn(
-				obfuscator.OBFUSCATION_CONN_MODE_SERVER,
+			result.obfuscatedSSHConn, err = obfuscator.NewServerObfuscatedSSHConn(
 				conn,
 				sshClient.sshServer.support.Config.ObfuscatedSSHKey,
-				nil, nil, nil)
+				sshClient.sshServer.obfuscatorSeedHistory,
+				func(err error) { logIrregularTunnel(sshClient.geoIPData, err) })
+
 			if err != nil {
 				err = errors.Trace(err)
 			} else {

+ 1 - 2
psiphon/tunnel.go

@@ -697,8 +697,7 @@ func dialTunnel(
 	// Add obfuscated SSH layer
 	var sshConn net.Conn = throttledConn
 	if protocol.TunnelProtocolUsesObfuscatedSSH(dialParams.TunnelProtocol) {
-		obfuscatedSSHConn, err := obfuscator.NewObfuscatedSSHConn(
-			obfuscator.OBFUSCATION_CONN_MODE_CLIENT,
+		obfuscatedSSHConn, err := obfuscator.NewClientObfuscatedSSHConn(
 			throttledConn,
 			dialParams.ServerEntry.SshObfuscatedKey,
 			dialParams.ObfuscatorPaddingSeed,

+ 10 - 0
vendor/github.com/cognusion/go-cache-lru/CONTRIBUTORS

@@ -0,0 +1,10 @@
+This is a list of people who have contributed code to go-cache. They, or their
+employers, are the copyright holders of the contributed code. Contributed code
+is subject to the license restrictions listed in LICENSE (as they were when the
+code was contributed.)
+
+Dustin Sallings <dustin@spy.net>
+Jason Mooberry <jasonmoo@me.com>
+Matthew Keller	<m@cognusion.com>
+Sergey Shepelev <temotor@gmail.com>
+Alex Edwards <ajmedwards@gmail.com>

+ 19 - 0
vendor/github.com/cognusion/go-cache-lru/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2012-2017 Patrick Mylund Nielsen and the go-cache contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 92 - 0
vendor/github.com/cognusion/go-cache-lru/README.md

@@ -0,0 +1,92 @@
+# go-cache
+
+go-cache is an in-memory key:value store/cache similar to memcached that is
+suitable for applications running on a single machine. Its major advantage is
+that, being essentially a thread-safe `map[string]interface{}` with expiration
+times, it doesn't need to serialize or transmit its contents over the network.
+
+Any object can be stored, for a given duration or forever, and the cache can be
+safely used by multiple goroutines.
+
+Although go-cache isn't meant to be used as a persistent datastore, the entire
+cache can be saved to and loaded from a file (using `c.Items()` to retrieve the
+items map to serialize, and `NewFrom()` to create a cache from a deserialized
+one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.)
+
+When creating a cache object using `NewWithLRU()`, if you set the maxItems value
+above 0, the LRU functionality is enabled. The cache automatically updates a
+timestamp every time a given item is retrieved. In the background, the janitor takes
+care of deleting items that have expired because of staleness, or are
+least-recently-used when the cache is under pressure. Whatever you set your purge
+interval to controls when the item will actually be removed from the cache. If you
+don't want to use the janitor, and wish to manually purge LRU items, then
+`c.DeleteLRU(n)` where `n` is the number of items you'd like to purge.
+
+### Installation
+
+`go get github.com/patrickmn/go-cache`
+
+### Usage
+
+```go
+import (
+	"fmt"
+	"github.com/patrickmn/go-cache"
+	"time"
+)
+
+func main() {
+	// Create a cache with a default expiration time of 5 minutes, and which
+	// purges expired items every 10 minutes
+	c := cache.New(5*time.Minute, 10*time.Minute)
+
+	// Set the value of the key "foo" to "bar", with the default expiration time
+	c.Set("foo", "bar", cache.DefaultExpiration)
+
+	// Set the value of the key "baz" to 42, with no expiration time
+	// (the item won't be removed until it is re-set, or removed using
+	// c.Delete("baz")
+	c.Set("baz", 42, cache.NoExpiration)
+
+	// Get the string associated with the key "foo" from the cache
+	foo, found := c.Get("foo")
+	if found {
+		fmt.Println(foo)
+	}
+
+	// Since Go is statically typed, and cache values can be anything, type
+	// assertion is needed when values are being passed to functions that don't
+	// take arbitrary types, (i.e. interface{}). The simplest way to do this for
+	// values which will only be used once--e.g. for passing to another
+	// function--is:
+	foo, found := c.Get("foo")
+	if found {
+		MyFunction(foo.(string))
+	}
+
+	// This gets tedious if the value is used several times in the same function.
+	// You might do either of the following instead:
+	if x, found := c.Get("foo"); found {
+		foo := x.(string)
+		// ...
+	}
+	// or
+	var foo string
+	if x, found := c.Get("foo"); found {
+		foo = x.(string)
+	}
+	// ...
+	// foo can then be passed around freely as a string
+
+	// Want performance? Store pointers!
+	c.Set("foo", &MyStruct, cache.DefaultExpiration)
+	if x, found := c.Get("foo"); found {
+		foo := x.(*MyStruct)
+			// ...
+	}
+}
+```
+
+### Reference
+
+`godoc` or [http://godoc.org/github.com/patrickmn/go-cache](http://godoc.org/github.com/patrickmn/go-cache)

+ 1466 - 0
vendor/github.com/cognusion/go-cache-lru/cache.go

@@ -0,0 +1,1466 @@
+package cache
+
+import (
+	"encoding/gob"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"time"
+)
+
+type Item struct {
+	Object     interface{}
+	Expiration int64
+	Accessed   int64
+}
+
+// Returns true if the item has expired.
+func (item Item) Expired() bool {
+	if item.Expiration == 0 {
+		return false
+	}
+	return time.Now().UnixNano() > item.Expiration
+}
+
+// Return the time at which this item was last accessed.
+func (item Item) LastAccessed() time.Time {
+	return time.Unix(0, item.Accessed)
+}
+
+const (
+	// For use with functions that take an expiration time.
+	NoExpiration time.Duration = -1
+	// For use with functions that take an expiration time. Equivalent to
+	// passing in the same expiration duration as was given to New() or
+	// NewFrom() when the cache was created (e.g. 5 minutes.)
+	DefaultExpiration time.Duration = 0
+)
+
+type Cache struct {
+	*cache
+	// If this is confusing, see the comment at the bottom of New()
+}
+
+type cache struct {
+	defaultExpiration time.Duration
+	items             map[string]Item
+	mu                sync.RWMutex
+	onEvicted         func(string, interface{})
+	janitor           *janitor
+	maxItems          int
+}
+
+// Add an item to the cache, replacing any existing item. If the duration is 0
+// (DefaultExpiration), the cache's default expiration time is used. If it is -1
+// (NoExpiration), the item never expires.
+func (c *cache) Set(k string, x interface{}, d time.Duration) {
+	// "Inlining" of set
+	var (
+		now time.Time
+		e   int64
+	)
+	if d == DefaultExpiration {
+		d = c.defaultExpiration
+	}
+	if d > 0 {
+		now = time.Now()
+		e = now.Add(d).UnixNano()
+	}
+	if c.maxItems > 0 {
+		if d <= 0 {
+			// d <= 0 means we didn't set now above
+			now = time.Now()
+		}
+		c.mu.Lock()
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+			Accessed:   now.UnixNano(),
+		}
+		// TODO: Calls to mu.Unlock are currently not deferred because
+		// defer adds ~200 ns (as of go1.)
+		c.mu.Unlock()
+	} else {
+		c.mu.Lock()
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+		}
+		c.mu.Unlock()
+	}
+}
+
+func (c *cache) set(k string, x interface{}, d time.Duration) {
+	var (
+		now time.Time
+		e   int64
+	)
+	if d == DefaultExpiration {
+		d = c.defaultExpiration
+	}
+	if d > 0 {
+		now = time.Now()
+		e = now.Add(d).UnixNano()
+	}
+	if c.maxItems > 0 {
+		if d <= 0 {
+			// d <= 0 means we didn't set now above
+			now = time.Now()
+		}
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+			Accessed:   now.UnixNano(),
+		}
+	} else {
+		c.items[k] = Item{
+			Object:     x,
+			Expiration: e,
+		}
+	}
+}
+
+// Add an item to the cache, replacing any existing item, using the default
+// expiration.
+func (c *cache) SetDefault(k string, x interface{}) {
+	c.Set(k, x, DefaultExpiration)
+}
+
+// Add an item to the cache only if an item doesn't already exist for the given
+// key, or if the existing item has expired. Returns an error otherwise.
+func (c *cache) Add(k string, x interface{}, d time.Duration) error {
+	c.mu.Lock()
+	_, found := c.get(k)
+	if found {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s already exists", k)
+	}
+	c.set(k, x, d)
+	c.mu.Unlock()
+	return nil
+}
+
+// Set a new value for the cache key only if it already exists, and the existing
+// item hasn't expired. Returns an error otherwise.
+func (c *cache) Replace(k string, x interface{}, d time.Duration) error {
+	c.mu.Lock()
+	_, found := c.get(k)
+	if !found {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s doesn't exist", k)
+	}
+	c.set(k, x, d)
+	c.mu.Unlock()
+	return nil
+}
+
+// Get an item from the cache. Returns the item or nil, and a bool indicating
+// whether the key was found.
+func (c *cache) Get(k string) (interface{}, bool) {
+	if c.maxItems > 0 {
+		// LRU enabled; Get implies write
+		c.mu.Lock()
+	} else {
+		// LRU not enabled; Get is read-only
+		c.mu.RLock()
+	}
+	// "Inlining" of get and Expired
+	item, found := c.items[k]
+	if !found {
+		if c.maxItems > 0 {
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
+		return nil, false
+	}
+	var now int64
+	if item.Expiration > 0 {
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			if c.maxItems > 0 {
+				c.mu.Unlock()
+			} else {
+				c.mu.RUnlock()
+			}
+			return nil, false
+		}
+	}
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+		c.mu.Unlock()
+	} else {
+		c.mu.RUnlock()
+	}
+	return item.Object, true
+}
+
+// If LRU functionality is being used (and get implies updating item.Accessed,)
+// this function must be write-locked.
+func (c *cache) get(k string) (interface{}, bool) {
+	item, found := c.items[k]
+	if !found {
+		return nil, false
+	}
+	// "Inlining" of Expired
+	var now int64
+	if item.Expiration > 0 {
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			return nil, false
+		}
+	}
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+	}
+	return item.Object, true
+}
+
+// GetWithExpiration returns an item and its expiration time from the cache.
+// It returns the item or nil, the expiration time if one is set (if the item
+// never expires a zero value for time.Time is returned), and a bool indicating
+// whether the key was found.
+func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) {
+	if c.maxItems > 0 {
+		// LRU enabled; GetWithExpiration implies write
+		c.mu.Lock()
+	} else {
+		// LRU not enabled; GetWithExpiration is read-only
+		c.mu.RLock()
+	}
+	// "Inlining" of get and Expired
+	item, found := c.items[k]
+	if !found {
+		if c.maxItems > 0 {
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
+		return nil, time.Time{}, false
+	}
+	var now int64
+	if item.Expiration > 0 {
+		now = time.Now().UnixNano()
+		if now > item.Expiration {
+			if c.maxItems > 0 {
+				c.mu.Unlock()
+			} else {
+				c.mu.RUnlock()
+			}
+			return nil, time.Time{}, false
+		}
+		if c.maxItems > 0 {
+			if now == 0 {
+				now = time.Now().UnixNano()
+			}
+			item.Accessed = now
+			c.items[k] = item
+			c.mu.Unlock()
+		} else {
+			c.mu.RUnlock()
+		}
+		return item.Object, time.Unix(0, item.Expiration), true
+	}
+	if c.maxItems > 0 {
+		if now == 0 {
+			now = time.Now().UnixNano()
+		}
+		item.Accessed = now
+		c.items[k] = item
+		c.mu.Unlock()
+	} else {
+		c.mu.RUnlock()
+	}
+	// If expiration <= 0 (i.e. no expiration time set) then return the item
+	// and a zeroed time.Time
+	return item.Object, time.Time{}, true
+}
+
+// Increment an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to increment it by n. To retrieve the incremented value, use one
+// of the specialized methods, e.g. IncrementInt64.
+func (c *cache) Increment(k string, n int64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	switch v.Object.(type) {
+	case int:
+		v.Object = v.Object.(int) + int(n)
+	case int8:
+		v.Object = v.Object.(int8) + int8(n)
+	case int16:
+		v.Object = v.Object.(int16) + int16(n)
+	case int32:
+		v.Object = v.Object.(int32) + int32(n)
+	case int64:
+		v.Object = v.Object.(int64) + n
+	case uint:
+		v.Object = v.Object.(uint) + uint(n)
+	case uintptr:
+		v.Object = v.Object.(uintptr) + uintptr(n)
+	case uint8:
+		v.Object = v.Object.(uint8) + uint8(n)
+	case uint16:
+		v.Object = v.Object.(uint16) + uint16(n)
+	case uint32:
+		v.Object = v.Object.(uint32) + uint32(n)
+	case uint64:
+		v.Object = v.Object.(uint64) + uint64(n)
+	case float32:
+		v.Object = v.Object.(float32) + float32(n)
+	case float64:
+		v.Object = v.Object.(float64) + float64(n)
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s is not an integer", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Increment an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to increment it by n. Pass a negative number to decrement the
+// value. To retrieve the incremented value, use one of the specialized methods,
+// e.g. IncrementFloat64.
+func (c *cache) IncrementFloat(k string, n float64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	switch v.Object.(type) {
+	case float32:
+		v.Object = v.Object.(float32) + float32(n)
+	case float64:
+		v.Object = v.Object.(float64) + n
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Increment an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt(k string, n int) (int, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt8(k string, n int8) (int8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int8", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int16", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt32(k string, n int32) (int32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementInt64(k string, n int64) (int64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the incremented
+// value is returned.
+func (c *cache) IncrementUint(k string, n uint) (uint, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uintptr)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint8 by n. Returns an error if the item's value
+// is not an uint8, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint8", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint16", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat32(k string, n float32) (float32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(float32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float32", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Increment an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// incremented value is returned.
+func (c *cache) IncrementFloat64(k string, n float64) (float64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(float64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float64", k)
+	}
+	nv := rv + n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint,
+// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the
+// item's value is not an integer, if it was not found, or if it is not
+// possible to decrement it by n. To retrieve the decremented value, use one
+// of the specialized methods, e.g. DecrementInt64.
+func (c *cache) Decrement(k string, n int64) error {
+	// TODO: Implement Increment and Decrement more cleanly.
+	// (Cannot do Increment(k, n*-1) for uints.)
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item not found")
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	switch v.Object.(type) {
+	case int:
+		v.Object = v.Object.(int) - int(n)
+	case int8:
+		v.Object = v.Object.(int8) - int8(n)
+	case int16:
+		v.Object = v.Object.(int16) - int16(n)
+	case int32:
+		v.Object = v.Object.(int32) - int32(n)
+	case int64:
+		v.Object = v.Object.(int64) - n
+	case uint:
+		v.Object = v.Object.(uint) - uint(n)
+	case uintptr:
+		v.Object = v.Object.(uintptr) - uintptr(n)
+	case uint8:
+		v.Object = v.Object.(uint8) - uint8(n)
+	case uint16:
+		v.Object = v.Object.(uint16) - uint16(n)
+	case uint32:
+		v.Object = v.Object.(uint32) - uint32(n)
+	case uint64:
+		v.Object = v.Object.(uint64) - uint64(n)
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - float64(n)
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s is not an integer", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type float32 or float64 by n. Returns an error if the
+// item's value is not floating point, if it was not found, or if it is not
+// possible to decrement it by n. Pass a negative number to decrement the
+// value. To retrieve the decremented value, use one of the specialized methods,
+// e.g. DecrementFloat64.
+func (c *cache) DecrementFloat(k string, n float64) error {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	switch v.Object.(type) {
+	case float32:
+		v.Object = v.Object.(float32) - float32(n)
+	case float64:
+		v.Object = v.Object.(float64) - n
+	default:
+		c.mu.Unlock()
+		return fmt.Errorf("The value for %s does not have type float32 or float64", k)
+	}
+	c.items[k] = v
+	c.mu.Unlock()
+	return nil
+}
+
+// Decrement an item of type int by n. Returns an error if the item's value is
+// not an int, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt(k string, n int) (int, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int8 by n. Returns an error if the item's value is
+// not an int8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt8(k string, n int8) (int8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int8", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int16 by n. Returns an error if the item's value is
+// not an int16, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt16(k string, n int16) (int16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int16", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int32 by n. Returns an error if the item's value is
+// not an int32, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt32(k string, n int32) (int32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type int64 by n. Returns an error if the item's value is
+// not an int64, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementInt64(k string, n int64) (int64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(int64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an int64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint by n. Returns an error if the item's value is
+// not an uint, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint(k string, n uint) (uint, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uintptr by n. Returns an error if the item's value
+// is not an uintptr, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uintptr)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uintptr", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint8 by n. Returns an error if the item's value is
+// not an uint8, or if it was not found. If there is no error, the decremented
+// value is returned.
+func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint8)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint8", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint16 by n. Returns an error if the item's value
+// is not an uint16, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint16)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint16", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint32 by n. Returns an error if the item's value
+// is not an uint32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type uint64 by n. Returns an error if the item's value
+// is not an uint64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(uint64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an uint64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type float32 by n. Returns an error if the item's value
+// is not an float32, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat32(k string, n float32) (float32, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(float32)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float32", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
+// Decrement an item of type float64 by n. Returns an error if the item's value
+// is not an float64, or if it was not found. If there is no error, the
+// decremented value is returned.
+func (c *cache) DecrementFloat64(k string, n float64) (float64, error) {
+	c.mu.Lock()
+	v, found := c.items[k]
+	if !found || v.Expired() {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("Item %s not found", k)
+	}
+	if c.maxItems > 0 {
+		v.Accessed = time.Now().UnixNano()
+	}
+	rv, ok := v.Object.(float64)
+	if !ok {
+		c.mu.Unlock()
+		return 0, fmt.Errorf("The value for %s is not an float64", k)
+	}
+	nv := rv - n
+	v.Object = nv
+	c.items[k] = v
+	c.mu.Unlock()
+	return nv, nil
+}
+
// Delete an item from the cache. Does nothing if the key is not in the cache.
func (c *cache) Delete(k string) {
	c.mu.Lock()
	v, evicted := c.delete(k)
	// Capture the callback under the lock so it is consistent with the
	// eviction decision: c.delete only reports evicted=true when onEvicted
	// was non-nil at that moment.
	evictFunc := c.onEvicted
	c.mu.Unlock()
	if evicted {
		// Invoke the callback outside the lock so it may safely re-enter
		// the cache.
		evictFunc(k, v)
	}
}
+
// delete removes k from the items map. The caller must hold c.mu. It returns
// the deleted value and true only when an onEvicted callback is registered
// and the key existed, so callers know whether to invoke the callback after
// releasing the lock.
func (c *cache) delete(k string) (interface{}, bool) {
	if c.onEvicted != nil {
		if v, found := c.items[k]; found {
			delete(c.items, k)
			return v.Object, true
		}
	}
	delete(c.items, k)
	return nil, false
}
+
// keyAndValue pairs a deleted key with its value so eviction callbacks can be
// invoked after the cache lock has been released.
type keyAndValue struct {
	key   string
	value interface{}
}
+
// Delete all expired items from the cache.
func (c *cache) DeleteExpired() {
	var evictedItems []keyAndValue
	now := time.Now().UnixNano()
	c.mu.Lock()
	evictFunc := c.onEvicted
	for k, v := range c.items {
		// "Inlining" of Expired
		if v.Expiration > 0 && now > v.Expiration {
			ov, evicted := c.delete(k)
			// evicted is true only when onEvicted is set, so evictFunc
			// is never nil when evictedItems is non-empty.
			if evicted {
				evictedItems = append(evictedItems, keyAndValue{k, ov})
			}
		}
	}
	c.mu.Unlock()
	// Run callbacks after unlocking so they may safely use the cache.
	for _, v := range evictedItems {
		evictFunc(v.key, v.value)
	}
}
+
+// Sets an (optional) function that is called with the key and value when an
+// item is evicted from the cache. (Including when it is deleted manually, but
+// not when it is overwritten.) Set to nil to disable.
+func (c *cache) OnEvicted(f func(string, interface{})) {
+	c.mu.Lock()
+	c.onEvicted = f
+	c.mu.Unlock()
+}
+
// Delete some of the oldest items in the cache if the soft size limit has been
// exceeded.
func (c *cache) DeleteLRU() {
	c.mu.Lock()
	var (
		// Number of items over the soft cap; deleteLRUAmount ignores
		// non-positive counts.
		overCount = c.itemCount() - c.maxItems
		evictFunc = c.onEvicted
	)
	evicted := c.deleteLRUAmount(overCount)
	c.mu.Unlock()
	// evicted is non-empty only when onEvicted was set (see c.delete), so
	// evictFunc is safe to call here.
	for _, v := range evicted {
		evictFunc(v.key, v.value)
	}
}
+
// Delete a number of the oldest items from the cache.
func (c *cache) DeleteLRUAmount(numItems int) {
	c.mu.Lock()
	// Capture the callback under the lock; deleteLRUAmount only reports
	// evictions when onEvicted is set, so evictFunc is non-nil whenever
	// the evicted list is non-empty.
	evictFunc := c.onEvicted
	evicted := c.deleteLRUAmount(numItems)
	c.mu.Unlock()
	for _, v := range evicted {
		evictFunc(v.key, v.value)
	}
}
+
// deleteLRUAmount removes up to numItems of the least-recently-accessed,
// unexpired items and returns the evicted key/value pairs (non-empty only
// when an onEvicted callback is registered). The caller must hold c.mu.
//
// Selection is approximate: a single pass over the randomly-ordered map keeps
// a ring buffer of candidates whose Accessed times decrease as they are
// encountered, so the exact LRU set is not guaranteed.
func (c *cache) deleteLRUAmount(numItems int) []keyAndValue {
	if numItems <= 0 {
		return nil
	}
	var (
		lastTime     int64 = 0
		lastItems          = make([]string, numItems) // Ring buffer
		liCount            = 0
		full               = false
		evictedItems []keyAndValue
		now          = time.Now().UnixNano()
	)
	if c.onEvicted != nil {
		evictedItems = make([]keyAndValue, 0, numItems)
	}
	for k, v := range c.items {
		// "Inlining" of !Expired
		if v.Expiration == 0 || now <= v.Expiration {
			if full == false || v.Accessed < lastTime {
				// We found a least-recently-used item, or our
				// purge buffer isn't full yet
				lastTime = v.Accessed
				// Append it to the buffer, or start overwriting
				// it
				if liCount < numItems {
					lastItems[liCount] = k
					liCount++
				} else {
					lastItems[0] = k
					liCount = 1
					full = true
				}
			}
		}
	}
	// lastTime stays 0 when no candidate was recorded; nothing to delete.
	// NOTE(review): an unexpired item whose Accessed is still 0 would also
	// leave lastTime at 0 — confirm Accessed is always set on insert.
	if lastTime > 0 {
		for _, v := range lastItems {
			// Skip ring-buffer slots that were never assigned a key.
			if v != "" {
				ov, evicted := c.delete(v)
				if evicted {
					evictedItems = append(evictedItems, keyAndValue{v, ov})
				}
			}
		}
	}
	return evictedItems
}
+
// Write the cache's items (using Gob) to an io.Writer.
//
// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
// documentation for NewFrom().)
func (c *cache) Save(w io.Writer) (err error) {
	enc := gob.NewEncoder(w)
	defer func() {
		// Recover from panics raised while registering item types with
		// gob below and report them as an error instead.
		if x := recover(); x != nil {
			err = fmt.Errorf("Error registering item types with Gob library")
		}
	}()
	c.mu.RLock()
	defer c.mu.RUnlock()
	for _, v := range c.items {
		// Register each stored concrete type so gob can encode it.
		gob.Register(v.Object)
	}
	err = enc.Encode(&c.items)
	return
}
+
+// Save the cache's items to the given filename, creating the file if it
+// doesn't exist, and overwriting it if it does.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) SaveFile(fname string) error {
+	fp, err := os.Create(fname)
+	if err != nil {
+		return err
+	}
+	err = c.Save(fp)
+	if err != nil {
+		fp.Close()
+		return err
+	}
+	return fp.Close()
+}
+
+// Add (Gob-serialized) cache items from an io.Reader, excluding any items with
+// keys that already exist (and haven't expired) in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) Load(r io.Reader) error {
+	dec := gob.NewDecoder(r)
+	items := map[string]Item{}
+	err := dec.Decode(&items)
+	if err == nil {
+		c.mu.Lock()
+		defer c.mu.Unlock()
+		for k, v := range items {
+			ov, found := c.items[k]
+			if !found || ov.Expired() {
+				c.items[k] = v
+			}
+		}
+	}
+	return err
+}
+
+// Load and add cache items from the given filename, excluding any items with
+// keys that already exist in the current cache.
+//
+// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the
+// documentation for NewFrom().)
+func (c *cache) LoadFile(fname string) error {
+	fp, err := os.Open(fname)
+	if err != nil {
+		return err
+	}
+	err = c.Load(fp)
+	if err != nil {
+		fp.Close()
+		return err
+	}
+	return fp.Close()
+}
+
+// Copies all unexpired items in the cache into a new map and returns it.
+func (c *cache) Items() map[string]Item {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	m := make(map[string]Item, len(c.items))
+	now := time.Now().UnixNano()
+	for k, v := range c.items {
+		// "Inlining" of Expired
+		if v.Expiration > 0 {
+			if now > v.Expiration {
+				continue
+			}
+		}
+		m[k] = v
+	}
+	return m
+}
+
+// Returns the number of items in the cache. This may include items that have
+// expired, but have not yet been cleaned up.
+func (c *cache) ItemCount() int {
+	c.mu.RLock()
+	n := len(c.items)
+	c.mu.RUnlock()
+	return n
+}
+
+// Returns the number of items in the cache without locking. This may include
+// items that have expired, but have not yet been cleaned up. Equivalent to
+// len(c.Items()).
+func (c *cache) itemCount() int {
+	n := len(c.items)
+	return n
+}
+
+// Delete all items from the cache.
+func (c *cache) Flush() {
+	c.mu.Lock()
+	c.items = map[string]Item{}
+	c.mu.Unlock()
+}
+
// janitor periodically removes expired (and, when a size cap is set,
// least-recently-used) items from a cache.
type janitor struct {
	Interval time.Duration // time between cleanup passes
	stop     chan bool     // signalled by stopJanitor to end Run
}
+
// Run cleans the cache every Interval until a value is received on j.stop.
// It is intended to run in its own goroutine (see runJanitor).
func (j *janitor) Run(c *cache) {
	j.stop = make(chan bool)
	ticker := time.NewTicker(j.Interval)
	for {
		select {
		case <-ticker.C:
			c.DeleteExpired()
			// A positive maxItems enables the soft LRU size cap.
			if c.maxItems > 0 {
				c.DeleteLRU()
			}
		case <-j.stop:
			ticker.Stop()
			return
		}
	}
}
+
// stopJanitor signals the janitor goroutine to exit. It is installed as a
// finalizer on Cache (see newCacheWithJanitor) so the goroutine does not
// outlive the cache.
func stopJanitor(c *Cache) {
	c.janitor.stop <- true
}
+
+func runJanitor(c *cache, ci time.Duration) {
+	j := &janitor{
+		Interval: ci,
+	}
+	c.janitor = j
+	go j.Run(c)
+}
+
+func newCache(de time.Duration, m map[string]Item, mi int) *cache {
+	if de == 0 {
+		de = -1
+	}
+	c := &cache{
+		defaultExpiration: de,
+		maxItems:          mi,
+		items:             m,
+	}
+	return c
+}
+
// newCacheWithJanitor wraps newCache and, when ci > 0, starts a background
// janitor whose goroutine is stopped by a finalizer once the returned Cache
// is garbage collected.
func newCacheWithJanitor(de time.Duration, ci time.Duration, m map[string]Item, mi int) *Cache {
	c := newCache(de, m, mi)
	// This trick ensures that the janitor goroutine (which--granted it
	// was enabled--is running DeleteExpired on c forever) does not keep
	// the returned C object from being garbage collected. When it is
	// garbage collected, the finalizer stops the janitor goroutine, after
	// which c can be collected.
	C := &Cache{c}
	if ci > 0 {
		runJanitor(c, ci)
		runtime.SetFinalizer(C, stopJanitor)
	}
	return C
}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+func New(defaultExpiration, cleanupInterval time.Duration) *Cache {
+	items := make(map[string]Item)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
+}
+
+// Return a new cache with a given default expiration duration, cleanup
+// interval, and maximum-ish number of items. If the expiration duration
+// is less than one (or NoExpiration), the items in the cache never expire
+// (by default), and must be deleted manually. If the cleanup interval is
+// less than one, expired items are not deleted from the cache before
+// calling c.DeleteExpired(), c.DeleteLRU(), or c.DeleteLRUAmount(). If maxItems
+// is not greater than zero, then there will be no soft cap on the number of
+// items in the cache.
+//
+// Using the LRU functionality makes Get() a slower, write-locked operation.
+func NewWithLRU(defaultExpiration, cleanupInterval time.Duration, maxItems int) *Cache {
+	items := make(map[string]Item)
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
+}
+
+// Return a new cache with a given default expiration duration and cleanup
+// interval. If the expiration duration is less than one (or NoExpiration),
+// the items in the cache never expire (by default), and must be deleted
+// manually. If the cleanup interval is less than one, expired items are not
+// deleted from the cache before calling c.DeleteExpired().
+//
+// NewFrom() also accepts an items map which will serve as the underlying map
+// for the cache. This is useful for starting from a deserialized cache
+// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g.
+// make(map[string]Item, 500) to improve startup performance when the cache
+// is expected to reach a certain minimum size.
+//
+// Only the cache's methods synchronize access to this map, so it is not
+// recommended to keep any references to the map around after creating a cache.
+// If need be, the map can be accessed at a later point using c.Items() (subject
+// to the same caveat.)
+//
+// Note regarding serialization: When using e.g. gob, make sure to
+// gob.Register() the individual types stored in the cache before encoding a
+// map retrieved with c.Items(), and to register those same types before
+// decoding a blob containing an items map.
+func NewFrom(defaultExpiration, cleanupInterval time.Duration, items map[string]Item) *Cache {
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, 0)
+}
+
+// Similar to NewFrom, but creates a cache with LRU functionality enabled.
+func NewFromWithLRU(defaultExpiration, cleanupInterval time.Duration, items map[string]Item, maxItems int) *Cache {
+	return newCacheWithJanitor(defaultExpiration, cleanupInterval, items, maxItems)
+}

+ 192 - 0
vendor/github.com/cognusion/go-cache-lru/sharded.go

@@ -0,0 +1,192 @@
+package cache
+
+import (
+	"crypto/rand"
+	"math"
+	"math/big"
+	insecurerand "math/rand"
+	"os"
+	"runtime"
+	"time"
+)
+
+// This is an experimental and unexported (for now) attempt at making a cache
+// with better algorithmic complexity than the standard one, namely by
+// preventing write locks of the entire cache when an item is added. As of the
+// time of writing, the overhead of selecting buckets results in cache
+// operations being about twice as slow as for the standard cache with small
+// total cache sizes, and faster for larger ones.
+//
+// See cache_test.go for a few benchmarks.
+
+// unexportedShardedCache wraps shardedCache so that a runtime finalizer can
+// be attached to the outer value to stop the janitor goroutine once the
+// cache becomes unreachable (see unexportedNewSharded).
+type unexportedShardedCache struct {
+	*shardedCache
+}
+
+// shardedCache spreads keys over independent cache shards to reduce lock
+// contention; each key is routed by a seeded hash (see bucket).
+type shardedCache struct {
+	seed    uint32          // random seed mixed into djb33 to resist hash-flooding
+	m       uint32          // number of shards, i.e. len(cs)
+	cs      []*cache        // the shard buckets
+	janitor *shardedJanitor // periodic expired-item sweeper; nil when disabled
+}
+
+// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead.
+//
+// NOTE(review): the tail switch below is off by one — "case 1" mixes in no
+// bytes and each later case mixes in one byte fewer than its label, so the
+// final byte of k never influences the hash. This matches the upstream
+// patrickmn/go-cache implementation; it is harmless here because the result
+// is only used to pick a shard, but it is not a faithful djb2 of all bytes.
+func djb33(seed uint32, k string) uint32 {
+	var (
+		l = uint32(len(k))
+		d = 5381 + seed + l
+		i = uint32(0)
+	)
+	// Why is all this 5x faster than a for loop?
+	if l >= 4 {
+		// Unrolled: mix four bytes per iteration while five or more remain.
+		for i < l-4 {
+			d = (d * 33) ^ uint32(k[i])
+			d = (d * 33) ^ uint32(k[i+1])
+			d = (d * 33) ^ uint32(k[i+2])
+			d = (d * 33) ^ uint32(k[i+3])
+			i += 4
+		}
+	}
+	// Handle the remaining 1-4 bytes (see NOTE above: one byte is dropped).
+	switch l - i {
+	case 1:
+	case 2:
+		d = (d * 33) ^ uint32(k[i])
+	case 3:
+		d = (d * 33) ^ uint32(k[i])
+		d = (d * 33) ^ uint32(k[i+1])
+	case 4:
+		d = (d * 33) ^ uint32(k[i])
+		d = (d * 33) ^ uint32(k[i+1])
+		d = (d * 33) ^ uint32(k[i+2])
+	}
+	// Final mix: fold the high half into the low half.
+	return d ^ (d >> 16)
+}
+
+// bucket selects the shard for key k: seeded djb33 hash modulo shard count.
+func (sc *shardedCache) bucket(k string) *cache {
+	return sc.cs[djb33(sc.seed, k)%sc.m]
+}
+
+// The per-key operations below simply delegate to the shard that owns k,
+// so only that shard's lock is taken.
+
+// Set delegates to Set on k's shard.
+func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) {
+	sc.bucket(k).Set(k, x, d)
+}
+
+// Add delegates to Add on k's shard.
+func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Add(k, x, d)
+}
+
+// Replace delegates to Replace on k's shard.
+func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error {
+	return sc.bucket(k).Replace(k, x, d)
+}
+
+// Get delegates to Get on k's shard.
+func (sc *shardedCache) Get(k string) (interface{}, bool) {
+	return sc.bucket(k).Get(k)
+}
+
+// Increment delegates to Increment on k's shard.
+func (sc *shardedCache) Increment(k string, n int64) error {
+	return sc.bucket(k).Increment(k, n)
+}
+
+// IncrementFloat delegates to IncrementFloat on k's shard.
+func (sc *shardedCache) IncrementFloat(k string, n float64) error {
+	return sc.bucket(k).IncrementFloat(k, n)
+}
+
+// Decrement delegates to Decrement on k's shard.
+func (sc *shardedCache) Decrement(k string, n int64) error {
+	return sc.bucket(k).Decrement(k, n)
+}
+
+// Delete delegates to Delete on k's shard.
+func (sc *shardedCache) Delete(k string) {
+	sc.bucket(k).Delete(k)
+}
+
+// DeleteExpired removes expired items from every shard in turn.
+func (sc *shardedCache) DeleteExpired() {
+	for _, v := range sc.cs {
+		v.DeleteExpired()
+	}
+}
+
+// Items returns the items in the cache, one map per shard. This may include
+// items that have expired, but have not yet been cleaned up. If this is
+// significant, the Expiration fields of the items should be checked. Note
+// that explicit synchronization is needed to use a cache and its
+// corresponding Items() return values at the same time, as the maps are
+// shared.
+func (sc *shardedCache) Items() []map[string]Item {
+	res := make([]map[string]Item, len(sc.cs))
+	for i, v := range sc.cs {
+		res[i] = v.Items()
+	}
+	return res
+}
+
+// Flush clears every shard of all items.
+func (sc *shardedCache) Flush() {
+	for _, v := range sc.cs {
+		v.Flush()
+	}
+}
+
+// shardedJanitor periodically sweeps expired items out of a shardedCache.
+type shardedJanitor struct {
+	Interval time.Duration // time between DeleteExpired sweeps
+	stop     chan bool     // send true to make Run return; created inside Run
+}
+
+// Run sweeps expired items from sc every Interval until a value arrives on
+// j.stop.
+//
+// NOTE(review): j.stop is created here rather than at construction, so a
+// stop sent before this goroutine has scheduled would hit a nil channel —
+// presumably tolerable for the finalizer-driven lifecycle used here, but
+// worth confirming. Also, time.Tick never releases its underlying ticker;
+// acceptable only because the janitor lives as long as the cache itself.
+func (j *shardedJanitor) Run(sc *shardedCache) {
+	j.stop = make(chan bool)
+	tick := time.Tick(j.Interval)
+	for {
+		select {
+		case <-tick:
+			sc.DeleteExpired()
+		case <-j.stop:
+			return
+		}
+	}
+}
+
+// stopShardedJanitor signals the janitor goroutine to exit. It is installed
+// as a runtime finalizer on unexportedShardedCache (see unexportedNewSharded).
+func stopShardedJanitor(sc *unexportedShardedCache) {
+	sc.janitor.stop <- true
+}
+
+// runShardedJanitor attaches a janitor to sc and starts its sweep loop in a
+// new goroutine, firing every ci.
+func runShardedJanitor(sc *shardedCache, ci time.Duration) {
+	j := &shardedJanitor{
+		Interval: ci,
+	}
+	sc.janitor = j
+	go j.Run(sc)
+}
+
+// newShardedCache builds a shardedCache with n shards, each an independent
+// cache with default expiration de. The hash seed is drawn from the system
+// CSPRNG so that key-to-shard mapping is unpredictable; if that read fails,
+// it warns on stderr and falls back to math/rand.
+func newShardedCache(n int, de time.Duration) *shardedCache {
+	// Draw a uniform value in [0, MaxUint32] from crypto/rand.
+	max := big.NewInt(0).SetUint64(uint64(math.MaxUint32))
+	rnd, err := rand.Int(rand.Reader, max)
+	var seed uint32
+	if err != nil {
+		os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. Continuing with an insecure seed.\n"))
+		seed = insecurerand.Uint32()
+	} else {
+		seed = uint32(rnd.Uint64())
+	}
+	sc := &shardedCache{
+		seed: seed,
+		m:    uint32(n),
+		cs:   make([]*cache, n),
+	}
+	// Each shard is a plain cache with its own map (and therefore its own lock).
+	for i := 0; i < n; i++ {
+		c := &cache{
+			defaultExpiration: de,
+			items:             map[string]Item{},
+		}
+		sc.cs[i] = c
+	}
+	return sc
+}
+
+// unexportedNewSharded constructs a sharded cache with the given number of
+// shards. A defaultExpiration of 0 is normalized to -1 (items never expire
+// by default). When cleanupInterval > 0 a janitor goroutine is started, and
+// a finalizer on the returned wrapper stops it once the cache is no longer
+// reachable — which is why the janitor-bearing cache is wrapped rather than
+// returned directly.
+func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int) *unexportedShardedCache {
+	if defaultExpiration == 0 {
+		defaultExpiration = -1
+	}
+	sc := newShardedCache(shards, defaultExpiration)
+	SC := &unexportedShardedCache{sc}
+	if cleanupInterval > 0 {
+		runShardedJanitor(sc, cleanupInterval)
+		runtime.SetFinalizer(SC, stopShardedJanitor)
+	}
+	return SC
+}

+ 8 - 0
vendor/vendor.json

@@ -110,6 +110,14 @@
 			"revision": "9127e812e1e9e501ce899a18121d316ecb52e4ba",
 			"revisionTime": "2017-03-28T20:00:08Z"
 		},
+		{
+			"checksumSHA1": "pgbqIkCTjigfQbzZhq0NCqGvVGs=",
+			"path": "github.com/cognusion/go-cache-lru",
+			"revision": "f73e2280ecea6386641f79a5148517ee83d0d184",
+			"revisionTime": "2017-04-19T14:26:35Z",
+			"version": "feature-lru",
+			"versionExact": "feature-lru"
+		},
 		{
 			"checksumSHA1": "bFj0ceSRvaFFCfmS4el1PjWhcgw=",
 			"path": "github.com/creack/goselect",