This is an automated email from the ASF dual-hosted git repository.

rob pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficcontrol.git


The following commit(s) were added to refs/heads/master by this push:
     new 1e9b1ed  Canonical bbolt (#4837)
1e9b1ed is described below

commit 1e9b1ed3457b6ba57201254e196d25897da32594
Author: ocket8888 <[email protected]>
AuthorDate: Wed Sep 2 08:34:20 2020 -0600

    Canonical bbolt (#4837)
    
    * Switch bbolt to canonical import path (v1.3.5)
    
    Checked out new dependency at 232d8fc87f50244f9c808f4745759e08a304c029 - 
which was the head of the v1.3.5 tag
    
    (cherry picked from commit 9e630cbfdb28e4ec284bf62950213ce8fb353e02)
    
    * Added grove binary to .gitignore
---
 grove/diskcache/diskcache.go                       |   2 +-
 grove/vendor/github.com/coreos/bbolt/Makefile      |  30 ---
 grove/vendor/github.com/coreos/bbolt/appveyor.yml  |  18 --
 grove/vendor/github.com/coreos/bbolt/bolt_arm.go   |  28 ---
 grove/vendor/go.etcd.io/bbolt/.gitignore           |   5 +
 grove/vendor/go.etcd.io/bbolt/.travis.yml          |  17 ++
 .../coreos => go.etcd.io}/bbolt/LICENSE            |   0
 grove/vendor/go.etcd.io/bbolt/Makefile             |  38 ++++
 .../coreos => go.etcd.io}/bbolt/README.md          | 187 ++++++++++--------
 .../coreos => go.etcd.io}/bbolt/allocate_test.go   |   7 +-
 .../coreos => go.etcd.io}/bbolt/bolt_386.go        |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_amd64.go      |   5 +-
 .../bolt_386.go => go.etcd.io/bbolt/bolt_arm.go}   |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_arm64.go      |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_linux.go      |   2 +-
 .../coreos => go.etcd.io}/bbolt/bolt_mips64x.go    |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_mipsx.go      |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_openbsd.go    |   2 +-
 .../coreos => go.etcd.io}/bbolt/bolt_ppc.go        |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_ppc64.go      |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_ppc64le.go    |   5 +-
 .../bbolt/bolt_riscv64.go}                         |   7 +-
 .../coreos => go.etcd.io}/bbolt/bolt_s390x.go      |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_unix.go       |  11 +-
 .../bbolt/bolt_unix_aix.go}                        |   7 +-
 .../bbolt/bolt_unix_solaris.go                     |   5 +-
 .../coreos => go.etcd.io}/bbolt/bolt_windows.go    |  34 ++--
 .../coreos => go.etcd.io}/bbolt/boltsync_unix.go   |   2 +-
 .../coreos => go.etcd.io}/bbolt/bucket.go          |  36 ++--
 .../coreos => go.etcd.io}/bbolt/bucket_test.go     |   4 +-
 .../bolt => go.etcd.io/bbolt/cmd/bbolt}/main.go    |   4 +-
 .../bbolt/cmd/bbolt/main_test}                     |   3 +-
 .../coreos => go.etcd.io}/bbolt/cursor.go          |  12 +-
 .../coreos => go.etcd.io}/bbolt/cursor_test.go     |   4 +-
 .../{github.com/coreos => go.etcd.io}/bbolt/db.go  |  64 +++++--
 .../coreos => go.etcd.io}/bbolt/db_test.go         | 108 ++++++++---
 .../{github.com/coreos => go.etcd.io}/bbolt/doc.go |   4 +-
 .../coreos => go.etcd.io}/bbolt/errors.go          |   2 +-
 .../coreos => go.etcd.io}/bbolt/freelist.go        | 169 +++++++++++-----
 grove/vendor/go.etcd.io/bbolt/freelist_hmap.go     | 178 +++++++++++++++++
 .../coreos => go.etcd.io}/bbolt/freelist_test.go   | 212 +++++++++++++++++----
 grove/vendor/go.etcd.io/bbolt/go.mod               |   5 +
 grove/vendor/go.etcd.io/bbolt/go.sum               |   2 +
 grove/vendor/go.etcd.io/bbolt/manydbs_test.go      |  67 +++++++
 .../coreos => go.etcd.io}/bbolt/node.go            |  58 +++---
 .../coreos => go.etcd.io}/bbolt/node_test.go       |  10 +-
 .../coreos => go.etcd.io}/bbolt/page.go            |  43 +++--
 .../coreos => go.etcd.io}/bbolt/page_test.go       |   2 +-
 .../coreos => go.etcd.io}/bbolt/quick_test.go      |   7 +-
 .../bbolt/simulation_no_freelist_sync_test.go      |   4 +-
 .../coreos => go.etcd.io}/bbolt/simulation_test.go |  45 ++++-
 .../{github.com/coreos => go.etcd.io}/bbolt/tx.go  |  51 +++--
 .../coreos => go.etcd.io}/bbolt/tx_test.go         |  59 +++++-
 grove/vendor/go.etcd.io/bbolt/unsafe.go            |  39 ++++
 54 files changed, 1168 insertions(+), 476 deletions(-)

diff --git a/grove/diskcache/diskcache.go b/grove/diskcache/diskcache.go
index eee9909..8af71bb 100644
--- a/grove/diskcache/diskcache.go
+++ b/grove/diskcache/diskcache.go
@@ -26,7 +26,7 @@ import (
 
        "github.com/apache/trafficcontrol/lib/go-log"
 
-       bolt "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 type DiskCache struct {
diff --git a/grove/vendor/github.com/coreos/bbolt/Makefile 
b/grove/vendor/github.com/coreos/bbolt/Makefile
deleted file mode 100644
index 43b94f3..0000000
--- a/grove/vendor/github.com/coreos/bbolt/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-BRANCH=`git rev-parse --abbrev-ref HEAD`
-COMMIT=`git rev-parse --short HEAD`
-GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
-
-default: build
-
-race:
-       @go test -v -race -test.run="TestSimulate_(100op|1000op)"
-
-fmt:
-       !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
-
-# go get honnef.co/go/tools/simple
-gosimple:
-       gosimple ./...
-
-# go get honnef.co/go/tools/unused
-unused:
-       unused ./...
-
-# go get github.com/kisielk/errcheck
-errcheck:
-       @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt
-
-test:
-       go test -timeout 20m -v -coverprofile cover.out -covermode atomic
-       # Note: gets "program not an importable package" in out of path builds
-       go test -v ./cmd/bolt
-
-.PHONY: race fmt errcheck test gosimple unused
diff --git a/grove/vendor/github.com/coreos/bbolt/appveyor.yml 
b/grove/vendor/github.com/coreos/bbolt/appveyor.yml
deleted file mode 100644
index 6e26e94..0000000
--- a/grove/vendor/github.com/coreos/bbolt/appveyor.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\boltdb\bolt
-
-environment:
-  GOPATH: c:\gopath
-
-install:
-  - echo %PATH%
-  - echo %GOPATH%
-  - go version
-  - go env
-  - go get -v -t ./...
-
-build_script:
-  - go test -v ./...
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_arm.go 
b/grove/vendor/github.com/coreos/bbolt/bolt_arm.go
deleted file mode 100644
index 7e5cb4b..0000000
--- a/grove/vendor/github.com/coreos/bbolt/bolt_arm.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package bolt
-
-import "unsafe"
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned bool
-
-func init() {
-       // Simple check to see whether this arch handles unaligned load/stores
-       // correctly.
-
-       // ARM9 and older devices require load/stores to be from/to aligned
-       // addresses. If not, the lower 2 bits are cleared and that address is
-       // read in a jumbled up order.
-
-       // See 
http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
-
-       raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
-       val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
-
-       brokenUnaligned = val != 0x11222211
-}
diff --git a/grove/vendor/go.etcd.io/bbolt/.gitignore 
b/grove/vendor/go.etcd.io/bbolt/.gitignore
new file mode 100644
index 0000000..3bcd8cb
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/.gitignore
@@ -0,0 +1,5 @@
+*.prof
+*.test
+*.swp
+/bin/
+cover.out
diff --git a/grove/vendor/go.etcd.io/bbolt/.travis.yml 
b/grove/vendor/go.etcd.io/bbolt/.travis.yml
new file mode 100644
index 0000000..257dfdf
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+go_import_path: go.etcd.io/bbolt
+
+sudo: false
+
+go:
+- 1.12
+
+before_install:
+- go get -v honnef.co/go/tools/...
+- go get -v github.com/kisielk/errcheck
+
+script:
+- make fmt
+- make test
+- make race
+# - make errcheck
diff --git a/grove/vendor/github.com/coreos/bbolt/LICENSE 
b/grove/vendor/go.etcd.io/bbolt/LICENSE
similarity index 100%
rename from grove/vendor/github.com/coreos/bbolt/LICENSE
rename to grove/vendor/go.etcd.io/bbolt/LICENSE
diff --git a/grove/vendor/go.etcd.io/bbolt/Makefile 
b/grove/vendor/go.etcd.io/bbolt/Makefile
new file mode 100644
index 0000000..2968aaa
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/Makefile
@@ -0,0 +1,38 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+
+default: build
+
+race:
+       @TEST_FREELIST_TYPE=hashmap go test -v -race 
-test.run="TestSimulate_(100op|1000op)"
+       @echo "array freelist test"
+       @TEST_FREELIST_TYPE=array go test -v -race 
-test.run="TestSimulate_(100op|1000op)"
+
+fmt:
+       !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
+
+# go get honnef.co/go/tools/simple
+gosimple:
+       gosimple ./...
+
+# go get honnef.co/go/tools/unused
+unused:
+       unused ./...
+
+# go get github.com/kisielk/errcheck
+errcheck:
+       @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
+
+test:
+       TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile 
cover.out -covermode atomic
+       # Note: gets "program not an importable package" in out of path builds
+       TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
+
+       @echo "array freelist test"
+
+       @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile 
cover.out -covermode atomic
+       # Note: gets "program not an importable package" in out of path builds
+       @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
+
+.PHONY: race fmt errcheck test gosimple unused
diff --git a/grove/vendor/github.com/coreos/bbolt/README.md 
b/grove/vendor/go.etcd.io/bbolt/README.md
similarity index 89%
rename from grove/vendor/github.com/coreos/bbolt/README.md
rename to grove/vendor/go.etcd.io/bbolt/README.md
index 015f0ef..c9e64b1 100644
--- a/grove/vendor/github.com/coreos/bbolt/README.md
+++ b/grove/vendor/go.etcd.io/bbolt/README.md
@@ -1,9 +1,12 @@
 bbolt
-====
+=====
 
-[![Go Report 
Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt)
-[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt)
-[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt)
+[![Go Report 
Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt)
+[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt)
+[![Build Status 
Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt)
+[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt)
+[![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases)
+[![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
 
 bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
 store. The purpose of this fork is to provide the Go community with an active
@@ -33,36 +36,42 @@ consistency and thread safety. Bolt is currently used in 
high-load production
 environments serving databases as large as 1TB. Many companies such as
 Shopify and Heroku use Bolt-backed services every day.
 
+## Project versioning
+
+bbolt uses [semantic versioning](http://semver.org).
+API should not change between patch and minor releases.
+New minor versions may add additional features to the API.
+
 ## Table of Contents
 
-- [Getting Started](#getting-started)
-  - [Installing](#installing)
-  - [Opening a database](#opening-a-database)
-  - [Transactions](#transactions)
-    - [Read-write transactions](#read-write-transactions)
-    - [Read-only transactions](#read-only-transactions)
-    - [Batch read-write transactions](#batch-read-write-transactions)
-    - [Managing transactions manually](#managing-transactions-manually)
-  - [Using buckets](#using-buckets)
-  - [Using key/value pairs](#using-keyvalue-pairs)
-  - [Autoincrementing integer for the 
bucket](#autoincrementing-integer-for-the-bucket)
-  - [Iterating over keys](#iterating-over-keys)
-    - [Prefix scans](#prefix-scans)
-    - [Range scans](#range-scans)
-    - [ForEach()](#foreach)
-  - [Nested buckets](#nested-buckets)
-  - [Database backups](#database-backups)
-  - [Statistics](#statistics)
-  - [Read-Only Mode](#read-only-mode)
-  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
-- [Resources](#resources)
-- [Comparison with other databases](#comparison-with-other-databases)
-  - [Postgres, MySQL, & other relational 
databases](#postgres-mysql--other-relational-databases)
-  - [LevelDB, RocksDB](#leveldb-rocksdb)
-  - [LMDB](#lmdb)
-- [Caveats & Limitations](#caveats--limitations)
-- [Reading the Source](#reading-the-source)
-- [Other Projects Using Bolt](#other-projects-using-bolt)
+  - [Getting Started](#getting-started)
+    - [Installing](#installing)
+    - [Opening a database](#opening-a-database)
+    - [Transactions](#transactions)
+      - [Read-write transactions](#read-write-transactions)
+      - [Read-only transactions](#read-only-transactions)
+      - [Batch read-write transactions](#batch-read-write-transactions)
+      - [Managing transactions manually](#managing-transactions-manually)
+    - [Using buckets](#using-buckets)
+    - [Using key/value pairs](#using-keyvalue-pairs)
+    - [Autoincrementing integer for the 
bucket](#autoincrementing-integer-for-the-bucket)
+    - [Iterating over keys](#iterating-over-keys)
+      - [Prefix scans](#prefix-scans)
+      - [Range scans](#range-scans)
+      - [ForEach()](#foreach)
+    - [Nested buckets](#nested-buckets)
+    - [Database backups](#database-backups)
+    - [Statistics](#statistics)
+    - [Read-Only Mode](#read-only-mode)
+    - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+  - [Resources](#resources)
+  - [Comparison with other databases](#comparison-with-other-databases)
+    - [Postgres, MySQL, & other relational 
databases](#postgres-mysql--other-relational-databases)
+    - [LevelDB, RocksDB](#leveldb-rocksdb)
+    - [LMDB](#lmdb)
+  - [Caveats & Limitations](#caveats--limitations)
+  - [Reading the Source](#reading-the-source)
+  - [Other Projects Using Bolt](#other-projects-using-bolt)
 
 ## Getting Started
 
@@ -71,13 +80,28 @@ Shopify and Heroku use Bolt-backed services every day.
 To start using Bolt, install Go and run `go get`:
 
 ```sh
-$ go get github.com/coreos/bbolt/...
+$ go get go.etcd.io/bbolt/...
 ```
 
 This will retrieve the library and install the `bolt` command line utility into
 your `$GOBIN` path.
 
 
+### Importing bbolt
+
+To use bbolt as an embedded key-value store, import as:
+
+```go
+import bolt "go.etcd.io/bbolt"
+
+db, err := bolt.Open(path, 0666, nil)
+if err != nil {
+  return err
+}
+defer db.Close()
+```
+
+
 ### Opening a database
 
 The top-level object in Bolt is a `DB`. It is represented as a single file on
@@ -91,7 +115,7 @@ package main
 import (
        "log"
 
-       bolt "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 func main() {
@@ -128,11 +152,12 @@ are not thread safe. To work with data in multiple 
goroutines you must start
 a transaction for each one or use locking to ensure only one goroutine accesses
 a transaction at a time. Creating transaction from the `DB` is thread safe.
 
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
+Transactions should not depend on one another and generally shouldn't be opened
+simultaneously in the same goroutine. This can cause a deadlock as the 
read-write
+transaction needs to periodically re-map the data file but it cannot do so 
while
+any read-only transaction is open. Even a nested read-only transaction can 
cause
+a deadlock, as the child transaction can block the parent transaction from 
releasing
+its resources.
 
 #### Read-write transactions
 
@@ -251,7 +276,7 @@ should be writable.
 ### Using buckets
 
 Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
 function:
 
 ```go
@@ -534,7 +559,7 @@ this from a read-only transaction, it will perform a hot 
backup and not block
 your other database reads and writes.
 
 By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the 
[`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx)
+system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
 documentation for information about optimizing for larger-than-RAM datasets.
 
 One common use case is to backup over HTTP so you can use tools like `cURL` to
@@ -875,54 +900,58 @@ them via pull request.
 
 Below is a list of public, open source projects that use Bolt:
 
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB 
files.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming 
game for Golang using Bolt for user accounts and a leaderboard.
+* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with 
built-in support for Lua. Uses BoltDB as the default database backend.
 * [Bazil](https://bazil.org/) - A file system that lets your data reside where 
it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional 
storage engine and testing it against Basho-tuned leveldb.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel 
analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to 
store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, 
BoltDB and Blackfriday.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing 
BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb 
with Lua script support.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL 
store for Go types built on BoltDB
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - 
Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB 
files.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to 
ElasticSearch that uses Bolt as the default storage backend.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+  simple tx and key scans.
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph 
database using Bolt as optional backend.
 * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value 
interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version 
of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, 
peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery 
and configuration made easy. Distributed, highly available, and 
datacenter-aware.
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional 
storage engine and testing it against Basho-tuned leveldb.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred 
cryptocurrency.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google 
Drive command line client for \*NIX operating systems.
 * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system 
service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and 
lightweight platform for your files and data.
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards 
as a (free and open source) service.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web 
application in Go using BoltDB.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL 
shortener written in Golang and BoltDB for persistent key/value storage and for 
routing it's using high performant HTTPRouter.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to 
manage Go remote import paths with custom domains
+* [gokv](https://github.com/philippgille/gokv) - Simple key-value store 
abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, 
LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many 
more)
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, 
peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, 
and real-time analytics.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast 
ip-geo-location-server using bolt with bloom filters.
 * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api 
for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* 
[photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session)
 - Sessions for a photo viewing site.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, 
script-driven continuous integration (build -> test -> release) tool, with no 
external dependencies
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler 
optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 
8601 duration notation, and dependent jobs.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) 
- A proposed grammar for key-value datastores offering a bbolt binding.
 * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, 
using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast 
ip-geo-location-server using bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph 
database using Bolt as optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to 
ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to 
manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly 
scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, 
and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and 
lightweight platform for your files and data.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed 
Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that 
allows easy operations on multi level (nested) buckets.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version 
of Graphite.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem 
creates a filesystem to organise your music files.
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming 
uses bbolt for message and metadata storage.
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming 
game for Golang using Bolt for user accounts and a leaderboard.
+* 
[photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session)
 - Sessions for a photo viewing site.
 * [Prometheus Annotation 
Server](https://github.com/oliver006/prom_annotation_server) - Annotation 
server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery 
and configuration made easy. Distributed, highly available, and 
datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler 
optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 
8601 duration notation, and dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google 
Drive command line client for \*NIX operating systems.
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, 
modular, DIY reef tank controller using easy to learn electronics based on a 
Raspberry Pi.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web 
service to collect arbitrary HTTP requests and inspect them via REST API or 
simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly 
scalable distributed key~file system with O(1) disk read.
 * [stow](https://github.com/djherbis/stow) -  a persistence manager for objects
   backed by boltdb.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
-  simple tx and key scans.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that 
allows easy operations on multi level (nested) buckets.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web 
service to collect arbitrary HTTP requests and inspect them via REST API or 
simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards 
as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - 
Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed 
Least-Recently-Used (LRU) read-through cache with chainable remote stores.
 * [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for 
BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web 
application in Go using BoltDB.
 * [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use 
BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with 
built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem 
creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL 
shortener written in Golang and BoltDB for persistent key/value storage and for 
routing it's using high performent HTTPRouter.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel 
analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to 
store and process all Twitter mentions of GitHub projects.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to 
manage system stuff (IP, DNS, Gateway...) on a linux server.
 * [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent 
client package and utilities in Go. BoltDB is a storage backend in development.
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to 
manage Go remote import paths with custom domains
-* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing 
BoltDB file in your terminal.
-* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb 
with Lua script support.
-* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
-* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred 
cryptocurrency.
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, 
script-driven continuous integration (build - > test -> release) tool, with no 
external dependencies
-* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL 
store for Go types built on BoltDB
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, 
BoltDB and Blackfriday.
 
 If you are using Bolt in a project please send a pull request to add it to the 
list.
diff --git a/grove/vendor/github.com/coreos/bbolt/allocate_test.go 
b/grove/vendor/go.etcd.io/bbolt/allocate_test.go
similarity index 86%
rename from grove/vendor/github.com/coreos/bbolt/allocate_test.go
rename to grove/vendor/go.etcd.io/bbolt/allocate_test.go
index 8566b4d..98b06b4 100644
--- a/grove/vendor/github.com/coreos/bbolt/allocate_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/allocate_test.go
@@ -1,12 +1,13 @@
-package bolt
+package bbolt
 
 import (
        "testing"
 )
 
 func TestTx_allocatePageStats(t *testing.T) {
-       f := newFreelist()
-       f.ids = []pgid{2, 3}
+       f := newTestFreelist()
+       ids := []pgid{2, 3}
+       f.readIDs(ids)
 
        tx := &Tx{
                db: &DB{
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_386.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_386.go
similarity index 68%
copy from grove/vendor/github.com/coreos/bbolt/bolt_386.go
copy to grove/vendor/go.etcd.io/bbolt/bolt_386.go
index 820d533..aee2596 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_386.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_386.go
@@ -1,10 +1,7 @@
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_amd64.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_amd64.go
similarity index 69%
copy from grove/vendor/github.com/coreos/bbolt/bolt_amd64.go
copy to grove/vendor/go.etcd.io/bbolt/bolt_amd64.go
index 98fafdb..5dd8f3f 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_amd64.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_amd64.go
@@ -1,10 +1,7 @@
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_386.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_arm.go
similarity index 68%
rename from grove/vendor/github.com/coreos/bbolt/bolt_386.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_arm.go
index 820d533..aee2596 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_386.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_arm.go
@@ -1,10 +1,7 @@
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_arm64.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_arm64.go
similarity index 70%
rename from grove/vendor/github.com/coreos/bbolt/bolt_arm64.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_arm64.go
index b26d84f..810dfd5 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_arm64.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_arm64.go
@@ -1,12 +1,9 @@
 // +build arm64
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_linux.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_linux.go
similarity index 91%
rename from grove/vendor/github.com/coreos/bbolt/bolt_linux.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_linux.go
index 2b67666..7707bca 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_linux.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_linux.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "syscall"
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_mips64x.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_mips64x.go
similarity index 71%
rename from grove/vendor/github.com/coreos/bbolt/bolt_mips64x.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_mips64x.go
index 134b578..dd8ffe1 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_mips64x.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_mips64x.go
@@ -1,12 +1,9 @@
 // +build mips64 mips64le
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x8000000000 // 512GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_mipsx.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_mipsx.go
similarity index 70%
rename from grove/vendor/github.com/coreos/bbolt/bolt_mipsx.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_mipsx.go
index d5ecb05..a669703 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_mipsx.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_mipsx.go
@@ -1,12 +1,9 @@
 // +build mips mipsle
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x40000000 // 1GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_openbsd.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_openbsd.go
similarity index 97%
rename from grove/vendor/github.com/coreos/bbolt/bolt_openbsd.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_openbsd.go
index 7058c3d..d7f5035 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_openbsd.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_openbsd.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "syscall"
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_ppc.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_ppc.go
similarity index 69%
rename from grove/vendor/github.com/coreos/bbolt/bolt_ppc.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_ppc.go
index 55cb8a7..84e545e 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_ppc.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_ppc.go
@@ -1,12 +1,9 @@
 // +build ppc
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0x7FFFFFFF // 2GB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_ppc64.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_ppc64.go
similarity index 70%
rename from grove/vendor/github.com/coreos/bbolt/bolt_ppc64.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_ppc64.go
index 9331d97..a761209 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_ppc64.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_ppc64.go
@@ -1,12 +1,9 @@
 // +build ppc64
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_ppc64le.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
similarity index 70%
rename from grove/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
index 8c143bc..c830f2f 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_ppc64le.go
@@ -1,12 +1,9 @@
 // +build ppc64le
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_amd64.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_riscv64.go
similarity index 69%
rename from grove/vendor/github.com/coreos/bbolt/bolt_amd64.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_riscv64.go
index 98fafdb..c967613 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_amd64.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_riscv64.go
@@ -1,10 +1,9 @@
-package bolt
+// +build riscv64
+
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_s390x.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_s390x.go
similarity index 70%
rename from grove/vendor/github.com/coreos/bbolt/bolt_s390x.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_s390x.go
index d7c39af..ff2a560 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_s390x.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_s390x.go
@@ -1,12 +1,9 @@
 // +build s390x
 
-package bolt
+package bbolt
 
 // maxMapSize represents the largest mmap size supported by Bolt.
 const maxMapSize = 0xFFFFFFFFFFFF // 256TB
 
 // maxAllocSize is the size used when creating array pointers.
 const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_unix.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_unix.go
similarity index 87%
rename from grove/vendor/github.com/coreos/bbolt/bolt_unix.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_unix.go
index 06592a0..2938fed 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_unix.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_unix.go
@@ -1,17 +1,16 @@
-// +build !windows,!plan9,!solaris
+// +build !windows,!plan9,!solaris,!aix
 
-package bolt
+package bbolt
 
 import (
        "fmt"
-       "os"
        "syscall"
        "time"
        "unsafe"
 )
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) 
error {
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
        var t time.Time
        if timeout != 0 {
                t = time.Now()
@@ -56,7 +55,9 @@ func mmap(db *DB, sz int) error {
        }
 
        // Advise the kernel that the mmap is accessed randomly.
-       if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+       err = madvise(b, syscall.MADV_RANDOM)
+       if err != nil && err != syscall.ENOSYS {
+               // Ignore not implemented error in kernel because it still 
works.
                return fmt.Errorf("madvise: %s", err)
        }
 
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
similarity index 94%
copy from grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
copy to grove/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
index fd8335e..a64c16f 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_unix_aix.go
@@ -1,8 +1,9 @@
-package bolt
+// +build aix
+
+package bbolt
 
 import (
        "fmt"
-       "os"
        "syscall"
        "time"
        "unsafe"
@@ -11,7 +12,7 @@ import (
 )
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) 
error {
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
        var t time.Time
        if timeout != 0 {
                t = time.Now()
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
similarity index 94%
rename from grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
index fd8335e..babad65 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go
@@ -1,8 +1,7 @@
-package bolt
+package bbolt
 
 import (
        "fmt"
-       "os"
        "syscall"
        "time"
        "unsafe"
@@ -11,7 +10,7 @@ import (
 )
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) 
error {
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
        var t time.Time
        if timeout != 0 {
                t = time.Now()
diff --git a/grove/vendor/github.com/coreos/bbolt/bolt_windows.go 
b/grove/vendor/go.etcd.io/bbolt/bolt_windows.go
similarity index 83%
rename from grove/vendor/github.com/coreos/bbolt/bolt_windows.go
rename to grove/vendor/go.etcd.io/bbolt/bolt_windows.go
index ca6f9a1..fca178b 100644
--- a/grove/vendor/github.com/coreos/bbolt/bolt_windows.go
+++ b/grove/vendor/go.etcd.io/bbolt/bolt_windows.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "fmt"
@@ -16,8 +16,6 @@ var (
 )
 
 const (
-       lockExt = ".lock"
-
        // see 
https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
        flagLockExclusive       = 2
        flagLockFailImmediately = 1
@@ -48,28 +46,24 @@ func fdatasync(db *DB) error {
 }
 
 // flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) 
error {
-       // Create a separate lock file on windows because a process
-       // cannot share an exclusive lock on the same file. This is
-       // needed during Tx.WriteTo().
-       f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
-       if err != nil {
-               return err
-       }
-       db.lockfile = f
-
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
        var t time.Time
        if timeout != 0 {
                t = time.Now()
        }
-       fd := f.Fd()
        var flag uint32 = flagLockFailImmediately
        if exclusive {
                flag |= flagLockExclusive
        }
        for {
-               // Attempt to obtain an exclusive lock.
-               err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, 
&syscall.Overlapped{})
+               // Fix for https://github.com/etcd-io/bbolt/issues/121. Use 
byte-range
+               // -1..0 as the lock on the database file.
+               var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+               err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, 
&syscall.Overlapped{
+                       Offset:     m1,
+                       OffsetHigh: m1,
+               })
+
                if err == nil {
                        return nil
                } else if err != errLockViolation {
@@ -88,9 +82,11 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout 
time.Duration) erro
 
 // funlock releases an advisory lock on a file descriptor.
 func funlock(db *DB) error {
-       err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, 
&syscall.Overlapped{})
-       db.lockfile.Close()
-       os.Remove(db.path + lockExt)
+       var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+       err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, 
&syscall.Overlapped{
+               Offset:     m1,
+               OffsetHigh: m1,
+       })
        return err
 }
 
diff --git a/grove/vendor/github.com/coreos/bbolt/boltsync_unix.go 
b/grove/vendor/go.etcd.io/bbolt/boltsync_unix.go
similarity index 91%
rename from grove/vendor/github.com/coreos/bbolt/boltsync_unix.go
rename to grove/vendor/go.etcd.io/bbolt/boltsync_unix.go
index f504425..9587afe 100644
--- a/grove/vendor/github.com/coreos/bbolt/boltsync_unix.go
+++ b/grove/vendor/go.etcd.io/bbolt/boltsync_unix.go
@@ -1,6 +1,6 @@
 // +build !windows,!plan9,!linux,!openbsd
 
-package bolt
+package bbolt
 
 // fdatasync flushes written data to a file descriptor.
 func fdatasync(db *DB) error {
diff --git a/grove/vendor/github.com/coreos/bbolt/bucket.go 
b/grove/vendor/go.etcd.io/bbolt/bucket.go
similarity index 95%
rename from grove/vendor/github.com/coreos/bbolt/bucket.go
rename to grove/vendor/go.etcd.io/bbolt/bucket.go
index 44db88b..d8750b1 100644
--- a/grove/vendor/github.com/coreos/bbolt/bucket.go
+++ b/grove/vendor/go.etcd.io/bbolt/bucket.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "bytes"
@@ -123,10 +123,12 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
 func (b *Bucket) openBucket(value []byte) *Bucket {
        var child = newBucket(b.tx)
 
-       // If unaligned load/stores are broken on this arch and value is
-       // unaligned simply clone to an aligned byte array.
-       unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
-
+       // Unaligned access requires a copy to be made.
+       const unalignedMask = unsafe.Alignof(struct {
+               bucket
+               page
+       }{}) - 1
+       unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
        if unaligned {
                value = cloneBytes(value)
        }
@@ -206,7 +208,7 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) 
(*Bucket, error) {
 }
 
 // DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exists, or if the key represents a 
non-bucket value.
+// Returns an error if the bucket does not exist, or if the key represents a 
non-bucket value.
 func (b *Bucket) DeleteBucket(key []byte) error {
        if b.tx.db == nil {
                return ErrTxClosed
@@ -228,7 +230,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
        // Recursively delete all child buckets.
        child := b.Bucket(key)
        err := child.ForEach(func(k, v []byte) error {
-               if v == nil {
+               if _, _, childFlags := child.Cursor().seek(k); (childFlags & 
bucketLeafFlag) != 0 {
                        if err := child.DeleteBucket(k); err != nil {
                                return fmt.Errorf("delete bucket: %s", err)
                        }
@@ -409,7 +411,7 @@ func (b *Bucket) Stats() BucketStats {
 
                        if p.count != 0 {
                                // If page has any elements, add all element 
headers.
-                               used += leafPageElementSize * int(p.count-1)
+                               used += leafPageElementSize * uintptr(p.count-1)
 
                                // Add all element key, value sizes.
                                // The computation takes advantage of the fact 
that the position
@@ -417,16 +419,16 @@ func (b *Bucket) Stats() BucketStats {
                                // of all previous elements' keys and values.
                                // It also includes the last element's header.
                                lastElement := p.leafPageElement(p.count - 1)
-                               used += int(lastElement.pos + lastElement.ksize 
+ lastElement.vsize)
+                               used += uintptr(lastElement.pos + 
lastElement.ksize + lastElement.vsize)
                        }
 
                        if b.root == 0 {
                                // For inlined bucket just update the inline 
stats
-                               s.InlineBucketInuse += used
+                               s.InlineBucketInuse += int(used)
                        } else {
                                // For non-inlined bucket update all the leaf 
stats
                                s.LeafPageN++
-                               s.LeafInuse += used
+                               s.LeafInuse += int(used)
                                s.LeafOverflowN += int(p.overflow)
 
                                // Collect stats from sub-buckets.
@@ -447,13 +449,13 @@ func (b *Bucket) Stats() BucketStats {
 
                        // used totals the used bytes for the page
                        // Add header and all element headers.
-                       used := pageHeaderSize + (branchPageElementSize * 
int(p.count-1))
+                       used := pageHeaderSize + (branchPageElementSize * 
uintptr(p.count-1))
 
                        // Add size of all keys and values.
                        // Again, use the fact that last element's position 
equals to
                        // the total of key, value sizes of all previous 
elements.
-                       used += int(lastElement.pos + lastElement.ksize)
-                       s.BranchInuse += used
+                       used += uintptr(lastElement.pos + lastElement.ksize)
+                       s.BranchInuse += int(used)
                        s.BranchOverflowN += int(p.overflow)
                }
 
@@ -593,7 +595,7 @@ func (b *Bucket) inlineable() bool {
        // our threshold for inline bucket size.
        var size = pageHeaderSize
        for _, inode := range n.inodes {
-               size += leafPageElementSize + len(inode.key) + len(inode.value)
+               size += leafPageElementSize + uintptr(len(inode.key)) + 
uintptr(len(inode.value))
 
                if inode.flags&bucketLeafFlag != 0 {
                        return false
@@ -606,8 +608,8 @@ func (b *Bucket) inlineable() bool {
 }
 
 // Returns the maximum total size of a bucket to make it a candidate for 
inlining.
-func (b *Bucket) maxInlineBucketSize() int {
-       return b.tx.db.pageSize / 4
+func (b *Bucket) maxInlineBucketSize() uintptr {
+       return uintptr(b.tx.db.pageSize / 4)
 }
 
 // write allocates and writes a bucket to a byte slice.
diff --git a/grove/vendor/github.com/coreos/bbolt/bucket_test.go 
b/grove/vendor/go.etcd.io/bbolt/bucket_test.go
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/bucket_test.go
rename to grove/vendor/go.etcd.io/bbolt/bucket_test.go
index b7ce32c..e48204b 100644
--- a/grove/vendor/github.com/coreos/bbolt/bucket_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/bucket_test.go
@@ -1,4 +1,4 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
@@ -13,7 +13,7 @@ import (
        "testing"
        "testing/quick"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 // Ensure that a bucket that gets a non-existent key returns nil.
diff --git a/grove/vendor/github.com/coreos/bbolt/cmd/bolt/main.go 
b/grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main.go
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/cmd/bolt/main.go
rename to grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main.go
index eb85e05..91a13bc 100644
--- a/grove/vendor/github.com/coreos/bbolt/cmd/bolt/main.go
+++ b/grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main.go
@@ -19,7 +19,7 @@ import (
        "unicode/utf8"
        "unsafe"
 
-       bolt "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 var (
@@ -774,7 +774,7 @@ func (cmd *PageCommand) PrintFreelist(w io.Writer, buf 
[]byte) error {
 
        // Print each page in the freelist.
        ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
-       for i := int(idx); i < count; i++ {
+       for i := idx; i < count; i++ {
                fmt.Fprintf(w, "%d\n", ids[i])
        }
        fmt.Fprintf(w, "\n")
diff --git a/grove/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go 
b/grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main_test
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go
rename to grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main_test
index 16bf804..b4871ff 100644
--- a/grove/vendor/github.com/coreos/bbolt/cmd/bolt/main_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/cmd/bbolt/main_test
@@ -12,8 +12,7 @@ import (
        "strconv"
        "testing"
 
-       "github.com/coreos/bbolt"
-       "github.com/coreos/bbolt/cmd/bolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 // Ensure the "info" command can print information about a database.
diff --git a/grove/vendor/github.com/coreos/bbolt/cursor.go 
b/grove/vendor/go.etcd.io/bbolt/cursor.go
similarity index 98%
rename from grove/vendor/github.com/coreos/bbolt/cursor.go
rename to grove/vendor/go.etcd.io/bbolt/cursor.go
index 1be9f35..98aeb44 100644
--- a/grove/vendor/github.com/coreos/bbolt/cursor.go
+++ b/grove/vendor/go.etcd.io/bbolt/cursor.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "bytes"
@@ -157,12 +157,6 @@ func (c *Cursor) seek(seek []byte) (key []byte, value 
[]byte, flags uint32) {
        // Start from root page/node and traverse to correct page.
        c.stack = c.stack[:0]
        c.search(seek, c.bucket.root)
-       ref := &c.stack[len(c.stack)-1]
-
-       // If the cursor is pointing to the end of page/node then return nil.
-       if ref.index >= ref.count() {
-               return nil, nil, 0
-       }
 
        // If this is a bucket then return a nil value.
        return c.keyValue()
@@ -339,6 +333,8 @@ func (c *Cursor) nsearch(key []byte) {
 // keyValue returns the key and value of the current leaf element.
 func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
        ref := &c.stack[len(c.stack)-1]
+
+       // If the cursor is pointing to the end of page/node then return nil.
        if ref.count() == 0 || ref.index >= ref.count() {
                return nil, nil, 0
        }
@@ -370,7 +366,7 @@ func (c *Cursor) node() *node {
        }
        for _, ref := range c.stack[:len(c.stack)-1] {
                _assert(!n.isLeaf, "expected branch node")
-               n = n.childAt(int(ref.index))
+               n = n.childAt(ref.index)
        }
        _assert(n.isLeaf, "expected leaf node")
        return n
diff --git a/grove/vendor/github.com/coreos/bbolt/cursor_test.go 
b/grove/vendor/go.etcd.io/bbolt/cursor_test.go
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/cursor_test.go
rename to grove/vendor/go.etcd.io/bbolt/cursor_test.go
index 7b1ae19..d2a8bc7 100644
--- a/grove/vendor/github.com/coreos/bbolt/cursor_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/cursor_test.go
@@ -1,4 +1,4 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
@@ -11,7 +11,7 @@ import (
        "testing"
        "testing/quick"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 // Ensure that a cursor can return a reference to the bucket that created it.
diff --git a/grove/vendor/github.com/coreos/bbolt/db.go 
b/grove/vendor/go.etcd.io/bbolt/db.go
similarity index 93%
rename from grove/vendor/github.com/coreos/bbolt/db.go
rename to grove/vendor/go.etcd.io/bbolt/db.go
index d5c53f4..80b0095 100644
--- a/grove/vendor/github.com/coreos/bbolt/db.go
+++ b/grove/vendor/go.etcd.io/bbolt/db.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "errors"
@@ -43,6 +43,16 @@ var defaultPageSize = os.Getpagesize()
 // The time elapsed between consecutive file locking attempts.
 const flockRetryTimeout = 50 * time.Millisecond
 
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+const (
+       // FreelistArrayType indicates backend freelist type is array
+       FreelistArrayType = FreelistType("array")
+       // FreelistMapType indicates backend freelist type is hashmap
+       FreelistMapType = FreelistType("hashmap")
+)
+
 // DB represents a collection of buckets persisted to a file on disk.
 // All data access is performed through transactions which can be obtained 
through the DB.
 // All the functions on DB will return a ErrDatabaseNotOpen if accessed before 
Open() is called.
@@ -70,6 +80,13 @@ type DB struct {
        // re-sync during recovery.
        NoFreelistSync bool
 
+       // FreelistType sets the backend freelist type. There are two options. 
Array which is simple but endures
+       // dramatic performance degradation if database is large and 
framentation in freelist is common.
+       // The alternative one is using hashmap, it is faster in almost all 
circumstances
+       // but it doesn't guarantee that it offers the smallest page id 
available. In normal case it is safe.
+       // The default type is array
+       FreelistType FreelistType
+
        // When true, skips the truncate call when growing the database.
        // Setting this to true is only safe on non-ext3/ext4 systems.
        // Skipping truncation avoids preallocation of hard drive space and
@@ -104,9 +121,9 @@ type DB struct {
        AllocSize int
 
        path     string
+       openFile func(string, int, os.FileMode) (*os.File, error)
        file     *os.File
-       lockfile *os.File // windows only
-       dataref  []byte   // mmap'ed readonly, write throws SEGV
+       dataref  []byte // mmap'ed readonly, write throws SEGV
        data     *[maxMapSize]byte
        datasz   int
        filesz   int // current on disk file size
@@ -170,6 +187,7 @@ func Open(path string, mode os.FileMode, options *Options) 
(*DB, error) {
        db.NoGrowSync = options.NoGrowSync
        db.MmapFlags = options.MmapFlags
        db.NoFreelistSync = options.NoFreelistSync
+       db.FreelistType = options.FreelistType
 
        // Set default values for later DB operations.
        db.MaxBatchSize = DefaultMaxBatchSize
@@ -182,13 +200,18 @@ func Open(path string, mode os.FileMode, options 
*Options) (*DB, error) {
                db.readOnly = true
        }
 
+       db.openFile = options.OpenFile
+       if db.openFile == nil {
+               db.openFile = os.OpenFile
+       }
+
        // Open data file and separate sync handler for metadata writes.
-       db.path = path
        var err error
-       if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != 
nil {
+       if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil 
{
                _ = db.close()
                return nil, err
        }
+       db.path = db.file.Name()
 
        // Lock file so that other processes using Bolt in read-write mode 
cannot
        // use the database  at the same time. This would cause corruption since
@@ -197,8 +220,7 @@ func Open(path string, mode os.FileMode, options *Options) 
(*DB, error) {
        // if !options.ReadOnly.
        // The database file is locked using the shared lock (more than one 
process may
        // hold a lock at the same time) otherwise (options.ReadOnly is set).
-       if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil {
-               db.lockfile = nil // make 'unused' happy. TODO: rework locks
+       if err := flock(db, !db.readOnly, options.Timeout); err != nil {
                _ = db.close()
                return nil, err
        }
@@ -213,10 +235,13 @@ func Open(path string, mode os.FileMode, options 
*Options) (*DB, error) {
 
        // Initialize the database if it doesn't exist.
        if info, err := db.file.Stat(); err != nil {
+               _ = db.close()
                return nil, err
        } else if info.Size() == 0 {
                // Initialize new files with meta pages.
                if err := db.init(); err != nil {
+                       // clean up file descriptor on initialization fail
+                       _ = db.close()
                        return nil, err
                }
        } else {
@@ -236,6 +261,7 @@ func Open(path string, mode os.FileMode, options *Options) 
(*DB, error) {
                                db.pageSize = int(m.pageSize)
                        }
                } else {
+                       _ = db.close()
                        return nil, ErrInvalid
                }
        }
@@ -281,7 +307,7 @@ func Open(path string, mode os.FileMode, options *Options) 
(*DB, error) {
 // concurrent accesses being made to the freelist.
 func (db *DB) loadFreelist() {
        db.freelistLoad.Do(func() {
-               db.freelist = newFreelist()
+               db.freelist = newFreelist(db.FreelistType)
                if !db.hasSyncedFreelist() {
                        // Reconstruct free list by scanning the DB.
                        db.freelist.readIDs(db.freepages())
@@ -289,7 +315,7 @@ func (db *DB) loadFreelist() {
                        // Read free list from freelist page.
                        db.freelist.read(db.page(db.meta().freelist))
                }
-               db.stats.FreePageN = len(db.freelist.ids)
+               db.stats.FreePageN = db.freelist.free_count()
        })
 }
 
@@ -450,8 +476,8 @@ func (db *DB) Close() error {
        db.metalock.Lock()
        defer db.metalock.Unlock()
 
-       db.mmaplock.RLock()
-       defer db.mmaplock.RUnlock()
+       db.mmaplock.Lock()
+       defer db.mmaplock.Unlock()
 
        return db.close()
 }
@@ -1003,6 +1029,13 @@ type Options struct {
        // under normal operation, but requires a full database re-sync during 
recovery.
        NoFreelistSync bool
 
+       // FreelistType sets the backend freelist type. There are two options. 
Array which is simple but endures
+       // dramatic performance degradation if database is large and 
framentation in freelist is common.
+       // The alternative one is using hashmap, it is faster in almost all 
circumstances
+       // but it doesn't guarantee that it offers the smallest page id 
available. In normal case it is safe.
+       // The default type is array
+       FreelistType FreelistType
+
        // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
        // grab a shared lock (UNIX).
        ReadOnly bool
@@ -1027,13 +1060,18 @@ type Options struct {
        // set directly on the DB itself when returned from Open(), but this 
option
        // is useful in APIs which expose Options but not the underlying DB.
        NoSync bool
+
+       // OpenFile is used to open files. It defaults to os.OpenFile. This 
option
+       // is useful for writing hermetic tests.
+       OpenFile func(string, int, os.FileMode) (*os.File, error)
 }
 
 // DefaultOptions represent the options used if nil options are passed into 
Open().
 // No timeout is used which will cause Bolt to wait indefinitely for a lock.
 var DefaultOptions = &Options{
-       Timeout:    0,
-       NoGrowSync: false,
+       Timeout:      0,
+       NoGrowSync:   false,
+       FreelistType: FreelistArrayType,
 }
 
 // Stats represents statistics about the database.
diff --git a/grove/vendor/github.com/coreos/bbolt/db_test.go 
b/grove/vendor/go.etcd.io/bbolt/db_test.go
similarity index 95%
rename from grove/vendor/github.com/coreos/bbolt/db_test.go
rename to grove/vendor/go.etcd.io/bbolt/db_test.go
index e3a58c3..9b03f2f 100644
--- a/grove/vendor/github.com/coreos/bbolt/db_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/db_test.go
@@ -1,4 +1,4 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
@@ -18,7 +18,7 @@ import (
        "time"
        "unsafe"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 var statsFlag = flag.Bool("stats", false, "show performance stats")
@@ -63,6 +63,43 @@ func TestOpen(t *testing.T) {
        }
 }
 
+// Regression validation for https://github.com/etcd-io/bbolt/pull/122.
+// Tests multiple goroutines simultaneously opening a database.
+func TestOpen_MultipleGoroutines(t *testing.T) {
+       const (
+               instances  = 30
+               iterations = 30
+       )
+       path := tempfile()
+       defer os.RemoveAll(path)
+       var wg sync.WaitGroup
+       errCh := make(chan error, iterations*instances)
+       for iteration := 0; iteration < iterations; iteration++ {
+               for instance := 0; instance < instances; instance++ {
+                       wg.Add(1)
+                       go func() {
+                               defer wg.Done()
+                               db, err := bolt.Open(path, 0600, nil)
+                               if err != nil {
+                                       errCh <- err
+                                       return
+                               }
+                               if err := db.Close(); err != nil {
+                                       errCh <- err
+                                       return
+                               }
+                       }()
+               }
+               wg.Wait()
+       }
+       close(errCh)
+       for err := range errCh {
+               if err != nil {
+                       t.Fatalf("error from inside goroutine: %v", err)
+               }
+       }
+}
+
 // Ensure that opening a database with a blank path returns an error.
 func TestOpen_ErrPathRequired(t *testing.T) {
        _, err := bolt.Open("", 0666, nil)
@@ -400,19 +437,20 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
                t.Fatal(err)
        }
 
-       done := make(chan struct{})
+       done := make(chan error, 1)
 
        go func() {
-               if err := wtx.Commit(); err != nil {
-                       t.Fatal(err)
-               }
-               done <- struct{}{}
+               err := wtx.Commit()
+               done <- err
        }()
 
        select {
        case <-time.After(5 * time.Second):
                t.Errorf("unexpected that the reader blocks writer")
-       case <-done:
+       case err := <-done:
+               if err != nil {
+                       t.Fatal(err)
+               }
        }
 
        if err := rtx.Rollback(); err != nil {
@@ -662,40 +700,48 @@ func TestDB_Close_PendingTx_RO(t *testing.T) { 
testDB_Close_PendingTx(t, false)
 // Ensure that a database cannot close while transactions are open.
 func testDB_Close_PendingTx(t *testing.T, writable bool) {
        db := MustOpenDB()
-       defer db.MustClose()
 
        // Start transaction.
-       tx, err := db.Begin(true)
+       tx, err := db.Begin(writable)
        if err != nil {
                t.Fatal(err)
        }
 
        // Open update in separate goroutine.
-       done := make(chan struct{})
+       done := make(chan error, 1)
        go func() {
-               if err := db.Close(); err != nil {
-                       t.Fatal(err)
-               }
-               close(done)
+               err := db.Close()
+               done <- err
        }()
 
        // Ensure database hasn't closed.
        time.Sleep(100 * time.Millisecond)
        select {
-       case <-done:
+       case err := <-done:
+               if err != nil {
+                       t.Errorf("error from inside goroutine: %v", err)
+               }
                t.Fatal("database closed too early")
        default:
        }
 
-       // Commit transaction.
-       if err := tx.Commit(); err != nil {
+       // Commit/close transaction.
+       if writable {
+               err = tx.Commit()
+       } else {
+               err = tx.Rollback()
+       }
+       if err != nil {
                t.Fatal(err)
        }
 
        // Ensure database closed now.
        time.Sleep(100 * time.Millisecond)
        select {
-       case <-done:
+       case err := <-done:
+               if err != nil {
+                       t.Fatalf("error from inside goroutine: %v", err)
+               }
        default:
                t.Fatal("database did not close")
        }
@@ -1326,7 +1372,7 @@ func ExampleDB_View() {
        // John's last name is doe.
 }
 
-func ExampleDB_Begin_ReadOnly() {
+func ExampleDB_Begin() {
        // Open the database.
        db, err := bolt.Open(tempfile(), 0666, nil)
        if err != nil {
@@ -1488,6 +1534,7 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
        for i := 0; i < b.N; i++ {
                start := make(chan struct{})
                var wg sync.WaitGroup
+               errCh := make(chan error, 10)
 
                for major := 0; major < 10; major++ {
                        wg.Add(1)
@@ -1510,13 +1557,18 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
                                        }
                                        return nil
                                }
-                               if err := db.Update(insert100); err != nil {
-                                       b.Fatal(err)
-                               }
+                               err := db.Update(insert100)
+                               errCh <- err
                        }(uint32(major))
                }
                close(start)
                wg.Wait()
+               close(errCh)
+               for err := range errCh {
+                       if err != nil {
+                               b.Fatal(err)
+                       }
+               }
        }
 
        b.StopTimer()
@@ -1573,6 +1625,16 @@ func MustOpenDB() *DB {
 // MustOpenDBWithOption returns a new, open DB at a temporary location with 
given options.
 func MustOpenWithOption(o *bolt.Options) *DB {
        f := tempfile()
+       if o == nil {
+               o = bolt.DefaultOptions
+       }
+
+       freelistType := bolt.FreelistArrayType
+       if env := os.Getenv(bolt.TestFreelistType); env == 
string(bolt.FreelistMapType) {
+               freelistType = bolt.FreelistMapType
+       }
+       o.FreelistType = freelistType
+
        db, err := bolt.Open(f, 0666, o)
        if err != nil {
                panic(err)
diff --git a/grove/vendor/github.com/coreos/bbolt/doc.go 
b/grove/vendor/go.etcd.io/bbolt/doc.go
similarity index 94%
rename from grove/vendor/github.com/coreos/bbolt/doc.go
rename to grove/vendor/go.etcd.io/bbolt/doc.go
index cc93784..95f25f0 100644
--- a/grove/vendor/github.com/coreos/bbolt/doc.go
+++ b/grove/vendor/go.etcd.io/bbolt/doc.go
@@ -1,5 +1,5 @@
 /*
-Package bolt implements a low-level key/value store in pure Go. It supports
+package bbolt implements a low-level key/value store in pure Go. It supports
 fully serializable transactions, ACID semantics, and lock-free MVCC with
 multiple readers and a single writer. Bolt can be used for projects that
 want a simple data store without the need to add large dependencies such as
@@ -41,4 +41,4 @@ point to different data or can point to invalid memory which 
will cause a panic.
 
 
 */
-package bolt
+package bbolt
diff --git a/grove/vendor/github.com/coreos/bbolt/errors.go 
b/grove/vendor/go.etcd.io/bbolt/errors.go
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/errors.go
rename to grove/vendor/go.etcd.io/bbolt/errors.go
index a3620a3..48758ca 100644
--- a/grove/vendor/github.com/coreos/bbolt/errors.go
+++ b/grove/vendor/go.etcd.io/bbolt/errors.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import "errors"
 
diff --git a/grove/vendor/github.com/coreos/bbolt/freelist.go 
b/grove/vendor/go.etcd.io/bbolt/freelist.go
similarity index 60%
rename from grove/vendor/github.com/coreos/bbolt/freelist.go
rename to grove/vendor/go.etcd.io/bbolt/freelist.go
index 266f154..697a469 100644
--- a/grove/vendor/github.com/coreos/bbolt/freelist.go
+++ b/grove/vendor/go.etcd.io/bbolt/freelist.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "fmt"
@@ -14,22 +14,54 @@ type txPending struct {
        lastReleaseBegin txid   // beginning txid of last matching releaseRange
 }
 
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[pgid]struct{}
+
 // freelist represents a list of all pages that are available for allocation.
 // It also tracks pages that have been freed but are still in use by open 
transactions.
 type freelist struct {
-       ids     []pgid              // all free and available free page ids.
-       allocs  map[pgid]txid       // mapping of txid that allocated a pgid.
-       pending map[txid]*txPending // mapping of soon-to-be free page ids by 
tx.
-       cache   map[pgid]bool       // fast lookup of all free and pending page 
ids.
+       freelistType   FreelistType                // freelist type
+       ids            []pgid                      // all free and available 
free page ids.
+       allocs         map[pgid]txid               // mapping of txid that 
allocated a pgid.
+       pending        map[txid]*txPending         // mapping of soon-to-be 
free page ids by tx.
+       cache          map[pgid]bool               // fast lookup of all free 
and pending page ids.
+       freemaps       map[uint64]pidSet           // key is the size of 
continuous pages(span), value is a set which contains the starting pgids of 
same size
+       forwardMap     map[pgid]uint64             // key is start pgid, value 
is its span size
+       backwardMap    map[pgid]uint64             // key is end pgid, value is 
its span size
+       allocate       func(txid txid, n int) pgid // the freelist allocate func
+       free_count     func() int                  // the function which gives 
you free page number
+       mergeSpans     func(ids pgids)             // the mergeSpan func
+       getFreePageIDs func() []pgid               // get free pgids func
+       readIDs        func(pgids []pgid)          // readIDs func reads list 
of pages and init the freelist
 }
 
 // newFreelist returns an empty, initialized freelist.
-func newFreelist() *freelist {
-       return &freelist{
-               allocs:  make(map[pgid]txid),
-               pending: make(map[txid]*txPending),
-               cache:   make(map[pgid]bool),
+func newFreelist(freelistType FreelistType) *freelist {
+       f := &freelist{
+               freelistType: freelistType,
+               allocs:       make(map[pgid]txid),
+               pending:      make(map[txid]*txPending),
+               cache:        make(map[pgid]bool),
+               freemaps:     make(map[uint64]pidSet),
+               forwardMap:   make(map[pgid]uint64),
+               backwardMap:  make(map[pgid]uint64),
        }
+
+       if freelistType == FreelistMapType {
+               f.allocate = f.hashmapAllocate
+               f.free_count = f.hashmapFreeCount
+               f.mergeSpans = f.hashmapMergeSpans
+               f.getFreePageIDs = f.hashmapGetFreePageIDs
+               f.readIDs = f.hashmapReadIDs
+       } else {
+               f.allocate = f.arrayAllocate
+               f.free_count = f.arrayFreeCount
+               f.mergeSpans = f.arrayMergeSpans
+               f.getFreePageIDs = f.arrayGetFreePageIDs
+               f.readIDs = f.arrayReadIDs
+       }
+
+       return f
 }
 
 // size returns the size of the page after serialization.
@@ -39,7 +71,7 @@ func (f *freelist) size() int {
                // The first element will be used to store the count. See 
freelist.write.
                n++
        }
-       return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+       return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
 }
 
 // count returns count of pages on the freelist
@@ -47,8 +79,8 @@ func (f *freelist) count() int {
        return f.free_count() + f.pending_count()
 }
 
-// free_count returns count of free pages
-func (f *freelist) free_count() int {
+// arrayFreeCount returns count of free pages(array version)
+func (f *freelist) arrayFreeCount() int {
        return len(f.ids)
 }
 
@@ -61,7 +93,7 @@ func (f *freelist) pending_count() int {
        return count
 }
 
-// copyall copies into dst a list of all free ids and all pending ids in one 
sorted list.
+// copyall copies a list of all free ids and all pending ids in one sorted 
list.
 // f.count returns the minimum length required for dst.
 func (f *freelist) copyall(dst []pgid) {
        m := make(pgids, 0, f.pending_count())
@@ -69,12 +101,12 @@ func (f *freelist) copyall(dst []pgid) {
                m = append(m, txp.ids...)
        }
        sort.Sort(m)
-       mergepgids(dst, f.ids, m)
+       mergepgids(dst, f.getFreePageIDs(), m)
 }
 
-// allocate returns the starting page id of a contiguous list of pages of a 
given size.
+// arrayAllocate returns the starting page id of a contiguous list of pages of 
a given size.
 // If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) allocate(txid txid, n int) pgid {
+func (f *freelist) arrayAllocate(txid txid, n int) pgid {
        if len(f.ids) == 0 {
                return 0
        }
@@ -160,8 +192,7 @@ func (f *freelist) release(txid txid) {
                        delete(f.pending, tid)
                }
        }
-       sort.Sort(m)
-       f.ids = pgids(f.ids).merge(m)
+       f.mergeSpans(m)
 }
 
 // releaseRange moves pending pages allocated within an extent [begin,end] to 
the free list.
@@ -194,8 +225,7 @@ func (f *freelist) releaseRange(begin, end txid) {
                        delete(f.pending, tid)
                }
        }
-       sort.Sort(m)
-       f.ids = pgids(f.ids).merge(m)
+       f.mergeSpans(m)
 }
 
 // rollback removes the pages from a given pending tx.
@@ -222,8 +252,7 @@ func (f *freelist) rollback(txid txid) {
        }
        // Remove pages from pending list and mark as free if allocated by txid.
        delete(f.pending, txid)
-       sort.Sort(m)
-       f.ids = pgids(f.ids).merge(m)
+       f.mergeSpans(m)
 }
 
 // freed returns whether a given page is in the free list.
@@ -238,34 +267,44 @@ func (f *freelist) read(p *page) {
        }
        // If the page.count is at the max uint16 value (64k) then it's 
considered
        // an overflow and the size of the freelist is stored as the first 
element.
-       idx, count := 0, int(p.count)
+       var idx, count = 0, int(p.count)
        if count == 0xFFFF {
                idx = 1
-               count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+               c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+               count = int(c)
+               if count < 0 {
+                       panic(fmt.Sprintf("leading element count %d overflows 
int", c))
+               }
        }
 
        // Copy the list of page ids from the freelist.
        if count == 0 {
                f.ids = nil
        } else {
-               ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : 
idx+count]
-               f.ids = make([]pgid, len(ids))
-               copy(f.ids, ids)
+               var ids []pgid
+               data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), 
unsafe.Sizeof(ids[0]), idx)
+               unsafeSlice(unsafe.Pointer(&ids), data, count)
 
+               // copy the ids, so we don't modify on the freelist page 
directly
+               idsCopy := make([]pgid, count)
+               copy(idsCopy, ids)
                // Make sure they're sorted.
-               sort.Sort(pgids(f.ids))
-       }
+               sort.Sort(pgids(idsCopy))
 
-       // Rebuild the page cache.
-       f.reindex()
+               f.readIDs(idsCopy)
+       }
 }
 
-// read initializes the freelist from a given list of ids.
-func (f *freelist) readIDs(ids []pgid) {
+// arrayReadIDs initializes the freelist from a given list of ids.
+func (f *freelist) arrayReadIDs(ids []pgid) {
        f.ids = ids
        f.reindex()
 }
 
+func (f *freelist) arrayGetFreePageIDs() []pgid {
+       return f.ids
+}
+
 // write writes the page ids onto a freelist page. All free and pending ids are
 // saved to disk since in the event of a program crash, all pending ids will
 // become free.
@@ -277,16 +316,22 @@ func (f *freelist) write(p *page) error {
 
        // The page.count can only hold up to 64k elements so if we overflow 
that
        // number then we handle it by putting the size in the first element.
-       lenids := f.count()
-       if lenids == 0 {
-               p.count = uint16(lenids)
-       } else if lenids < 0xFFFF {
-               p.count = uint16(lenids)
-               f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+       l := f.count()
+       if l == 0 {
+               p.count = uint16(l)
+       } else if l < 0xFFFF {
+               p.count = uint16(l)
+               var ids []pgid
+               data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+               unsafeSlice(unsafe.Pointer(&ids), data, l)
+               f.copyall(ids)
        } else {
                p.count = 0xFFFF
-               ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = 
pgid(lenids)
-               f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+               var ids []pgid
+               data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+               unsafeSlice(unsafe.Pointer(&ids), data, l+1)
+               ids[0] = pgid(l)
+               f.copyall(ids[1:])
        }
 
        return nil
@@ -307,22 +352,42 @@ func (f *freelist) reload(p *page) {
        // Check each page in the freelist and build a new available freelist
        // with any pages not in the pending lists.
        var a []pgid
-       for _, id := range f.ids {
+       for _, id := range f.getFreePageIDs() {
                if !pcache[id] {
                        a = append(a, id)
                }
        }
-       f.ids = a
 
-       // Once the available list is rebuilt then rebuild the free cache so 
that
-       // it includes the available and pending free pages.
-       f.reindex()
+       f.readIDs(a)
+}
+
+// noSyncReload reads the freelist from pgids and filters out pending items.
+func (f *freelist) noSyncReload(pgids []pgid) {
+       // Build a cache of only pending pages.
+       pcache := make(map[pgid]bool)
+       for _, txp := range f.pending {
+               for _, pendingID := range txp.ids {
+                       pcache[pendingID] = true
+               }
+       }
+
+       // Check each page in the freelist and build a new available freelist
+       // with any pages not in the pending lists.
+       var a []pgid
+       for _, id := range pgids {
+               if !pcache[id] {
+                       a = append(a, id)
+               }
+       }
+
+       f.readIDs(a)
 }
 
 // reindex rebuilds the free cache based on available and pending free lists.
 func (f *freelist) reindex() {
-       f.cache = make(map[pgid]bool, len(f.ids))
-       for _, id := range f.ids {
+       ids := f.getFreePageIDs()
+       f.cache = make(map[pgid]bool, len(ids))
+       for _, id := range ids {
                f.cache[id] = true
        }
        for _, txp := range f.pending {
@@ -331,3 +396,9 @@ func (f *freelist) reindex() {
                }
        }
 }
+
+// arrayMergeSpans try to merge list of pages(represented by pgids) with 
existing spans but using array
+func (f *freelist) arrayMergeSpans(ids pgids) {
+       sort.Sort(ids)
+       f.ids = pgids(f.ids).merge(ids)
+}
diff --git a/grove/vendor/go.etcd.io/bbolt/freelist_hmap.go 
b/grove/vendor/go.etcd.io/bbolt/freelist_hmap.go
new file mode 100644
index 0000000..02ef2be
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/freelist_hmap.go
@@ -0,0 +1,178 @@
+package bbolt
+
+import "sort"
+
+// hashmapFreeCount returns count of free pages(hashmap version)
+func (f *freelist) hashmapFreeCount() int {
+       // use the forwardmap to get the total count
+       count := 0
+       for _, size := range f.forwardMap {
+               count += int(size)
+       }
+       return count
+}
+
+// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap 
as backend
+func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
+       if n == 0 {
+               return 0
+       }
+
+       // if we have a exact size match just return short path
+       if bm, ok := f.freemaps[uint64(n)]; ok {
+               for pid := range bm {
+                       // remove the span
+                       f.delSpan(pid, uint64(n))
+
+                       f.allocs[pid] = txid
+
+                       for i := pgid(0); i < pgid(n); i++ {
+                               delete(f.cache, pid+i)
+                       }
+                       return pid
+               }
+       }
+
+       // lookup the map to find larger span
+       for size, bm := range f.freemaps {
+               if size < uint64(n) {
+                       continue
+               }
+
+               for pid := range bm {
+                       // remove the initial
+                       f.delSpan(pid, uint64(size))
+
+                       f.allocs[pid] = txid
+
+                       remain := size - uint64(n)
+
+                       // add remain span
+                       f.addSpan(pid+pgid(n), remain)
+
+                       for i := pgid(0); i < pgid(n); i++ {
+                               delete(f.cache, pid+pgid(i))
+                       }
+                       return pid
+               }
+       }
+
+       return 0
+}
+
+// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version)
+func (f *freelist) hashmapReadIDs(pgids []pgid) {
+       f.init(pgids)
+
+       // Rebuild the page cache.
+       f.reindex()
+}
+
+// hashmapGetFreePageIDs returns the sorted free page ids
+func (f *freelist) hashmapGetFreePageIDs() []pgid {
+       count := f.free_count()
+       if count == 0 {
+               return nil
+       }
+
+       m := make([]pgid, 0, count)
+       for start, size := range f.forwardMap {
+               for i := 0; i < int(size); i++ {
+                       m = append(m, start+pgid(i))
+               }
+       }
+       sort.Sort(pgids(m))
+
+       return m
+}
+
+// hashmapMergeSpans try to merge list of pages(represented by pgids) with 
existing spans
+func (f *freelist) hashmapMergeSpans(ids pgids) {
+       for _, id := range ids {
+               // try to see if we can merge and update
+               f.mergeWithExistingSpan(id)
+       }
+}
+
+// mergeWithExistingSpan merges pid to the existing free spans, try to merge 
it backward and forward
+func (f *freelist) mergeWithExistingSpan(pid pgid) {
+       prev := pid - 1
+       next := pid + 1
+
+       preSize, mergeWithPrev := f.backwardMap[prev]
+       nextSize, mergeWithNext := f.forwardMap[next]
+       newStart := pid
+       newSize := uint64(1)
+
+       if mergeWithPrev {
+               //merge with previous span
+               start := prev + 1 - pgid(preSize)
+               f.delSpan(start, preSize)
+
+               newStart -= pgid(preSize)
+               newSize += preSize
+       }
+
+       if mergeWithNext {
+               // merge with next span
+               f.delSpan(next, nextSize)
+               newSize += nextSize
+       }
+
+       f.addSpan(newStart, newSize)
+}
+
+func (f *freelist) addSpan(start pgid, size uint64) {
+       f.backwardMap[start-1+pgid(size)] = size
+       f.forwardMap[start] = size
+       if _, ok := f.freemaps[size]; !ok {
+               f.freemaps[size] = make(map[pgid]struct{})
+       }
+
+       f.freemaps[size][start] = struct{}{}
+}
+
+func (f *freelist) delSpan(start pgid, size uint64) {
+       delete(f.forwardMap, start)
+       delete(f.backwardMap, start+pgid(size-1))
+       delete(f.freemaps[size], start)
+       if len(f.freemaps[size]) == 0 {
+               delete(f.freemaps, size)
+       }
+}
+
+// initial from pgids using when use hashmap version
+// pgids must be sorted
+func (f *freelist) init(pgids []pgid) {
+       if len(pgids) == 0 {
+               return
+       }
+
+       size := uint64(1)
+       start := pgids[0]
+
+       if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return 
pgids[i] < pgids[j] }) {
+               panic("pgids not sorted")
+       }
+
+       f.freemaps = make(map[uint64]pidSet)
+       f.forwardMap = make(map[pgid]uint64)
+       f.backwardMap = make(map[pgid]uint64)
+
+       for i := 1; i < len(pgids); i++ {
+               // continuous page
+               if pgids[i] == pgids[i-1]+1 {
+                       size++
+               } else {
+                       f.addSpan(start, size)
+
+                       size = 1
+                       start = pgids[i]
+               }
+       }
+
+       // init the tail
+       if size != 0 && start != 0 {
+               f.addSpan(start, size)
+       }
+}
diff --git a/grove/vendor/github.com/coreos/bbolt/freelist_test.go 
b/grove/vendor/go.etcd.io/bbolt/freelist_test.go
similarity index 55%
rename from grove/vendor/github.com/coreos/bbolt/freelist_test.go
rename to grove/vendor/go.etcd.io/bbolt/freelist_test.go
index 24ed4cf..97656f4 100644
--- a/grove/vendor/github.com/coreos/bbolt/freelist_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/freelist_test.go
@@ -1,46 +1,50 @@
-package bolt
+package bbolt
 
 import (
        "math/rand"
+       "os"
        "reflect"
        "sort"
        "testing"
        "unsafe"
 )
 
+// TestFreelistType is used as a env variable for test to indicate the backend 
type
+const TestFreelistType = "TEST_FREELIST_TYPE"
+
 // Ensure that a page is added to a transaction's freelist.
 func TestFreelist_free(t *testing.T) {
-       f := newFreelist()
+       f := newTestFreelist()
        f.free(100, &page{id: 12})
        if !reflect.DeepEqual([]pgid{12}, f.pending[100].ids) {
-               t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
+               t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100].ids)
        }
 }
 
 // Ensure that a page and its overflow is added to a transaction's freelist.
 func TestFreelist_free_overflow(t *testing.T) {
-       f := newFreelist()
+       f := newTestFreelist()
        f.free(100, &page{id: 12, overflow: 3})
        if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, 
f.pending[100].ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
+               t.Fatalf("exp=%v; got=%v", exp, f.pending[100].ids)
        }
 }
 
 // Ensure that a transaction's free pages can be released.
 func TestFreelist_release(t *testing.T) {
-       f := newFreelist()
+       f := newTestFreelist()
        f.free(100, &page{id: 12, overflow: 1})
        f.free(100, &page{id: 9})
        f.free(102, &page{id: 39})
        f.release(100)
        f.release(101)
-       if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.ids)
+       if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, 
f.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs())
        }
 
        f.release(102)
-       if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.ids)
+       if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, 
f.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs())
        }
 }
 
@@ -142,40 +146,74 @@ func TestFreelist_releaseRange(t *testing.T) {
                                {id: 9, n: 2, allocTxn: 175, freeTxn: 200},
                        },
                        releaseRanges: []testRange{{50, 149}, {151, 300}},
-                       wantFree:      []pgid{4, 9},
+                       wantFree:      []pgid{4, 9, 10},
                },
        }
 
        for _, c := range releaseRangeTests {
-               f := newFreelist()
-
+               f := newTestFreelist()
+               var ids []pgid
                for _, p := range c.pagesIn {
                        for i := uint64(0); i < uint64(p.n); i++ {
-                               f.ids = append(f.ids, pgid(uint64(p.id)+i))
+                               ids = append(ids, pgid(uint64(p.id)+i))
                        }
                }
+               f.readIDs(ids)
                for _, p := range c.pagesIn {
                        f.allocate(p.allocTxn, p.n)
                }
 
                for _, p := range c.pagesIn {
-                       f.free(p.freeTxn, &page{id: p.id})
+                       f.free(p.freeTxn, &page{id: p.id, overflow: uint32(p.n 
- 1)})
                }
 
                for _, r := range c.releaseRanges {
                        f.releaseRange(r.begin, r.end)
                }
 
-               if exp := c.wantFree; !reflect.DeepEqual(exp, f.ids) {
-                       t.Errorf("exp=%v; got=%v for %s", exp, f.ids, c.title)
+               if exp := c.wantFree; !reflect.DeepEqual(exp, 
f.getFreePageIDs()) {
+                       t.Errorf("exp=%v; got=%v for %s", exp, 
f.getFreePageIDs(), c.title)
                }
        }
 }
 
+func TestFreelistHashmap_allocate(t *testing.T) {
+       f := newTestFreelist()
+       if f.freelistType != FreelistMapType {
+               t.Skip()
+       }
+
+       ids := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}
+       f.readIDs(ids)
+
+       f.allocate(1, 3)
+       if x := f.free_count(); x != 6 {
+               t.Fatalf("exp=6; got=%v", x)
+       }
+
+       f.allocate(1, 2)
+       if x := f.free_count(); x != 4 {
+               t.Fatalf("exp=4; got=%v", x)
+       }
+       f.allocate(1, 1)
+       if x := f.free_count(); x != 3 {
+               t.Fatalf("exp=3; got=%v", x)
+       }
+
+       f.allocate(1, 0)
+       if x := f.free_count(); x != 3 {
+               t.Fatalf("exp=3; got=%v", x)
+       }
+}
+
 // Ensure that a freelist can find contiguous blocks of pages.
-func TestFreelist_allocate(t *testing.T) {
-       f := newFreelist()
-       f.ids = []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}
+func TestFreelistArray_allocate(t *testing.T) {
+       f := newTestFreelist()
+       if f.freelistType != FreelistArrayType {
+               t.Skip()
+       }
+       ids := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}
+       f.readIDs(ids)
        if id := int(f.allocate(1, 3)); id != 3 {
                t.Fatalf("exp=3; got=%v", id)
        }
@@ -197,8 +235,8 @@ func TestFreelist_allocate(t *testing.T) {
        if id := int(f.allocate(1, 0)); id != 0 {
                t.Fatalf("exp=0; got=%v", id)
        }
-       if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.ids)
+       if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs())
        }
 
        if id := int(f.allocate(1, 1)); id != 9 {
@@ -210,8 +248,8 @@ func TestFreelist_allocate(t *testing.T) {
        if id := int(f.allocate(1, 1)); id != 0 {
                t.Fatalf("exp=0; got=%v", id)
        }
-       if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.ids)
+       if exp := []pgid{}; !reflect.DeepEqual(exp, f.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs())
        }
 }
 
@@ -224,17 +262,17 @@ func TestFreelist_read(t *testing.T) {
        page.count = 2
 
        // Insert 2 page ids.
-       ids := (*[3]pgid)(unsafe.Pointer(&page.ptr))
+       ids := (*[3]pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + 
unsafe.Sizeof(*page)))
        ids[0] = 23
        ids[1] = 50
 
        // Deserialize page into a freelist.
-       f := newFreelist()
+       f := newTestFreelist()
        f.read(page)
 
        // Ensure that there are two page ids in the freelist.
-       if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f.ids)
+       if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs())
        }
 }
 
@@ -242,7 +280,9 @@ func TestFreelist_read(t *testing.T) {
 func TestFreelist_write(t *testing.T) {
        // Create a freelist and write it to a page.
        var buf [4096]byte
-       f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid]*txPending)}
+       f := newTestFreelist()
+
+       f.readIDs([]pgid{12, 39})
        f.pending[100] = &txPending{ids: []pgid{28, 11}}
        f.pending[101] = &txPending{ids: []pgid{3}}
        p := (*page)(unsafe.Pointer(&buf[0]))
@@ -251,13 +291,13 @@ func TestFreelist_write(t *testing.T) {
        }
 
        // Read the page back out.
-       f2 := newFreelist()
+       f2 := newTestFreelist()
        f2.read(p)
 
        // Ensure that the freelist is correct.
        // All pages should be present and in reverse order.
-       if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
-               t.Fatalf("exp=%v; got=%v", exp, f2.ids)
+       if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, 
f2.getFreePageIDs()) {
+               t.Fatalf("exp=%v; got=%v", exp, f2.getFreePageIDs())
        }
 }
 
@@ -272,7 +312,9 @@ func benchmark_FreelistRelease(b *testing.B, size int) {
        b.ResetTimer()
        for i := 0; i < b.N; i++ {
                txp := &txPending{ids: pending}
-               f := &freelist{ids: ids, pending: map[txid]*txPending{1: txp}}
+               f := newTestFreelist()
+               f.pending = map[txid]*txPending{1: txp}
+               f.readIDs(ids)
                f.release(1)
        }
 }
@@ -286,3 +328,107 @@ func randomPgids(n int) []pgid {
        sort.Sort(pgids)
        return pgids
 }
+
+func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) {
+       f := newTestFreelist()
+       exp := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}
+
+       f.readIDs(exp)
+
+       if got := f.getFreePageIDs(); !reflect.DeepEqual(exp, got) {
+               t.Fatalf("exp=%v; got=%v", exp, got)
+       }
+
+       f2 := newTestFreelist()
+       var exp2 []pgid
+       f2.readIDs(exp2)
+
+       if got2 := f2.getFreePageIDs(); !reflect.DeepEqual(got2, exp2) {
+               t.Fatalf("exp2=%#v; got2=%#v", exp2, got2)
+       }
+
+}
+
+func Test_freelist_mergeWithExist(t *testing.T) {
+       bm1 := pidSet{1: struct{}{}}
+
+       bm2 := pidSet{5: struct{}{}}
+       tests := []struct {
+               name            string
+               ids             []pgid
+               pgid            pgid
+               want            []pgid
+               wantForwardmap  map[pgid]uint64
+               wantBackwardmap map[pgid]uint64
+               wantfreemap     map[uint64]pidSet
+       }{
+               {
+                       name:            "test1",
+                       ids:             []pgid{1, 2, 4, 5, 6},
+                       pgid:            3,
+                       want:            []pgid{1, 2, 3, 4, 5, 6},
+                       wantForwardmap:  map[pgid]uint64{1: 6},
+                       wantBackwardmap: map[pgid]uint64{6: 6},
+                       wantfreemap:     map[uint64]pidSet{6: bm1},
+               },
+               {
+                       name:            "test2",
+                       ids:             []pgid{1, 2, 5, 6},
+                       pgid:            3,
+                       want:            []pgid{1, 2, 3, 5, 6},
+                       wantForwardmap:  map[pgid]uint64{1: 3, 5: 2},
+                       wantBackwardmap: map[pgid]uint64{6: 2, 3: 3},
+                       wantfreemap:     map[uint64]pidSet{3: bm1, 2: bm2},
+               },
+               {
+                       name:            "test3",
+                       ids:             []pgid{1, 2},
+                       pgid:            3,
+                       want:            []pgid{1, 2, 3},
+                       wantForwardmap:  map[pgid]uint64{1: 3},
+                       wantBackwardmap: map[pgid]uint64{3: 3},
+                       wantfreemap:     map[uint64]pidSet{3: bm1},
+               },
+               {
+                       name:            "test4",
+                       ids:             []pgid{2, 3},
+                       pgid:            1,
+                       want:            []pgid{1, 2, 3},
+                       wantForwardmap:  map[pgid]uint64{1: 3},
+                       wantBackwardmap: map[pgid]uint64{3: 3},
+                       wantfreemap:     map[uint64]pidSet{3: bm1},
+               },
+       }
+       for _, tt := range tests {
+               f := newTestFreelist()
+               if f.freelistType == FreelistArrayType {
+                       t.Skip()
+               }
+               f.readIDs(tt.ids)
+
+               f.mergeWithExistingSpan(tt.pgid)
+
+               if got := f.getFreePageIDs(); !reflect.DeepEqual(tt.want, got) {
+                       t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.want, 
got)
+               }
+               if got := f.forwardMap; !reflect.DeepEqual(tt.wantForwardmap, 
got) {
+                       t.Fatalf("name %s; exp=%v; got=%v", tt.name, 
tt.wantForwardmap, got)
+               }
+               if got := f.backwardMap; !reflect.DeepEqual(tt.wantBackwardmap, 
got) {
+                       t.Fatalf("name %s; exp=%v; got=%v", tt.name, 
tt.wantBackwardmap, got)
+               }
+               if got := f.freemaps; !reflect.DeepEqual(tt.wantfreemap, got) {
+                       t.Fatalf("name %s; exp=%v; got=%v", tt.name, 
tt.wantfreemap, got)
+               }
+       }
+}
+
+// newTestFreelist gets the freelist type from the environment and initializes the freelist
+func newTestFreelist() *freelist {
+       freelistType := FreelistArrayType
+       if env := os.Getenv(TestFreelistType); env == string(FreelistMapType) {
+               freelistType = FreelistMapType
+       }
+
+       return newFreelist(freelistType)
+}
diff --git a/grove/vendor/go.etcd.io/bbolt/go.mod 
b/grove/vendor/go.etcd.io/bbolt/go.mod
new file mode 100644
index 0000000..c2366da
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/go.mod
@@ -0,0 +1,5 @@
+module go.etcd.io/bbolt
+
+go 1.12
+
+require golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
diff --git a/grove/vendor/go.etcd.io/bbolt/go.sum 
b/grove/vendor/go.etcd.io/bbolt/go.sum
new file mode 100644
index 0000000..4ad15a4
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 
h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/grove/vendor/go.etcd.io/bbolt/manydbs_test.go 
b/grove/vendor/go.etcd.io/bbolt/manydbs_test.go
new file mode 100644
index 0000000..fbbe8fe
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/manydbs_test.go
@@ -0,0 +1,67 @@
+package bbolt
+
+import (
+       "fmt"
+       "io/ioutil"
+       "math/rand"
+       "os"
+       "path/filepath"
+       "testing"
+)
+
+func createDb(t *testing.T) (*DB, func()) {
+       // First, create a temporary directory to be used for the duration of
+       // this test.
+       tempDirName, err := ioutil.TempDir("", "bboltmemtest")
+       if err != nil {
+               t.Fatalf("error creating temp dir: %v", err)
+       }
+       path := filepath.Join(tempDirName, "testdb.db")
+
+       bdb, err := Open(path, 0600, nil)
+       if err != nil {
+               t.Fatalf("error creating bbolt db: %v", err)
+       }
+
+       cleanup := func() {
+               bdb.Close()
+               os.RemoveAll(tempDirName)
+       }
+
+       return bdb, cleanup
+}
+
+func createAndPutKeys(t *testing.T) {
+       t.Parallel()
+
+       db, cleanup := createDb(t)
+       defer cleanup()
+
+       bucketName := []byte("bucket")
+
+       for i := 0; i < 100; i++ {
+               err := db.Update(func(tx *Tx) error {
+                       nodes, err := tx.CreateBucketIfNotExists(bucketName)
+                       if err != nil {
+                               return err
+                       }
+
+                       var key [16]byte
+                       rand.Read(key[:])
+                       if err := nodes.Put(key[:], nil); err != nil {
+                               return err
+                       }
+
+                       return nil
+               })
+               if err != nil {
+                       t.Fatal(err)
+               }
+       }
+}
+
+func TestManyDBs(t *testing.T) {
+       for i := 0; i < 100; i++ {
+               t.Run(fmt.Sprintf("%d", i), createAndPutKeys)
+       }
+}
diff --git a/grove/vendor/github.com/coreos/bbolt/node.go 
b/grove/vendor/go.etcd.io/bbolt/node.go
similarity index 92%
rename from grove/vendor/github.com/coreos/bbolt/node.go
rename to grove/vendor/go.etcd.io/bbolt/node.go
index f4ce240..73988b5 100644
--- a/grove/vendor/github.com/coreos/bbolt/node.go
+++ b/grove/vendor/go.etcd.io/bbolt/node.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "bytes"
@@ -41,19 +41,19 @@ func (n *node) size() int {
        sz, elsz := pageHeaderSize, n.pageElementSize()
        for i := 0; i < len(n.inodes); i++ {
                item := &n.inodes[i]
-               sz += elsz + len(item.key) + len(item.value)
+               sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
        }
-       return sz
+       return int(sz)
 }
 
 // sizeLessThan returns true if the node is less than a given size.
 // This is an optimization to avoid calculating a large node when we only need
 // to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v int) bool {
+func (n *node) sizeLessThan(v uintptr) bool {
        sz, elsz := pageHeaderSize, n.pageElementSize()
        for i := 0; i < len(n.inodes); i++ {
                item := &n.inodes[i]
-               sz += elsz + len(item.key) + len(item.value)
+               sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
                if sz >= v {
                        return false
                }
@@ -62,7 +62,7 @@ func (n *node) sizeLessThan(v int) bool {
 }
 
 // pageElementSize returns the size of each page element based on the type of 
node.
-func (n *node) pageElementSize() int {
+func (n *node) pageElementSize() uintptr {
        if n.isLeaf {
                return leafPageElementSize
        }
@@ -207,10 +207,17 @@ func (n *node) write(p *page) {
        }
 
        // Loop over each item and write it to the page.
-       b := 
(*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+       // off tracks the offset into p of the start of the next data.
+       off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
        for i, item := range n.inodes {
                _assert(len(item.key) > 0, "write: zero-length inode key")
 
+               // Create a slice to write into of needed size and advance
+               // byte pointer for next iteration.
+               sz := len(item.key) + len(item.value)
+               b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+               off += uintptr(sz)
+
                // Write the page element.
                if n.isLeaf {
                        elem := p.leafPageElement(uint16(i))
@@ -226,20 +233,9 @@ func (n *node) write(p *page) {
                        _assert(elem.pgid != p.id, "write: circular dependency 
occurred")
                }
 
-               // If the length of key+value is larger than the max allocation 
size
-               // then we need to reallocate the byte array pointer.
-               //
-               // See: https://github.com/boltdb/bolt/pull/335
-               klen, vlen := len(item.key), len(item.value)
-               if len(b) < klen+vlen {
-                       b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
-               }
-
                // Write data for the element to the end of the page.
-               copy(b[0:], item.key)
-               b = b[klen:]
-               copy(b[0:], item.value)
-               b = b[vlen:]
+               l := copy(b, item.key)
+               copy(b[l:], item.value)
        }
 
        // DEBUG ONLY: n.dump()
@@ -247,7 +243,7 @@ func (n *node) write(p *page) {
 
 // split breaks up a node into multiple smaller nodes, if appropriate.
 // This should only be called from the spill() function.
-func (n *node) split(pageSize int) []*node {
+func (n *node) split(pageSize uintptr) []*node {
        var nodes []*node
 
        node := n
@@ -270,7 +266,7 @@ func (n *node) split(pageSize int) []*node {
 
 // splitTwo breaks up a node into two smaller nodes, if appropriate.
 // This should only be called from the split() function.
-func (n *node) splitTwo(pageSize int) (*node, *node) {
+func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
        // Ignore the split if the page doesn't have at least enough nodes for
        // two pages or if the nodes can fit in a single page.
        if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
@@ -312,18 +308,18 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
 // splitIndex finds the position where a page will fill a given threshold.
 // It returns the index as well as the size of the first page.
 // This is only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz int) {
+func (n *node) splitIndex(threshold int) (index, sz uintptr) {
        sz = pageHeaderSize
 
        // Loop until we only have the minimum number of keys required for the 
second page.
        for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
-               index = i
+               index = uintptr(i)
                inode := n.inodes[i]
-               elsize := n.pageElementSize() + len(inode.key) + 
len(inode.value)
+               elsize := n.pageElementSize() + uintptr(len(inode.key)) + 
uintptr(len(inode.value))
 
                // If we have at least the minimum number of keys and adding 
another
                // node would put us over the threshold then exit and return.
-               if i >= minKeysPerPage && sz+elsize > threshold {
+               if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
                        break
                }
 
@@ -356,7 +352,7 @@ func (n *node) spill() error {
        n.children = nil
 
        // Split nodes into appropriate sizes. The first node will always be n.
-       var nodes = n.split(tx.db.pageSize)
+       var nodes = n.split(uintptr(tx.db.pageSize))
        for _, node := range nodes {
                // Add node's page to the freelist if it's not new.
                if node.pgid > 0 {
@@ -587,9 +583,11 @@ func (n *node) dump() {
 
 type nodes []*node
 
-func (s nodes) Len() int           { return len(s) }
-func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, 
s[j].inodes[0].key) == -1 }
+func (s nodes) Len() int      { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool {
+       return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
+}
 
 // inode represents an internal node inside of a node.
 // It can be used to point to elements in a page or point
diff --git a/grove/vendor/github.com/coreos/bbolt/node_test.go 
b/grove/vendor/go.etcd.io/bbolt/node_test.go
similarity index 95%
rename from grove/vendor/github.com/coreos/bbolt/node_test.go
rename to grove/vendor/go.etcd.io/bbolt/node_test.go
index fa5d10f..eea4d25 100644
--- a/grove/vendor/github.com/coreos/bbolt/node_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/node_test.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "testing"
@@ -39,14 +39,14 @@ func TestNode_read_LeafPage(t *testing.T) {
        page.count = 2
 
        // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
-       nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
+       nodes := 
(*[3]leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + 
unsafe.Sizeof(*page)))
        nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4}  // 
pos = sizeof(leafPageElement) * 2
        nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // 
pos = sizeof(leafPageElement) + 3 + 4
 
        // Write data for the nodes at the end.
-       data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
-       copy(data[:], []byte("barfooz"))
-       copy(data[7:], []byte("helloworldbye"))
+       const s = "barfoozhelloworldbye"
+       data := unsafeByteSlice(unsafe.Pointer(&nodes[2]), 0, 0, len(s))
+       copy(data, s)
 
        // Deserialize page into a leaf.
        n := &node{}
diff --git a/grove/vendor/github.com/coreos/bbolt/page.go 
b/grove/vendor/go.etcd.io/bbolt/page.go
similarity index 77%
rename from grove/vendor/github.com/coreos/bbolt/page.go
rename to grove/vendor/go.etcd.io/bbolt/page.go
index cde403a..c9a158f 100644
--- a/grove/vendor/github.com/coreos/bbolt/page.go
+++ b/grove/vendor/go.etcd.io/bbolt/page.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "fmt"
@@ -7,12 +7,12 @@ import (
        "unsafe"
 )
 
-const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+const pageHeaderSize = unsafe.Sizeof(page{})
 
 const minKeysPerPage = 2
 
-const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
-const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
 
 const (
        branchPageFlag   = 0x01
@@ -32,7 +32,6 @@ type page struct {
        flags    uint16
        count    uint16
        overflow uint32
-       ptr      uintptr
 }
 
 // typ returns a human readable page type string used for debugging.
@@ -51,13 +50,13 @@ func (p *page) typ() string {
 
 // meta returns a pointer to the metadata section of the page.
 func (p *page) meta() *meta {
-       return (*meta)(unsafe.Pointer(&p.ptr))
+       return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
 }
 
 // leafPageElement retrieves the leaf node by index
 func (p *page) leafPageElement(index uint16) *leafPageElement {
-       n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
-       return n
+       return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), 
unsafe.Sizeof(*p),
+               leafPageElementSize, int(index)))
 }
 
 // leafPageElements retrieves a list of leaf nodes.
@@ -65,12 +64,16 @@ func (p *page) leafPageElements() []leafPageElement {
        if p.count == 0 {
                return nil
        }
-       return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
+       var elems []leafPageElement
+       data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+       unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+       return elems
 }
 
 // branchPageElement retrieves the branch node by index
 func (p *page) branchPageElement(index uint16) *branchPageElement {
-       return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
+       return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), 
unsafe.Sizeof(*p),
+               unsafe.Sizeof(branchPageElement{}), int(index)))
 }
 
 // branchPageElements retrieves a list of branch nodes.
@@ -78,12 +81,15 @@ func (p *page) branchPageElements() []branchPageElement {
        if p.count == 0 {
                return nil
        }
-       return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
+       var elems []branchPageElement
+       data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+       unsafeSlice(unsafe.Pointer(&elems), data, int(p.count))
+       return elems
 }
 
 // dump writes n bytes of the page to STDERR as hex output.
 func (p *page) hexdump(n int) {
-       buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
+       buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
        fmt.Fprintf(os.Stderr, "%x\n", buf)
 }
 
@@ -102,8 +108,7 @@ type branchPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *branchPageElement) key() []byte {
-       buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-       return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+       return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), 
int(n.pos)+int(n.ksize))
 }
 
 // leafPageElement represents a node on a leaf page.
@@ -116,14 +121,16 @@ type leafPageElement struct {
 
 // key returns a byte slice of the node key.
 func (n *leafPageElement) key() []byte {
-       buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-       return 
(*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+       i := int(n.pos)
+       j := i + int(n.ksize)
+       return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // value returns a byte slice of the node value.
 func (n *leafPageElement) value() []byte {
-       buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
-       return 
(*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+       i := int(n.pos) + int(n.ksize)
+       j := i + int(n.vsize)
+       return unsafeByteSlice(unsafe.Pointer(n), 0, i, j)
 }
 
 // PageInfo represents human readable information about a page.
diff --git a/grove/vendor/github.com/coreos/bbolt/page_test.go 
b/grove/vendor/go.etcd.io/bbolt/page_test.go
similarity index 99%
rename from grove/vendor/github.com/coreos/bbolt/page_test.go
rename to grove/vendor/go.etcd.io/bbolt/page_test.go
index 59f4a30..9f5b7c0 100644
--- a/grove/vendor/github.com/coreos/bbolt/page_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/page_test.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "reflect"
diff --git a/grove/vendor/github.com/coreos/bbolt/quick_test.go 
b/grove/vendor/go.etcd.io/bbolt/quick_test.go
similarity index 97%
rename from grove/vendor/github.com/coreos/bbolt/quick_test.go
rename to grove/vendor/go.etcd.io/bbolt/quick_test.go
index 9e27792..da8b2e3 100644
--- a/grove/vendor/github.com/coreos/bbolt/quick_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/quick_test.go
@@ -1,4 +1,4 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
@@ -7,6 +7,7 @@ import (
        "math/rand"
        "os"
        "reflect"
+       "testing"
        "testing/quick"
        "time"
 )
@@ -23,7 +24,7 @@ import (
 
 var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
 
-func init() {
+func TestMain(m *testing.M) {
        flag.IntVar(&qcount, "quick.count", 5, "")
        flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "")
        flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "")
@@ -32,6 +33,8 @@ func init() {
        flag.Parse()
        fmt.Fprintln(os.Stderr, "seed:", qseed)
        fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, 
vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
+
+       m.Run()
 }
 
 func qconfig() *quick.Config {
diff --git 
a/grove/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go 
b/grove/vendor/go.etcd.io/bbolt/simulation_no_freelist_sync_test.go
similarity index 97%
rename from 
grove/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go
rename to grove/vendor/go.etcd.io/bbolt/simulation_no_freelist_sync_test.go
index da2031e..25c3dfb 100644
--- a/grove/vendor/github.com/coreos/bbolt/simulation_no_freelist_sync_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/simulation_no_freelist_sync_test.go
@@ -1,9 +1,9 @@
-package bolt_test
+package bbolt_test
 
 import (
        "testing"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 func TestSimulateNoFreeListSync_1op_1p(t *testing.T) {
diff --git a/grove/vendor/github.com/coreos/bbolt/simulation_test.go 
b/grove/vendor/go.etcd.io/bbolt/simulation_test.go
similarity index 89%
rename from grove/vendor/github.com/coreos/bbolt/simulation_test.go
rename to grove/vendor/go.etcd.io/bbolt/simulation_test.go
index a5889c0..a96a241 100644
--- a/grove/vendor/github.com/coreos/bbolt/simulation_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/simulation_test.go
@@ -1,13 +1,14 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
        "fmt"
        "math/rand"
        "sync"
+       "sync/atomic"
        "testing"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 func TestSimulate_1op_1p(t *testing.T)     { testSimulate(t, nil, 1, 1, 1) }
@@ -47,15 +48,28 @@ func testSimulate(t *testing.T, openOption *bolt.Options, 
round, threadCount, pa
 
        var mutex sync.Mutex
 
-       // Run n threads in parallel, each with their own operation.
-       var wg sync.WaitGroup
-
        for n := 0; n < round; n++ {
-
+               // Run n threads in parallel, each with their own operation.
                var threads = make(chan bool, parallelism)
+               var wg sync.WaitGroup
+
+               // counter for how many goroutines were fired
+               var opCount int64
+
+               // counter for ignored operations
+               var igCount int64
+
+               var errCh = make(chan error, threadCount)
+
                var i int
                for {
+                       // this buffered channel will keep accepting booleans
+                       // until it hits the limit defined by the parallelism
+                       // argument to testSimulate()
                        threads <- true
+
+                       // this wait group can only be marked "done" from inside
+                       // the subsequent goroutine
                        wg.Add(1)
                        writable := ((rand.Int() % 100) < 20) // 20% writers
 
@@ -70,11 +84,12 @@ func testSimulate(t *testing.T, openOption *bolt.Options, 
round, threadCount, pa
                        // Execute a thread for the given operation.
                        go func(writable bool, handler simulateHandler) {
                                defer wg.Done()
-
+                               atomic.AddInt64(&opCount, 1)
                                // Start transaction.
                                tx, err := db.Begin(writable)
                                if err != nil {
-                                       t.Fatal("tx begin: ", err)
+                                       errCh <- fmt.Errorf("error tx begin: 
%v", err)
+                                       return
                                }
 
                                // Obtain current state of the dataset.
@@ -93,7 +108,8 @@ func testSimulate(t *testing.T, openOption *bolt.Options, 
round, threadCount, pa
                                                mutex.Unlock()
 
                                                if err := tx.Commit(); err != 
nil {
-                                                       t.Fatal(err)
+                                                       errCh <- err
+                                                       return
                                                }
                                        }()
                                } else {
@@ -102,6 +118,7 @@ func testSimulate(t *testing.T, openOption *bolt.Options, 
round, threadCount, pa
 
                                // Ignore operation if we don't have data yet.
                                if qdb == nil {
+                                       atomic.AddInt64(&igCount, 1)
                                        return
                                }
 
@@ -113,17 +130,25 @@ func testSimulate(t *testing.T, openOption *bolt.Options, 
round, threadCount, pa
                        }(writable, handler)
 
                        i++
-                       if i > threadCount {
+                       if i >= threadCount {
                                break
                        }
                }
 
                // Wait until all threads are done.
                wg.Wait()
+               t.Logf("transactions:%d ignored:%d", opCount, igCount)
+               close(errCh)
+               for err := range errCh {
+                       if err != nil {
+                               t.Fatalf("error from inside goroutine: %v", err)
+                       }
+               }
 
                db.MustClose()
                db.MustReopen()
        }
+
 }
 
 type simulateHandler func(tx *bolt.Tx, qdb *QuickDB)
diff --git a/grove/vendor/github.com/coreos/bbolt/tx.go 
b/grove/vendor/go.etcd.io/bbolt/tx.go
similarity index 93%
rename from grove/vendor/github.com/coreos/bbolt/tx.go
rename to grove/vendor/go.etcd.io/bbolt/tx.go
index 5c02907..4b1a64a 100644
--- a/grove/vendor/github.com/coreos/bbolt/tx.go
+++ b/grove/vendor/go.etcd.io/bbolt/tx.go
@@ -1,4 +1,4 @@
-package bolt
+package bbolt
 
 import (
        "fmt"
@@ -254,17 +254,36 @@ func (tx *Tx) Rollback() error {
        if tx.db == nil {
                return ErrTxClosed
        }
-       tx.rollback()
+       tx.nonPhysicalRollback()
        return nil
 }
 
+// nonPhysicalRollback is called when user calls Rollback directly, in this 
case we do not need to reload the free pages from disk.
+func (tx *Tx) nonPhysicalRollback() {
+       if tx.db == nil {
+               return
+       }
+       if tx.writable {
+               tx.db.freelist.rollback(tx.meta.txid)
+       }
+       tx.close()
+}
+
+// rollback needs to reload the free pages from disk in case some system error 
happens like fsync error.
 func (tx *Tx) rollback() {
        if tx.db == nil {
                return
        }
        if tx.writable {
                tx.db.freelist.rollback(tx.meta.txid)
-               tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+               if !tx.db.hasSyncedFreelist() {
+                       // Reconstruct free page list by scanning the DB to get 
the whole free page list.
+                       // Note: scanning the whole db is heavy if your db size 
is large in NoSyncFreeList mode.
+                       tx.db.freelist.noSyncReload(tx.db.freepages())
+               } else {
+                       // Read free page list from freelist page.
+                       tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
+               }
        }
        tx.close()
 }
@@ -303,7 +322,9 @@ func (tx *Tx) close() {
 }
 
 // Copy writes the entire database to a writer.
-// This function exists for backwards compatibility. Use WriteTo() instead.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
 func (tx *Tx) Copy(w io.Writer) error {
        _, err := tx.WriteTo(w)
        return err
@@ -313,7 +334,7 @@ func (tx *Tx) Copy(w io.Writer) error {
 // If err == nil then exactly tx.Size() bytes will be written into the writer.
 func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
        // Attempt to open reader with WriteFlag
-       f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+       f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
        if err != nil {
                return 0, err
        }
@@ -367,7 +388,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
 // A reader transaction is maintained during the copy so it is safe to continue
 // using the database while a copy is in progress.
 func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
-       f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+       f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
        if err != nil {
                return err
        }
@@ -502,20 +523,18 @@ func (tx *Tx) write() error {
 
        // Write pages to disk in order.
        for _, p := range pages {
-               size := (int(p.overflow) + 1) * tx.db.pageSize
+               rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize)
                offset := int64(p.id) * int64(tx.db.pageSize)
+               var written uintptr
 
                // Write out page in "max allocation" sized chunks.
-               ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
                for {
-                       // Limit our write to our max allocation size.
-                       sz := size
+                       sz := rem
                        if sz > maxAllocSize-1 {
                                sz = maxAllocSize - 1
                        }
+                       buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, 
int(sz))
 
-                       // Write chunk to disk.
-                       buf := ptr[:sz]
                        if _, err := tx.db.ops.writeAt(buf, offset); err != nil 
{
                                return err
                        }
@@ -524,14 +543,14 @@ func (tx *Tx) write() error {
                        tx.stats.Write++
 
                        // Exit inner for loop if we've written all the chunks.
-                       size -= sz
-                       if size == 0 {
+                       rem -= sz
+                       if rem == 0 {
                                break
                        }
 
                        // Otherwise move offset forward and move pointer to 
next chunk.
                        offset += int64(sz)
-                       ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
+                       written += uintptr(sz)
                }
        }
 
@@ -550,7 +569,7 @@ func (tx *Tx) write() error {
                        continue
                }
 
-               buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
+               buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
 
                // See 
https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
                for i := range buf {
diff --git a/grove/vendor/github.com/coreos/bbolt/tx_test.go 
b/grove/vendor/go.etcd.io/bbolt/tx_test.go
similarity index 93%
rename from grove/vendor/github.com/coreos/bbolt/tx_test.go
rename to grove/vendor/go.etcd.io/bbolt/tx_test.go
index de92cb5..38a25c6 100644
--- a/grove/vendor/github.com/coreos/bbolt/tx_test.go
+++ b/grove/vendor/go.etcd.io/bbolt/tx_test.go
@@ -1,4 +1,4 @@
-package bolt_test
+package bbolt_test
 
 import (
        "bytes"
@@ -8,7 +8,7 @@ import (
        "os"
        "testing"
 
-       "github.com/coreos/bbolt"
+       bolt "go.etcd.io/bbolt"
 )
 
 // TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database.
@@ -57,6 +57,8 @@ func TestTx_Check_ReadOnly(t *testing.T) {
                        t.Fatal(err)
                }
        }
+       // Close the view transaction
+       tx.Rollback()
 }
 
 // Ensure that committing a closed transaction returns an error.
@@ -110,6 +112,8 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
        if err := tx.Commit(); err != bolt.ErrTxNotWritable {
                t.Fatal(err)
        }
+       // Close the view transaction
+       tx.Rollback()
 }
 
 // Ensure that a transaction can retrieve a cursor on the root bucket.
@@ -650,6 +654,57 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
        }
 }
 
+// TestTx_Rollback ensures there is no error when tx rollback whether we sync 
freelist or not.
+func TestTx_Rollback(t *testing.T) {
+       for _, isSyncFreelist := range []bool{false, true} {
+               // Open the database.
+               db, err := bolt.Open(tempfile(), 0666, nil)
+               if err != nil {
+                       log.Fatal(err)
+               }
+               defer os.Remove(db.Path())
+               db.NoFreelistSync = isSyncFreelist
+
+               tx, err := db.Begin(true)
+               if err != nil {
+                       t.Fatalf("Error starting tx: %v", err)
+               }
+               bucket := []byte("mybucket")
+               if _, err := tx.CreateBucket(bucket); err != nil {
+                       t.Fatalf("Error creating bucket: %v", err)
+               }
+               if err := tx.Commit(); err != nil {
+                       t.Fatalf("Error on commit: %v", err)
+               }
+
+               tx, err = db.Begin(true)
+               if err != nil {
+                       t.Fatalf("Error starting tx: %v", err)
+               }
+               b := tx.Bucket(bucket)
+               if err := b.Put([]byte("k"), []byte("v")); err != nil {
+                       t.Fatalf("Error on put: %v", err)
+               }
+               // Imagine there is an error and tx needs to be rolled-back
+               if err := tx.Rollback(); err != nil {
+                       t.Fatalf("Error on rollback: %v", err)
+               }
+
+               tx, err = db.Begin(false)
+               if err != nil {
+                       t.Fatalf("Error starting tx: %v", err)
+               }
+               b = tx.Bucket(bucket)
+               if v := b.Get([]byte("k")); v != nil {
+                       t.Fatalf("Value for k should not have been stored")
+               }
+               if err := tx.Rollback(); err != nil {
+                       t.Fatalf("Error on rollback: %v", err)
+               }
+
+       }
+}
+
 // TestTx_releaseRange ensures db.freePages handles page releases
 // correctly when there are transaction that are no longer reachable
 // via any read/write transactions and are "between" ongoing read
diff --git a/grove/vendor/go.etcd.io/bbolt/unsafe.go 
b/grove/vendor/go.etcd.io/bbolt/unsafe.go
new file mode 100644
index 0000000..c0e5037
--- /dev/null
+++ b/grove/vendor/go.etcd.io/bbolt/unsafe.go
@@ -0,0 +1,39 @@
+package bbolt
+
+import (
+       "reflect"
+       "unsafe"
+)
+
+func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+       return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) 
unsafe.Pointer {
+       return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+       // See: 
https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+       //
+       // This memory is not allocated from C, but it is unmanaged by Go's
+       // garbage collector and should behave similarly, and the compiler
+       // should produce similar code.  Note that this conversion allows a
+       // subslice to begin after the base address, with an optional offset,
+       // while the URL above does not cover this case and only slices from
+       // index 0.  However, the wiki never says that the address must be to
+       // the beginning of a C allocation (or even that malloc was used at
+       // all), so this is believed to be correct.
+       return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j]
+}
+
+// unsafeSlice modifies the data, len, and cap of a slice variable pointed to 
by
+// the slice parameter.  This helper should be used over other direct
+// manipulation of reflect.SliceHeader to prevent misuse, namely, converting
+// from reflect.SliceHeader to a Go slice type.
+func unsafeSlice(slice, data unsafe.Pointer, len int) {
+       s := (*reflect.SliceHeader)(slice)
+       s.Data = uintptr(data)
+       s.Cap = len
+       s.Len = len
+}

Reply via email to