Merge pull request #1065 from vmarkovtsev/fix-unicode

Remove Unicode normalization in difftree
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 92b7b8c..bdb5f73 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -21,7 +21,8 @@
 
 The official support channels, for both users and contributors, are:
 
-- GitHub [issues](https://github.com/src-d/go-git/issues)*
+- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions.
+- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests.
 - Slack: #go-git room in the [source{d} Slack](https://join.slack.com/t/sourced-community/shared_invite/enQtMjc4Njk5MzEyNzM2LTFjNzY4NjEwZGEwMzRiNTM4MzRlMzQ4MmIzZjkwZmZlM2NjODUxZmJjNDI1OTcxNDAyMmZlNmFjODZlNTg0YWM)
 
 *Before opening a new issue or submitting a new pull request, it's helpful to
diff --git a/LICENSE b/LICENSE
index 6d972e2..8aa3d85 100644
--- a/LICENSE
+++ b/LICENSE
@@ -186,7 +186,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright 2017 Sourced Technologies, S.L.
+   Copyright 2018 Sourced Technologies, S.L.
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -198,4 +198,4 @@
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
-   limitations under the License.
\ No newline at end of file
+   limitations under the License.
diff --git a/README.md b/README.md
index 8cdfef8..ed9306c 100644
--- a/README.md
+++ b/README.md
@@ -3,16 +3,16 @@
 
 *go-git* is a highly extensible git implementation library written in **pure Go**.
 
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several type of storage, such as in-memory filesystems, or custom implementations thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface.
+It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations thanks to the [`Storer`](https://godoc.org/gopkg.in/src-d/go-git.v4/plumbing/storer) interface.
 
-It's being actively develop since 2015 and is being use extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools.
+It's being actively developed since 2015 and is being used extensively by [source{d}](https://sourced.tech/) and [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), and by many other libraries and tools.
 
 Comparison with git
 -------------------
 
 *go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does.
 
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
+*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
 
 
 Installation
@@ -24,12 +24,12 @@
 go get -u gopkg.in/src-d/go-git.v4/...
 ```
 
-> We use [gopkg.in](http://labix.org/gopkg.in) for having a versioned API, this means that when `go get` clones the package, is the latest tag matching `v4.*` cloned and not the master branch.
+> We use [gopkg.in](http://labix.org/gopkg.in) to version the API, this means that when `go get` clones the package, it's the latest tag matching `v4.*` that is cloned and not the master branch.
 
 Examples
 --------
 
-> Please note that the functions `CheckIfError` and `Info` used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples.
+> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples.
 
 
 ### Basic example
@@ -71,7 +71,7 @@
 
 CheckIfError(err)
 
-// Gets the HEAD history from HEAD, just like does:
+// Gets the HEAD history from HEAD, just like this command:
 Info("git log")
 
 // ... retrieves the branch pointed by HEAD
@@ -110,7 +110,7 @@
 ...
 ```
 
-You can find this [example](_examples/log/main.go) and many others at the [examples](_examples) folder
+You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
 
 Contribute
 ----------
diff --git a/_examples/README.md b/_examples/README.md
index 26639b1..cf9c2d3 100644
--- a/_examples/README.md
+++ b/_examples/README.md
@@ -6,6 +6,10 @@
 - [showcase](showcase/main.go) - A small showcase of the capabilities of _go-git_
 - [open](open/main.go) - Opening a existing repository cloned by _git_
 - [clone](clone/main.go) - Cloning a repository
+    - [username and password](clone/auth/basic/username_password/main.go) - Cloning a repository
+      using a username and password
+    - [personal access token](clone/auth/basic/access_token/main.go) - Cloning
+      a repository using a GitHub personal access token
 - [commit](commit/main.go) - Commit changes to the current branch to an existent repository
 - [push](push/main.go) - Push repository to default remote (origin)
 - [pull](pull/main.go) - Pull changes from a remote repository
diff --git a/_examples/checkout/main.go b/_examples/checkout/main.go
index 2c54550..5969eb4 100644
--- a/_examples/checkout/main.go
+++ b/_examples/checkout/main.go
@@ -38,8 +38,8 @@
 	})
 	CheckIfError(err)
 
-	// ... retrieving the commit being pointed by HEAD, it's shows that the
-	// repository is poiting to the giving commit in detached mode
+	// ... retrieving the commit being pointed by HEAD, it shows that the
+	// repository is pointing to the given commit in detached mode
 	Info("git show-ref --head HEAD")
 	ref, err = r.Head()
 	CheckIfError(err)
diff --git a/_examples/clone/auth/basic/access_token/main.go b/_examples/clone/auth/basic/access_token/main.go
new file mode 100644
index 0000000..7f6d121
--- /dev/null
+++ b/_examples/clone/auth/basic/access_token/main.go
@@ -0,0 +1,40 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	git "gopkg.in/src-d/go-git.v4"
+	. "gopkg.in/src-d/go-git.v4/_examples"
+	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
+)
+
+func main() {
+	CheckArgs("<url>", "<directory>", "<github_access_token>")
+	url, directory, token := os.Args[1], os.Args[2], os.Args[3]
+
+	// Clone the given repository to the given directory
+	Info("git clone %s %s", url, directory)
+
+	r, err := git.PlainClone(directory, false, &git.CloneOptions{
+		// The intended use of a GitHub personal access token is in place of your password
+		// because access tokens can easily be revoked.
+		// https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/
+		Auth: &http.BasicAuth{
+			Username: "abc123", // yes, this can be anything except an empty string
+			Password: token,
+		},
+		URL:      url,
+		Progress: os.Stdout,
+	})
+	CheckIfError(err)
+
+	// ... retrieving the branch being pointed by HEAD
+	ref, err := r.Head()
+	CheckIfError(err)
+	// ... retrieving the commit object
+	commit, err := r.CommitObject(ref.Hash())
+	CheckIfError(err)
+
+	fmt.Println(commit)
+}
diff --git a/_examples/clone/auth/basic/username_password/main.go b/_examples/clone/auth/basic/username_password/main.go
new file mode 100644
index 0000000..754558c
--- /dev/null
+++ b/_examples/clone/auth/basic/username_password/main.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+	"fmt"
+	"os"
+
+	git "gopkg.in/src-d/go-git.v4"
+	. "gopkg.in/src-d/go-git.v4/_examples"
+	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
+)
+
+func main() {
+	CheckArgs("<url>", "<directory>", "<github_username>", "<github_password>")
+	url, directory, username, password := os.Args[1], os.Args[2], os.Args[3], os.Args[4]
+
+	// Clone the given repository to the given directory
+	Info("git clone %s %s", url, directory)
+
+	r, err := git.PlainClone(directory, false, &git.CloneOptions{
+		Auth: &http.BasicAuth{
+			Username: username,
+			Password: password,
+		},
+		URL:      url,
+		Progress: os.Stdout,
+	})
+	CheckIfError(err)
+
+	// ... retrieving the branch being pointed by HEAD
+	ref, err := r.Head()
+	CheckIfError(err)
+	// ... retrieving the commit object
+	commit, err := r.CommitObject(ref.Hash())
+	CheckIfError(err)
+
+	fmt.Println(commit)
+}
diff --git a/_examples/commit/main.go b/_examples/commit/main.go
index 556cb9c..ec296b9 100644
--- a/_examples/commit/main.go
+++ b/_examples/commit/main.go
@@ -12,13 +12,13 @@
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 )
 
-// Basic example of how to commit changes to the current branch to an existent
+// Basic example of how to commit changes to the current branch to an existing
 // repository.
 func main() {
 	CheckArgs("<directory>")
 	directory := os.Args[1]
 
-	// Opens an already existent repository.
+	// Opens an already existing repository.
 	r, err := git.PlainOpen(directory)
 	CheckIfError(err)
 
@@ -44,7 +44,7 @@
 
 	fmt.Println(status)
 
-	// Commits the current staging are to the repository, with the new file
+	// Commits the current staging area to the repository, with the new file
 	// just created. We should provide the object.Signature of Author of the
 	// commit.
 	Info("git commit -m \"example go-git commit\"")
diff --git a/_examples/log/main.go b/_examples/log/main.go
index 714d58f..ba0597a 100644
--- a/_examples/log/main.go
+++ b/_examples/log/main.go
@@ -23,7 +23,7 @@
 	})
 	CheckIfError(err)
 
-	// Gets the HEAD history from HEAD, just like does:
+	// Gets the HEAD history from HEAD, just like this command:
 	Info("git log")
 
 	// ... retrieves the branch pointed by HEAD
diff --git a/_examples/open/main.go b/_examples/open/main.go
index b890423..dec183e 100644
--- a/_examples/open/main.go
+++ b/_examples/open/main.go
@@ -14,7 +14,7 @@
 	CheckArgs("<path>")
 	path := os.Args[1]
 
-	// We instance a new repository targeting the given path (the .git folder)
+	// We instantiate a new repository targeting the given path (the .git folder)
 	r, err := git.PlainOpen(path)
 	CheckIfError(err)
 
diff --git a/_examples/pull/main.go b/_examples/pull/main.go
index ae751d2..06369fa 100644
--- a/_examples/pull/main.go
+++ b/_examples/pull/main.go
@@ -13,7 +13,7 @@
 	CheckArgs("<path>")
 	path := os.Args[1]
 
-	// We instance a new repository targeting the given path (the .git folder)
+	// We instantiate a new repository targeting the given path (the .git folder)
 	r, err := git.PlainOpen(path)
 	CheckIfError(err)
 
diff --git a/_examples/showcase/main.go b/_examples/showcase/main.go
index aeeddb8..85f2b58 100644
--- a/_examples/showcase/main.go
+++ b/_examples/showcase/main.go
@@ -16,7 +16,7 @@
 // - Get the HEAD reference
 // - Using the HEAD reference, obtain the commit this reference is pointing to
 // - Print the commit content
-// - Using the commit, iterate all its files and print them
+// - Using the commit, iterate over all its files and print them
 // - Print all the commit history with commit messages, short hash and the
 // first line of the commit message
 func main() {
diff --git a/_examples/storage/README.md b/_examples/storage/README.md
index fc72e6f..b002515 100644
--- a/_examples/storage/README.md
+++ b/_examples/storage/README.md
@@ -6,7 +6,7 @@
 
 
 ### and what this means ...
-*git* has as very well defined storage system, the `.git` directory, present on any repository. This is the place where `git` stores al the [`objects`](https://git-scm.com/book/en/v2/Git-Internals-Git-Objects), [`references`](https://git-scm.com/book/es/v2/Git-Internals-Git-References) and [`configuration`](https://git-scm.com/docs/git-config#_configuration_file). This information is stored in plain files.
+*git* has a very well defined storage system, the `.git` directory, present on any repository. This is the place where `git` stores all the [`objects`](https://git-scm.com/book/en/v2/Git-Internals-Git-Objects), [`references`](https://git-scm.com/book/es/v2/Git-Internals-Git-References) and [`configuration`](https://git-scm.com/docs/git-config#_configuration_file). This information is stored in plain files.
 
 Our original **go-git** version was designed to work in memory, some time after we added support to read the `.git`, and now we have added support for fully customized [storages](https://godoc.org/gopkg.in/src-d/go-git.v4/storage#Storer).
 
diff --git a/_examples/tag/main.go b/_examples/tag/main.go
index 190c3ad..1e6212b 100644
--- a/_examples/tag/main.go
+++ b/_examples/tag/main.go
@@ -15,7 +15,7 @@
 	CheckArgs("<path>")
 	path := os.Args[1]
 
-	// We instance a new repository targeting the given path (the .git folder)
+	// We instantiate a new repository targeting the given path (the .git folder)
 	r, err := git.PlainOpen(path)
 	CheckIfError(err)
 
diff --git a/config/config.go b/config/config.go
index a637f6d..2c3b8b9 100644
--- a/config/config.go
+++ b/config/config.go
@@ -8,6 +8,7 @@
 	"sort"
 	"strconv"
 
+	"gopkg.in/src-d/go-git.v4/internal/url"
 	format "gopkg.in/src-d/go-git.v4/plumbing/format/config"
 )
 
@@ -399,3 +400,7 @@
 
 	return c.raw
 }
+
+func (c *RemoteConfig) IsFirstURLLocal() bool {
+	return url.IsLocalEndpoint(c.URLs[0])
+}
diff --git a/example_test.go b/example_test.go
index ef7e3d3..691b4ac 100644
--- a/example_test.go
+++ b/example_test.go
@@ -11,6 +11,7 @@
 	"gopkg.in/src-d/go-git.v4"
 	"gopkg.in/src-d/go-git.v4/config"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/transport/http"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	"gopkg.in/src-d/go-billy.v4/memfs"
@@ -69,6 +70,52 @@
 	// Output: Initial changelog
 }
 
+func ExamplePlainClone_usernamePassword() {
+	// Tempdir to clone the repository
+	dir, err := ioutil.TempDir("", "clone-example")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	defer os.RemoveAll(dir) // clean up
+
+	// Clones the repository into the given dir, just as a normal git clone does
+	_, err = git.PlainClone(dir, false, &git.CloneOptions{
+		URL: "https://github.com/git-fixtures/basic.git",
+		Auth: &http.BasicAuth{
+			Username: "username",
+			Password: "password",
+		},
+	})
+
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+func ExamplePlainClone_accessToken() {
+	// Tempdir to clone the repository
+	dir, err := ioutil.TempDir("", "clone-example")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	defer os.RemoveAll(dir) // clean up
+
+	// Clones the repository into the given dir, just as a normal git clone does
+	_, err = git.PlainClone(dir, false, &git.CloneOptions{
+		URL: "https://github.com/git-fixtures/basic.git",
+		Auth: &http.BasicAuth{
+			Username: "abc123", // anything except an empty string
+			Password: "github_access_token",
+		},
+	})
+
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
 func ExampleRepository_References() {
 	r, _ := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
 		URL: "https://github.com/git-fixtures/basic.git",
diff --git a/go.mod b/go.mod
index e269350..36a1bed 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,7 @@
 	github.com/pkg/errors v0.8.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/sergi/go-diff v1.0.0
-	github.com/src-d/gcfg v1.3.0
+	github.com/src-d/gcfg v1.4.0
 	github.com/stretchr/testify v1.2.2 // indirect
 	github.com/xanzy/ssh-agent v0.2.0
 	golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
diff --git a/go.sum b/go.sum
index e262a66..98ba1d4 100644
--- a/go.sum
+++ b/go.sum
@@ -35,6 +35,8 @@
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/src-d/gcfg v1.3.0 h1:2BEDr8r0I0b8h/fOqwtxCEiq2HJu8n2JGZJQFGXWLjg=
 github.com/src-d/gcfg v1.3.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
+github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
 github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro=
diff --git a/internal/url/url.go b/internal/url/url.go
new file mode 100644
index 0000000..0f0d709
--- /dev/null
+++ b/internal/url/url.go
@@ -0,0 +1,37 @@
+package url
+
+import (
+	"regexp"
+)
+
+var (
+	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`)
+	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
+)
+
+// MatchesScheme returns true if the given string matches a URL-like
+// format scheme.
+func MatchesScheme(url string) bool {
+	return isSchemeRegExp.MatchString(url)
+}
+
+// MatchesScpLike returns true if the given string matches an SCP-like
+// format scheme.
+func MatchesScpLike(url string) bool {
+	return scpLikeUrlRegExp.MatchString(url)
+}
+
+// FindScpLikeComponents returns the user, host, port and path of the
+// given SCP-like URL.
+func FindScpLikeComponents(url string) (user, host, port, path string) {
+	m := scpLikeUrlRegExp.FindStringSubmatch(url)
+	return m[1], m[2], m[3], m[4]
+}
+
+// IsLocalEndpoint returns true if the given URL string specifies a
+// local file endpoint.  For example, on a Linux machine,
+// `/home/user/src/go-git` would match as a local endpoint, but
+// `https://github.com/src-d/go-git` would not.
+func IsLocalEndpoint(url string) bool {
+	return !MatchesScheme(url) && !MatchesScpLike(url)
+}
diff --git a/options.go b/options.go
index 5d10a88..ed7689a 100644
--- a/options.go
+++ b/options.go
@@ -335,6 +335,11 @@
 	// Show only those commits in which the specified file was inserted/updated.
 	// It is equivalent to running `git log -- <file-name>`.
 	FileName *string
+
+	// Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>.
+	// It is equivalent to running `git log --all`.
+	// If set to true, the From option will be ignored.
+	All bool
 }
 
 var (
diff --git a/plumbing/format/index/decoder.go b/plumbing/format/index/decoder.go
index df25530..ac57d08 100644
--- a/plumbing/format/index/decoder.go
+++ b/plumbing/format/index/decoder.go
@@ -261,6 +261,17 @@
 		if err := d.Decode(idx.ResolveUndo); err != nil {
 			return err
 		}
+	case bytes.Equal(header, endOfIndexEntryExtSignature):
+		r, err := d.getExtensionReader()
+		if err != nil {
+			return err
+		}
+
+		idx.EndOfIndexEntry = &EndOfIndexEntry{}
+		d := &endOfIndexEntryDecoder{r}
+		if err := d.Decode(idx.EndOfIndexEntry); err != nil {
+			return err
+		}
 	default:
 		return errUnknownExtension
 	}
@@ -449,3 +460,17 @@
 
 	return nil
 }
+
+type endOfIndexEntryDecoder struct {
+	r io.Reader
+}
+
+func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
+	var err error
+	e.Offset, err = binary.ReadUint32(d.r)
+	if err != nil {
+		return err
+	}
+
+	return binary.Read(d.r, &e.Hash)
+}
diff --git a/plumbing/format/index/decoder_test.go b/plumbing/format/index/decoder_test.go
index b612ebb..7468ad0 100644
--- a/plumbing/format/index/decoder_test.go
+++ b/plumbing/format/index/decoder_test.go
@@ -202,3 +202,19 @@
 	c.Assert(idx.Entries[6].IntentToAdd, Equals, true)
 	c.Assert(idx.Entries[6].SkipWorktree, Equals, false)
 }
+
+func (s *IndexSuite) TestDecodeEndOfIndexEntry(c *C) {
+	f, err := fixtures.Basic().ByTag("end-of-index-entry").One().DotGit().Open("index")
+	c.Assert(err, IsNil)
+	defer func() { c.Assert(f.Close(), IsNil) }()
+
+	idx := &Index{}
+	d := NewDecoder(f)
+	err = d.Decode(idx)
+	c.Assert(err, IsNil)
+
+	c.Assert(idx.Version, Equals, uint32(2))
+	c.Assert(idx.EndOfIndexEntry, NotNil)
+	c.Assert(idx.EndOfIndexEntry.Offset, Equals, uint32(716))
+	c.Assert(idx.EndOfIndexEntry.Hash.String(), Equals, "922e89d9ffd7cefce93a211615b2053c0f42bd78")
+}
diff --git a/plumbing/format/index/doc.go b/plumbing/format/index/doc.go
index d1e7b33..f2b3d76 100644
--- a/plumbing/format/index/doc.go
+++ b/plumbing/format/index/doc.go
@@ -297,5 +297,64 @@
 //        in the previous ewah bitmap.
 //
 //      - One NUL.
-// Source https://www.kernel.org/pub/software/scm/git/docs/technical/index-format.txt
+//
+//   == File System Monitor cache
+//
+//     The file system monitor cache tracks files for which the core.fsmonitor
+//     hook has told us about changes.  The signature for this extension is
+//     { 'F', 'S', 'M', 'N' }.
+//
+//     The extension starts with
+//
+//     - 32-bit version number: the current supported version is 1.
+//
+//     - 64-bit time: the extension data reflects all changes through the given
+//       time which is stored as the nanoseconds elapsed since midnight,
+//       January 1, 1970.
+//
+//    - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
+//
+//    - An ewah bitmap, the n-th bit indicates whether the n-th index entry
+//      is not CE_FSMONITOR_VALID.
+//
+//  == End of Index Entry
+//
+//    The End of Index Entry (EOIE) is used to locate the end of the variable
+//    length index entries and the beginning of the extensions. Code can take
+//    advantage of this to quickly locate the index extensions without having
+//    to parse through all of the index entries.
+//
+//    Because it must be able to be loaded before the variable length cache
+//    entries and other index extensions, this extension must be written last.
+//    The signature for this extension is { 'E', 'O', 'I', 'E' }.
+//
+//    The extension consists of:
+//
+//    - 32-bit offset to the end of the index entries
+//
+//    - 160-bit SHA-1 over the extension types and their sizes (but not
+//      their contents).  E.g. if we have "TREE" extension that is N-bytes
+//      long, "REUC" extension that is M-bytes long, followed by "EOIE",
+//      then the hash would be:
+//
+//      SHA-1("TREE" + <binary representation of N> +
+//        "REUC" + <binary representation of M>)
+//
+//  == Index Entry Offset Table
+//
+//    The Index Entry Offset Table (IEOT) is used to help address the CPU
+//    cost of loading the index by enabling multi-threading the process of
+//    converting cache entries from the on-disk format to the in-memory format.
+//    The signature for this extension is { 'I', 'E', 'O', 'T' }.
+//
+//    The extension consists of:
+//
+//    - 32-bit version (currently 1)
+//
+//    - A number of index offset entries each consisting of:
+//
+//    - 32-bit offset from the beginning of the file to the first cache entry
+//      in this block of entries.
+//
+//    - 32-bit count of cache entries in this block
 package index
diff --git a/plumbing/format/index/index.go b/plumbing/format/index/index.go
index fc7b8cd..6c4b7ca 100644
--- a/plumbing/format/index/index.go
+++ b/plumbing/format/index/index.go
@@ -18,9 +18,10 @@
 	// ErrEntryNotFound is returned by Index.Entry, if an entry is not found.
 	ErrEntryNotFound = errors.New("entry not found")
 
-	indexSignature          = []byte{'D', 'I', 'R', 'C'}
-	treeExtSignature        = []byte{'T', 'R', 'E', 'E'}
-	resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'}
+	indexSignature              = []byte{'D', 'I', 'R', 'C'}
+	treeExtSignature            = []byte{'T', 'R', 'E', 'E'}
+	resolveUndoExtSignature     = []byte{'R', 'E', 'U', 'C'}
+	endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
 )
 
 // Stage during merge
@@ -50,6 +51,8 @@
 	Cache *Tree
 	// ResolveUndo represents the 'Resolve undo' extension
 	ResolveUndo *ResolveUndo
+	// EndOfIndexEntry represents the 'End of Index Entry' extension
+	EndOfIndexEntry *EndOfIndexEntry
 }
 
 // Add creates a new Entry and returns it. The caller should first check that
@@ -193,3 +196,18 @@
 	Path   string
 	Stages map[Stage]plumbing.Hash
 }
+
+// EndOfIndexEntry is the End of Index Entry (EOIE), used to locate the end of
+// the variable length index entries and the beginning of the extensions. Code
+// can take advantage of this to quickly locate the index extensions without
+// having to parse through all of the index entries.
+//
+//  Because it must be able to be loaded before the variable length cache
+//  entries and other index extensions, this extension must be written last.
+type EndOfIndexEntry struct {
+	// Offset to the end of the index entries
+	Offset uint32
+	// Hash is a SHA-1 over the extension types and their sizes (but not
+	//	their contents).
+	Hash plumbing.Hash
+}
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index 2b4aceb..0d9ed54 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -51,7 +51,13 @@
 	}
 
 	defer ioutil.CheckClose(w, &err)
-	_, err = io.Copy(w, packfile)
+
+	var n int64
+	n, err = io.Copy(w, packfile)
+	if err == nil && n == 0 {
+		return ErrEmptyPackfile
+	}
+
 	return err
 }
 
diff --git a/plumbing/format/packfile/common_test.go b/plumbing/format/packfile/common_test.go
index 387c0d1..eafc617 100644
--- a/plumbing/format/packfile/common_test.go
+++ b/plumbing/format/packfile/common_test.go
@@ -1,15 +1,29 @@
 package packfile
 
 import (
+	"bytes"
 	"testing"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	. "gopkg.in/check.v1"
 )
 
 func Test(t *testing.T) { TestingT(t) }
 
+type CommonSuite struct{}
+
+var _ = Suite(&CommonSuite{})
+
+func (s *CommonSuite) TestEmptyUpdateObjectStorage(c *C) {
+	var buf bytes.Buffer
+	sto := memory.NewStorage()
+
+	err := UpdateObjectStorage(sto, &buf)
+	c.Assert(err, Equals, ErrEmptyPackfile)
+}
+
 func newObject(t plumbing.ObjectType, cont []byte) plumbing.EncodedObject {
 	o := plumbing.MemoryObject{}
 	o.SetType(t)
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
index 330cb73..a268bce 100644
--- a/plumbing/format/packfile/fsobject.go
+++ b/plumbing/format/packfile/fsobject.go
@@ -48,7 +48,7 @@
 // Reader implements the plumbing.EncodedObject interface.
 func (o *FSObject) Reader() (io.ReadCloser, error) {
 	obj, ok := o.cache.Get(o.hash)
-	if ok {
+	if ok && obj != o {
 		reader, err := obj.Reader()
 		if err != nil {
 			return nil, err
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
index 0d13066..69b6e85 100644
--- a/plumbing/format/packfile/packfile.go
+++ b/plumbing/format/packfile/packfile.go
@@ -21,6 +21,16 @@
 	ErrZLib = NewError("zlib reading error")
 )
 
+// When reading small objects from packfile it is beneficial to do so at
+// once to exploit the buffered I/O. In many cases the objects are so small
+// that they were already loaded to memory when the object header was
+// loaded from the packfile. Wrapping in FSObject would cause this buffered
+// data to be thrown away and then re-read later, with the additional
+// seeking causing reloads from disk. Objects smaller than this threshold
+// are now always read into memory and stored in cache instead of being
+// wrapped in FSObject.
+const smallObjectThreshold = 16 * 1024
+
 // Packfile allows retrieving information from inside a packfile.
 type Packfile struct {
 	idxfile.Index
@@ -79,15 +89,7 @@
 		}
 	}
 
-	if _, err := p.s.SeekFromStart(o); err != nil {
-		if err == io.EOF || isInvalid(err) {
-			return nil, plumbing.ErrObjectNotFound
-		}
-
-		return nil, err
-	}
-
-	return p.nextObject()
+	return p.objectAtOffset(o)
 }
 
 // GetSizeByOffset retrieves the size of the encoded object from the
@@ -105,7 +107,13 @@
 	if err != nil {
 		return 0, err
 	}
-	return h.Length, nil
+	return p.getObjectSize(h)
+}
+
+func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) {
+	h, err := p.s.SeekObjectHeader(offset)
+	p.s.pendingObject = nil
+	return h, err
 }
 
 func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
@@ -114,62 +122,6 @@
 	return h, err
 }
 
-func (p *Packfile) getObjectData(
-	h *ObjectHeader,
-) (typ plumbing.ObjectType, size int64, err error) {
-	switch h.Type {
-	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
-		typ = h.Type
-		size = h.Length
-	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
-		buf := bufPool.Get().(*bytes.Buffer)
-		buf.Reset()
-		defer bufPool.Put(buf)
-
-		_, _, err = p.s.NextObject(buf)
-		if err != nil {
-			return
-		}
-
-		delta := buf.Bytes()
-		_, delta = decodeLEB128(delta) // skip src size
-		sz, _ := decodeLEB128(delta)
-		size = int64(sz)
-
-		var offset int64
-		if h.Type == plumbing.REFDeltaObject {
-			offset, err = p.FindOffset(h.Reference)
-			if err != nil {
-				return
-			}
-		} else {
-			offset = h.OffsetReference
-		}
-
-		if baseType, ok := p.offsetToType[offset]; ok {
-			typ = baseType
-		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
-			if err != nil {
-				return
-			}
-
-			typ, _, err = p.getObjectData(h)
-			if err != nil {
-				return
-			}
-		}
-	default:
-		err = ErrInvalidObject.AddDetails("type %q", h.Type)
-	}
-
-	return
-}
-
 func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
 	switch h.Type {
 	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
@@ -210,11 +162,7 @@
 		if baseType, ok := p.offsetToType[offset]; ok {
 			typ = baseType
 		} else {
-			if _, err = p.s.SeekFromStart(offset); err != nil {
-				return
-			}
-
-			h, err = p.nextObjectHeader()
+			h, err = p.objectHeaderAtOffset(offset)
 			if err != nil {
 				return
 			}
@@ -231,8 +179,8 @@
 	return
 }
 
-func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
-	h, err := p.nextObjectHeader()
+func (p *Packfile) objectAtOffset(offset int64) (plumbing.EncodedObject, error) {
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		if err == io.EOF || isInvalid(err) {
 			return nil, plumbing.ErrObjectNotFound
@@ -246,6 +194,13 @@
 		return p.getNextObject(h)
 	}
 
+	// If the object is not a delta and it's small enough then read it
+	// completely into memory now since it is already read from disk
+	// into buffer anyway.
+	if h.Length <= smallObjectThreshold && h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject {
+		return p.getNextObject(h)
+	}
+
 	hash, err := p.FindHash(h.Offset)
 	if err != nil {
 		return nil, err
@@ -289,11 +244,7 @@
 		}
 	}
 
-	if _, err := p.s.SeekFromStart(offset); err != nil {
-		return nil, err
-	}
-
-	h, err := p.nextObjectHeader()
+	h, err := p.objectHeaderAtOffset(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -385,8 +336,6 @@
 		if err != nil {
 			return err
 		}
-
-		p.cachePut(base)
 	}
 
 	obj.SetType(base.Type())
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
index 05dc8a7..455fe65 100644
--- a/plumbing/format/packfile/packfile_test.go
+++ b/plumbing/format/packfile/packfile_test.go
@@ -277,3 +277,29 @@
 
 	return idxf
 }
+
+func (s *PackfileSuite) TestSize(c *C) {
+	f := fixtures.Basic().ByTag("ref-delta").One()
+
+	index := getIndexFromIdxFile(f.Idx())
+	fs := osfs.New("")
+	pf, err := fs.Open(f.Packfile().Name())
+	c.Assert(err, IsNil)
+
+	packfile := packfile.NewPackfile(index, fs, pf)
+	defer packfile.Close()
+
+	// Get the size of binary.jpg, which is not delta-encoded.
+	offset, err := packfile.FindOffset(plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"))
+	c.Assert(err, IsNil)
+	size, err := packfile.GetSizeByOffset(offset)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(76110))
+
+	// Get the size of the root commit, which is delta-encoded.
+	offset, err = packfile.FindOffset(f.Head)
+	c.Assert(err, IsNil)
+	size, err = packfile.GetSizeByOffset(offset)
+	c.Assert(err, IsNil)
+	c.Assert(size, Equals, int64(245))
+}
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
index 28582b5..71cbba9 100644
--- a/plumbing/format/packfile/parser.go
+++ b/plumbing/format/packfile/parser.go
@@ -38,15 +38,14 @@
 // Parser decodes a packfile and calls any observer associated to it. Is used
 // to generate indexes.
 type Parser struct {
-	storage          storer.EncodedObjectStorer
-	scanner          *Scanner
-	count            uint32
-	oi               []*objectInfo
-	oiByHash         map[plumbing.Hash]*objectInfo
-	oiByOffset       map[int64]*objectInfo
-	hashOffset       map[plumbing.Hash]int64
-	pendingRefDeltas map[plumbing.Hash][]*objectInfo
-	checksum         plumbing.Hash
+	storage    storer.EncodedObjectStorer
+	scanner    *Scanner
+	count      uint32
+	oi         []*objectInfo
+	oiByHash   map[plumbing.Hash]*objectInfo
+	oiByOffset map[int64]*objectInfo
+	hashOffset map[plumbing.Hash]int64
+	checksum   plumbing.Hash
 
 	cache *cache.BufferLRU
 	// delta content by offset, only used if source is not seekable
@@ -78,13 +77,12 @@
 	}
 
 	return &Parser{
-		storage:          storage,
-		scanner:          scanner,
-		ob:               ob,
-		count:            0,
-		cache:            cache.NewBufferLRUDefault(),
-		pendingRefDeltas: make(map[plumbing.Hash][]*objectInfo),
-		deltas:           deltas,
+		storage: storage,
+		scanner: scanner,
+		ob:      ob,
+		count:   0,
+		cache:   cache.NewBufferLRUDefault(),
+		deltas:  deltas,
 	}, nil
 }
 
@@ -150,10 +148,6 @@
 		return plumbing.ZeroHash, err
 	}
 
-	if len(p.pendingRefDeltas) > 0 {
-		return plumbing.ZeroHash, ErrReferenceDeltaNotFound
-	}
-
 	if err := p.onFooter(p.checksum); err != nil {
 		return plumbing.ZeroHash, err
 	}
@@ -205,18 +199,21 @@
 			parent.Children = append(parent.Children, ota)
 		case plumbing.REFDeltaObject:
 			delta = true
-
 			parent, ok := p.oiByHash[oh.Reference]
-			if ok {
-				ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
-				parent.Children = append(parent.Children, ota)
-			} else {
-				ota = newBaseObject(oh.Offset, oh.Length, t)
-				p.pendingRefDeltas[oh.Reference] = append(
-					p.pendingRefDeltas[oh.Reference],
-					ota,
-				)
+			if !ok {
+				// can't find referenced object in this pack file
+				// this must be a "thin" pack.
+				parent = &objectInfo{ // Placeholder parent
+					SHA1:        oh.Reference,
+					ExternalRef: true, // mark as an external reference that must be resolved
+					Type:        plumbing.AnyObject,
+					DiskType:    plumbing.AnyObject,
+				}
+				p.oiByHash[oh.Reference] = parent
 			}
+			ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+			parent.Children = append(parent.Children, ota)
+
 		default:
 			ota = newBaseObject(oh.Offset, oh.Length, t)
 		}
@@ -297,16 +294,20 @@
 	return nil
 }
 
-func (p *Parser) get(o *objectInfo) ([]byte, error) {
-	b, ok := p.cache.Get(o.Offset)
+func (p *Parser) get(o *objectInfo) (b []byte, err error) {
+	var ok bool
+	if !o.ExternalRef { // skip cache check for placeholder parents
+		b, ok = p.cache.Get(o.Offset)
+	}
+
 	// If it's not on the cache and is not a delta we can try to find it in the
-	// storage, if there's one.
+	// storage, if there's one. External refs must enter here.
 	if !ok && p.storage != nil && !o.Type.IsDelta() {
-		var err error
 		e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
 		if err != nil {
 			return nil, err
 		}
+		o.Type = e.Type()
 
 		r, err := e.Reader()
 		if err != nil {
@@ -323,6 +324,11 @@
 		return b, nil
 	}
 
+	if o.ExternalRef {
+		// we were not able to resolve a ref in a thin pack
+		return nil, ErrReferenceDeltaNotFound
+	}
+
 	var data []byte
 	if o.DiskType.IsDelta() {
 		base, err := p.get(o.Parent)
@@ -335,7 +341,6 @@
 			return nil, err
 		}
 	} else {
-		var err error
 		data, err = p.readData(o)
 		if err != nil {
 			return nil, err
@@ -367,14 +372,6 @@
 		return nil, err
 	}
 
-	if pending, ok := p.pendingRefDeltas[o.SHA1]; ok {
-		for _, po := range pending {
-			po.Parent = o
-			o.Children = append(o.Children, po)
-		}
-		delete(p.pendingRefDeltas, o.SHA1)
-	}
-
 	if p.storage != nil {
 		obj := new(plumbing.MemoryObject)
 		obj.SetSize(o.Size())
@@ -401,11 +398,7 @@
 		return data, nil
 	}
 
-	if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
-		return nil, err
-	}
-
-	if _, err := p.scanner.NextObjectHeader(); err != nil {
+	if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil {
 		return nil, err
 	}
 
@@ -447,10 +440,11 @@
 }
 
 type objectInfo struct {
-	Offset   int64
-	Length   int64
-	Type     plumbing.ObjectType
-	DiskType plumbing.ObjectType
+	Offset      int64
+	Length      int64
+	Type        plumbing.ObjectType
+	DiskType    plumbing.ObjectType
+	ExternalRef bool // indicates this is an external reference in a thin pack file
 
 	Crc32 uint32
 
diff --git a/plumbing/format/packfile/parser_test.go b/plumbing/format/packfile/parser_test.go
index 012a140..6e7c84b 100644
--- a/plumbing/format/packfile/parser_test.go
+++ b/plumbing/format/packfile/parser_test.go
@@ -1,10 +1,13 @@
 package packfile_test
 
 import (
+	"io"
 	"testing"
 
+	git "gopkg.in/src-d/go-git.v4"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
@@ -74,6 +77,53 @@
 	c.Assert(obs.objects, DeepEquals, objs)
 }
 
+func (s *ParserSuite) TestThinPack(c *C) {
+
+	// Initialize an empty repository
+	fs, err := git.PlainInit(c.MkDir(), true)
+	c.Assert(err, IsNil)
+
+	// Try to parse a thin pack without having the required objects in the repo to
+	// see if the correct errors are returned
+	thinpack := fixtures.ByTag("thinpack").One()
+	scanner := packfile.NewScanner(thinpack.Packfile())
+	parser, err := packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects!
+	c.Assert(err, IsNil)
+
+	_, err = parser.Parse()
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+	// start over with a clean repo
+	fs, err = git.PlainInit(c.MkDir(), true)
+	c.Assert(err, IsNil)
+
+	// Now unpack a base packfile into our empty repo:
+	f := fixtures.ByURL("https://github.com/spinnaker/spinnaker.git").One()
+	w, err := fs.Storer.(storer.PackfileWriter).PackfileWriter()
+	c.Assert(err, IsNil)
+	_, err = io.Copy(w, f.Packfile())
+	c.Assert(err, IsNil)
+	w.Close()
+
+	// Check that the test object that will come with our thin pack is *not* in the repo
+	_, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+	// Now unpack the thin pack:
+	scanner = packfile.NewScanner(thinpack.Packfile())
+	parser, err = packfile.NewParserWithStorage(scanner, fs.Storer) // ParserWithStorage writes to the storer all parsed objects!
+	c.Assert(err, IsNil)
+
+	h, err := parser.Parse()
+	c.Assert(err, IsNil)
+	c.Assert(h, Equals, plumbing.NewHash("1288734cbe0b95892e663221d94b95de1f5d7be8"))
+
+	// Check that our test object is now accessible
+	_, err = fs.Storer.EncodedObject(plumbing.CommitObject, thinpack.Head)
+	c.Assert(err, IsNil)
+
+}
+
 type observerObject struct {
 	hash   string
 	otype  plumbing.ObjectType
diff --git a/plumbing/format/packfile/scanner.go b/plumbing/format/packfile/scanner.go
index 6fc183b..614b0d1 100644
--- a/plumbing/format/packfile/scanner.go
+++ b/plumbing/format/packfile/scanner.go
@@ -138,14 +138,52 @@
 	return binary.ReadUint32(s.r)
 }
 
+// SeekObjectHeader seeks to the specified offset and returns the ObjectHeader
+// for the next object in the reader.
+func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) {
+	// if seeking we assume that you are not interested in the header
+	if s.version == 0 {
+		s.version = VersionSupported
+	}
+
+	if _, err := s.r.Seek(offset, io.SeekStart); err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
 // NextObjectHeader returns the ObjectHeader for the next object in the reader
 func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) {
-	defer s.Flush()
-
 	if err := s.doPending(); err != nil {
 		return nil, err
 	}
 
+	offset, err := s.r.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+
+	h, err := s.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	h.Offset = offset
+	return h, nil
+}
+
+// nextObjectHeader returns the ObjectHeader for the next object in the reader
+// without the Offset field
+func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) {
+	defer s.Flush()
+
 	s.crc.Reset()
 
 	h := &ObjectHeader{}
@@ -308,7 +346,7 @@
 // SeekFromStart sets a new offset from start, returns the old position before
 // the change.
 func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
-	// if seeking we assume that you are not interested on the header
+	// if seeking we assume that you are not interested in the header
 	if s.version == 0 {
 		s.version = VersionSupported
 	}
@@ -385,7 +423,7 @@
 }
 
 func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent {
+	if whence == io.SeekCurrent && offset == 0 {
 		current, err := r.r.Seek(offset, whence)
 		if err != nil {
 			return current, err
diff --git a/plumbing/format/packfile/scanner_test.go b/plumbing/format/packfile/scanner_test.go
index 644d0eb..091b457 100644
--- a/plumbing/format/packfile/scanner_test.go
+++ b/plumbing/format/packfile/scanner_test.go
@@ -118,6 +118,23 @@
 	c.Assert(n, Equals, f.PackfileHash)
 }
 
+func (s *ScannerSuite) TestSeekObjectHeader(c *C) {
+	r := fixtures.Basic().One().Packfile()
+	p := NewScanner(r)
+
+	h, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+	c.Assert(err, IsNil)
+	c.Assert(h, DeepEquals, &expectedHeadersOFS[4])
+}
+
+func (s *ScannerSuite) TestSeekObjectHeaderNonSeekable(c *C) {
+	r := io.MultiReader(fixtures.Basic().One().Packfile())
+	p := NewScanner(r)
+
+	_, err := p.SeekObjectHeader(expectedHeadersOFS[4].Offset)
+	c.Assert(err, Equals, ErrSeekNotSupported)
+}
+
 var expectedHeadersOFS = []ObjectHeader{
 	{Type: plumbing.CommitObject, Offset: 12, Length: 254},
 	{Type: plumbing.OFSDeltaObject, Offset: 186, Length: 93, OffsetReference: 12},
diff --git a/plumbing/object/commit_walker.go b/plumbing/object/commit_walker.go
index 40ad258..0eff059 100644
--- a/plumbing/object/commit_walker.go
+++ b/plumbing/object/commit_walker.go
@@ -1,10 +1,12 @@
 package object
 
 import (
+	"container/list"
 	"io"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage"
 )
 
 type commitPreIterator struct {
@@ -181,3 +183,145 @@
 }
 
 func (w *commitPostIterator) Close() {}
+
+// commitAllIterator is a commit iterator for all refs.
+type commitAllIterator struct {
+	// currCommit points to the current commit.
+	currCommit *list.Element
+}
+
+// NewCommitAllIter returns a new commit iterator for all refs.
+// repoStorer is a repo Storer used to get commits and references.
+// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order.
+func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) {
+	commitsPath := list.New()
+	commitsLookup := make(map[plumbing.Hash]*list.Element)
+	head, err := storer.ResolveReference(repoStorer, plumbing.HEAD)
+	if err == nil {
+		err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup)
+	}
+
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return nil, err
+	}
+
+	// add all references along with the HEAD
+	refIter, err := repoStorer.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+	defer refIter.Close()
+
+	for {
+		ref, err := refIter.Next()
+		if err == io.EOF {
+			break
+		}
+
+		if err == plumbing.ErrReferenceNotFound {
+			continue
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil {
+			return nil, err
+		}
+	}
+
+	return &commitAllIterator{commitsPath.Front()}, nil
+}
+
+func addReference(
+	repoStorer storage.Storer,
+	commitIterFunc func(*Commit) CommitIter,
+	ref *plumbing.Reference,
+	commitsPath *list.List,
+	commitsLookup map[plumbing.Hash]*list.Element) error {
+
+	_, exists := commitsLookup[ref.Hash()]
+	if exists {
+		// we already have it - skip the reference.
+		return nil
+	}
+
+	refCommit, _ := GetCommit(repoStorer, ref.Hash())
+	if refCommit == nil {
+		// if it's not a commit - skip it.
+		return nil
+	}
+
+	var (
+		refCommits []*Commit
+		parent     *list.Element
+	)
+	// collect all ref commits to add
+	commitIter := commitIterFunc(refCommit)
+	for c, e := commitIter.Next(); e == nil; {
+		parent, exists = commitsLookup[c.Hash]
+		if exists {
+			break
+		}
+		refCommits = append(refCommits, c)
+		c, e = commitIter.Next()
+	}
+	commitIter.Close()
+
+	if parent == nil {
+		// common parent - not found
+		// add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet)
+		for _, c := range refCommits {
+			parent = commitsPath.PushBack(c)
+			commitsLookup[c.Hash] = parent
+		}
+	} else {
+		// add ref's commits to the path in reverse order (from the latest)
+		for i := len(refCommits) - 1; i >= 0; i-- {
+			c := refCommits[i]
+			// insert before found common parent
+			parent = commitsPath.InsertBefore(c, parent)
+			commitsLookup[c.Hash] = parent
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Next() (*Commit, error) {
+	if it.currCommit == nil {
+		return nil, io.EOF
+	}
+
+	c := it.currCommit.Value.(*Commit)
+	it.currCommit = it.currCommit.Next()
+
+	return c, nil
+}
+
+func (it *commitAllIterator) ForEach(cb func(*Commit) error) error {
+	for {
+		c, err := it.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		err = cb(c)
+		if err == storer.ErrStop {
+			break
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (it *commitAllIterator) Close() {
+	it.currCommit = nil
+}
diff --git a/plumbing/object/commit_walker_file.go b/plumbing/object/commit_walker_file.go
index 84e738a..6f16e61 100644
--- a/plumbing/object/commit_walker_file.go
+++ b/plumbing/object/commit_walker_file.go
@@ -1,23 +1,30 @@
 package object
 
 import (
-	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"io"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 )
 
 type commitFileIter struct {
 	fileName      string
 	sourceIter    CommitIter
 	currentCommit *Commit
+	checkParent   bool
 }
 
 // NewCommitFileIterFromIter returns a commit iterator which performs diffTree between
 // successive trees returned from the commit iterator from the argument. The purpose of this is
 // to find the commits that explain how the files that match the path came to be.
-func NewCommitFileIterFromIter(fileName string, commitIter CommitIter) CommitIter {
+// If checkParent is true then the function double-checks whether the potential parent (next commit in a path)
+// is one of the parents in the tree (it's used by `git log --all`).
+func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter {
 	iterator := new(commitFileIter)
 	iterator.sourceIter = commitIter
 	iterator.fileName = fileName
+	iterator.checkParent = checkParent
 	return iterator
 }
 
@@ -71,20 +78,14 @@
 			return nil, diffErr
 		}
 
-		foundChangeForFile := false
-		for _, change := range changes {
-			if change.name() == c.fileName {
-				foundChangeForFile = true
-				break
-			}
-		}
+		found := c.hasFileChange(changes, parentCommit)
 
 		// Storing the current-commit in-case a change is found, and
 		// Updating the current-commit for the next-iteration
 		prevCommit := c.currentCommit
 		c.currentCommit = parentCommit
 
-		if foundChangeForFile == true {
+		if found {
 			return prevCommit, nil
 		}
 
@@ -95,6 +96,35 @@
 	}
 }
 
+func (c *commitFileIter) hasFileChange(changes Changes, parent *Commit) bool {
+	for _, change := range changes {
+		if change.name() != c.fileName {
+			continue
+		}
+
+		// filename matches, now check if source iterator contains all commits (from all refs)
+		if c.checkParent {
+			if parent != nil && isParentHash(parent.Hash, c.currentCommit) {
+				return true
+			}
+			continue
+		}
+
+		return true
+	}
+
+	return false
+}
+
+func isParentHash(hash plumbing.Hash, commit *Commit) bool {
+	for _, h := range commit.ParentHashes {
+		if h == hash {
+			return true
+		}
+	}
+	return false
+}
+
 func (c *commitFileIter) ForEach(cb func(*Commit) error) error {
 	for {
 		commit, nextErr := c.Next()
diff --git a/plumbing/reference.go b/plumbing/reference.go
index 2f53d4e..08e908f 100644
--- a/plumbing/reference.go
+++ b/plumbing/reference.go
@@ -55,6 +55,36 @@
 // ReferenceName reference name's
 type ReferenceName string
 
+// NewBranchReferenceName returns a reference name describing a branch based on
+// its short name.
+func NewBranchReferenceName(name string) ReferenceName {
+	return ReferenceName(refHeadPrefix + name)
+}
+
+// NewNoteReferenceName returns a reference name describing a note based on its
+// short name.
+func NewNoteReferenceName(name string) ReferenceName {
+	return ReferenceName(refNotePrefix + name)
+}
+
+// NewRemoteReferenceName returns a reference name describing a remote branch
+// based on its short name and the remote name.
+func NewRemoteReferenceName(remote, name string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name))
+}
+
+// NewRemoteHEADReferenceName returns a reference name describing the HEAD
+// branch of a remote.
+func NewRemoteHEADReferenceName(remote string) ReferenceName {
+	return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD))
+}
+
+// NewTagReferenceName returns a reference name describing a tag based on its
+// short name.
+func NewTagReferenceName(name string) ReferenceName {
+	return ReferenceName(refTagPrefix + name)
+}
+
 // IsBranch check if a reference is a branch
 func (r ReferenceName) IsBranch() bool {
 	return strings.HasPrefix(string(r), refHeadPrefix)
diff --git a/plumbing/reference_test.go b/plumbing/reference_test.go
index 47919ef..b3ccf53 100644
--- a/plumbing/reference_test.go
+++ b/plumbing/reference_test.go
@@ -54,6 +54,31 @@
 	c.Assert(r.Hash(), Equals, NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
 }
 
+func (s *ReferenceSuite) TestNewBranchReferenceName(c *C) {
+	r := NewBranchReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/heads/foo")
+}
+
+func (s *ReferenceSuite) TestNewNoteReferenceName(c *C) {
+	r := NewNoteReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/notes/foo")
+}
+
+func (s *ReferenceSuite) TestNewRemoteReferenceName(c *C) {
+	r := NewRemoteReferenceName("bar", "foo")
+	c.Assert(r.String(), Equals, "refs/remotes/bar/foo")
+}
+
+func (s *ReferenceSuite) TestNewRemoteHEADReferenceName(c *C) {
+	r := NewRemoteHEADReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/remotes/foo/HEAD")
+}
+
+func (s *ReferenceSuite) TestNewTagReferenceName(c *C) {
+	r := NewTagReferenceName("foo")
+	c.Assert(r.String(), Equals, "refs/tags/foo")
+}
+
 func (s *ReferenceSuite) TestIsBranch(c *C) {
 	r := ExampleReferenceName
 	c.Assert(r.IsBranch(), Equals, true)
diff --git a/plumbing/revlist/revlist.go b/plumbing/revlist/revlist.go
index 0a9d1e8..7ad71ac 100644
--- a/plumbing/revlist/revlist.go
+++ b/plumbing/revlist/revlist.go
@@ -21,7 +21,20 @@
 	objs,
 	ignore []plumbing.Hash,
 ) ([]plumbing.Hash, error) {
-	ignore, err := objects(s, ignore, nil, true)
+	return ObjectsWithStorageForIgnores(s, s, objs, ignore)
+}
+
+// ObjectsWithStorageForIgnores is the same as Objects, but a
+// secondary storage layer can be provided, to be used to find the
+// full set of objects to be ignored while finding the reachable
+// objects.  This is useful when the main `s` storage layer is slow
+// and/or remote, while the ignore list is available somewhere local.
+func ObjectsWithStorageForIgnores(
+	s, ignoreStore storer.EncodedObjectStorer,
+	objs,
+	ignore []plumbing.Hash,
+) ([]plumbing.Hash, error) {
+	ignore, err := objects(ignoreStore, ignore, nil, true)
 	if err != nil {
 		return nil, err
 	}
@@ -114,7 +127,6 @@
 	i := object.NewCommitPreorderIter(commit, seen, ignore)
 	pending := make(map[plumbing.Hash]bool)
 	addPendingParents(pending, visited, commit)
-
 	for {
 		commit, err := i.Next()
 		if err == io.EOF {
diff --git a/plumbing/revlist/revlist_test.go b/plumbing/revlist/revlist_test.go
index dea1c73..ceae727 100644
--- a/plumbing/revlist/revlist_test.go
+++ b/plumbing/revlist/revlist_test.go
@@ -129,6 +129,32 @@
 	c.Assert(len(hist), Equals, len(expected))
 }
 
+func (s *RevListSuite) TestRevListObjectsWithStorageForIgnores(c *C) {
+	sto := filesystem.NewStorage(
+		fixtures.ByTag("merge-conflict").One().DotGit(),
+		cache.NewObjectLRUDefault())
+
+	// The "merge-conflict" repo has one extra commit in it, with a
+	// two files modified in two different subdirs.
+	expected := map[string]bool{
+		"1980fcf55330d9d94c34abee5ab734afecf96aba": true, // commit
+		"73d9cf44e9045254346c73f6646b08f9302c8570": true, // root dir
+		"e8435d512a98586bd2e4fcfcdf04101b0bb1b500": true, // go/
+		"257cc5642cb1a054f08cc83f2d943e56fd3ebe99": true, // haskal.hs
+		"d499a1a0b79b7d87a35155afd0c1cce78b37a91c": true, // example.go
+		"d108adc364fb6f21395d011ae2c8a11d96905b0d": true, // haskal/
+	}
+
+	hist, err := ObjectsWithStorageForIgnores(sto, s.Storer, []plumbing.Hash{plumbing.NewHash("1980fcf55330d9d94c34abee5ab734afecf96aba")}, []plumbing.Hash{plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")})
+	c.Assert(err, IsNil)
+
+	for _, h := range hist {
+		c.Assert(expected[h.String()], Equals, true)
+	}
+
+	c.Assert(len(hist), Equals, len(expected))
+}
+
 // ---
 // | |\
 // | | * b8e471f Creating changelog
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index 2ac9b09..98d1ec3 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -222,7 +222,7 @@
 }
 
 // NewMultiEncodedObjectIter returns an object iterator for the given slice of
-// objects.
+// EncodedObjectIters.
 func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter {
 	return &MultiEncodedObjectIter{iters: iters}
 }
diff --git a/plumbing/storer/reference.go b/plumbing/storer/reference.go
index 5e85a3b..cce72b4 100644
--- a/plumbing/storer/reference.go
+++ b/plumbing/storer/reference.go
@@ -131,9 +131,27 @@
 // an error happens or the end of the iter is reached. If ErrStop is sent
 // the iteration is stop but no error is returned. The iterator is closed.
 func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+type bareReferenceIterator interface {
+	Next() (*plumbing.Reference, error)
+	Close()
+}
+
+func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error {
 	defer iter.Close()
-	for _, r := range iter.series {
-		if err := cb(r); err != nil {
+	for {
+		obj, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+
+		if err := cb(obj); err != nil {
 			if err == ErrStop {
 				return nil
 			}
@@ -141,8 +159,6 @@
 			return err
 		}
 	}
-
-	return nil
 }
 
 // Close releases any resources used by the iterator.
@@ -150,6 +166,52 @@
 	iter.pos = len(iter.series)
 }
 
+// MultiReferenceIter implements ReferenceIter. It iterates over several
+// ReferenceIters.
+//
+// The MultiReferenceIter must be closed with a call to Close() when it is no
+// longer needed.
+type MultiReferenceIter struct {
+	iters []ReferenceIter
+}
+
+// NewMultiReferenceIter returns a reference iterator for the given slice of
+// ReferenceIters.
+func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter {
+	return &MultiReferenceIter{iters: iters}
+}
+
+// Next returns the next reference from the iterator; when an iterator reaches
+// io.EOF, it is removed and the next one is used.
+func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) {
+	if len(iter.iters) == 0 {
+		return nil, io.EOF
+	}
+
+	obj, err := iter.iters[0].Next()
+	if err == io.EOF {
+		iter.iters[0].Close()
+		iter.iters = iter.iters[1:]
+		return iter.Next()
+	}
+
+	return obj, err
+}
+
+// ForEach calls the cb function for each reference contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped but no error is returned. The iterator is closed.
+func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error {
+	return forEachReferenceIter(iter, cb)
+}
+
+// Close releases any resources used by the iterator.
+func (iter *MultiReferenceIter) Close() {
+	for _, i := range iter.iters {
+		i.Close()
+	}
+}
+
 // ResolveReference resolves a SymbolicReference to a HashReference.
 func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) {
 	r, err := s.Reference(n)
diff --git a/plumbing/storer/reference_test.go b/plumbing/storer/reference_test.go
index 490ec95..1d02c22 100644
--- a/plumbing/storer/reference_test.go
+++ b/plumbing/storer/reference_test.go
@@ -172,3 +172,26 @@
 
 	c.Assert(count, Equals, 1)
 }
+
+func (s *ReferenceSuite) TestMultiReferenceIterForEach(c *C) {
+	i := NewMultiReferenceIter(
+		[]ReferenceIter{
+			NewReferenceSliceIter([]*plumbing.Reference{
+				plumbing.NewReferenceFromStrings("foo", "foo"),
+			}),
+			NewReferenceSliceIter([]*plumbing.Reference{
+				plumbing.NewReferenceFromStrings("bar", "bar"),
+			}),
+		},
+	)
+
+	var result []string
+	err := i.ForEach(func(r *plumbing.Reference) error {
+		result = append(result, r.Name().String())
+		return nil
+	})
+
+	c.Assert(err, IsNil)
+	c.Assert(result, HasLen, 2)
+	c.Assert(result, DeepEquals, []string{"foo", "bar"})
+}
diff --git a/plumbing/transport/common.go b/plumbing/transport/common.go
index f7b882b..dcf9391 100644
--- a/plumbing/transport/common.go
+++ b/plumbing/transport/common.go
@@ -19,10 +19,10 @@
 	"fmt"
 	"io"
 	"net/url"
-	"regexp"
 	"strconv"
 	"strings"
 
+	giturl "gopkg.in/src-d/go-git.v4/internal/url"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
@@ -224,34 +224,28 @@
 	return res
 }
 
-var (
-	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`)
-	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})/)?(?P<path>[^\\].*)$`)
-)
-
 func parseSCPLike(endpoint string) (*Endpoint, bool) {
-	if isSchemeRegExp.MatchString(endpoint) || !scpLikeUrlRegExp.MatchString(endpoint) {
+	if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) {
 		return nil, false
 	}
 
-	m := scpLikeUrlRegExp.FindStringSubmatch(endpoint)
-
-	port, err := strconv.Atoi(m[3])
+	user, host, portStr, path := giturl.FindScpLikeComponents(endpoint)
+	port, err := strconv.Atoi(portStr)
 	if err != nil {
 		port = 22
 	}
 
 	return &Endpoint{
 		Protocol: "ssh",
-		User:     m[1],
-		Host:     m[2],
+		User:     user,
+		Host:     host,
 		Port:     port,
-		Path:     m[4],
+		Path:     path,
 	}, true
 }
 
 func parseFile(endpoint string) (*Endpoint, bool) {
-	if isSchemeRegExp.MatchString(endpoint) {
+	if giturl.MatchesScheme(endpoint) {
 		return nil, false
 	}
 
diff --git a/plumbing/transport/http/common.go b/plumbing/transport/http/common.go
index c034846..5d3535e 100644
--- a/plumbing/transport/http/common.go
+++ b/plumbing/transport/http/common.go
@@ -4,6 +4,7 @@
 import (
 	"bytes"
 	"fmt"
+	"net"
 	"net/http"
 	"strconv"
 	"strings"
@@ -151,6 +152,18 @@
 		return
 	}
 
+	h, p, err := net.SplitHostPort(r.URL.Host)
+	if err != nil {
+		h = r.URL.Host
+	}
+	if p != "" {
+		port, err := strconv.Atoi(p)
+		if err == nil {
+			s.endpoint.Port = port
+		}
+	}
+	s.endpoint.Host = h
+
 	s.endpoint.Protocol = r.URL.Scheme
 	s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)]
 }
@@ -201,7 +214,14 @@
 	return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked)
 }
 
-// TokenAuth implements the go-git http.AuthMethod and transport.AuthMethod interfaces
+// TokenAuth implements an http.AuthMethod that can be used with http transport
+// to authenticate with HTTP token authentication (also known as bearer
+// authentication).
+//
+// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g.
+// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers
+// use basic HTTP authentication, with the OAuth token as user or password.
+// Check the documentation of your git server for details.
 type TokenAuth struct {
 	Token string
 }
diff --git a/plumbing/transport/http/common_test.go b/plumbing/transport/http/common_test.go
index 71eede4..8b300e8 100644
--- a/plumbing/transport/http/common_test.go
+++ b/plumbing/transport/http/common_test.go
@@ -8,6 +8,7 @@
 	"net"
 	"net/http"
 	"net/http/cgi"
+	"net/url"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -119,6 +120,42 @@
 	c.Assert(err, Equals, transport.ErrInvalidAuthMethod)
 }
 
+func (s *ClientSuite) TestModifyEndpointIfRedirect(c *C) {
+	sess := &session{endpoint: nil}
+	u, _ := url.Parse("https://example.com/info/refs")
+	res := &http.Response{Request: &http.Request{URL: u}}
+	c.Assert(func() {
+		sess.ModifyEndpointIfRedirect(res)
+	}, PanicMatches, ".*nil pointer dereference.*")
+
+	sess = &session{endpoint: nil}
+	// no-op - should return and not panic
+	sess.ModifyEndpointIfRedirect(&http.Response{})
+
+	data := []struct {
+		url      string
+		endpoint *transport.Endpoint
+		expected *transport.Endpoint
+	}{
+		{"https://example.com/foo/bar", nil, nil},
+		{"https://example.com/foo.git/info/refs",
+			&transport.Endpoint{},
+			&transport.Endpoint{Protocol: "https", Host: "example.com", Path: "/foo.git"}},
+		{"https://example.com:8080/foo.git/info/refs",
+			&transport.Endpoint{},
+			&transport.Endpoint{Protocol: "https", Host: "example.com", Port: 8080, Path: "/foo.git"}},
+	}
+
+	for _, d := range data {
+		u, _ := url.Parse(d.url)
+		sess := &session{endpoint: d.endpoint}
+		sess.ModifyEndpointIfRedirect(&http.Response{
+			Request: &http.Request{URL: u},
+		})
+		c.Assert(d.endpoint, DeepEquals, d.expected)
+	}
+}
+
 type BaseSuite struct {
 	fixtures.Suite
 
diff --git a/plumbing/transport/ssh/upload_pack_test.go b/plumbing/transport/ssh/upload_pack_test.go
index 87fd4f5..2685ff0 100644
--- a/plumbing/transport/ssh/upload_pack_test.go
+++ b/plumbing/transport/ssh/upload_pack_test.go
@@ -10,6 +10,7 @@
 	"os/exec"
 	"path/filepath"
 	"strings"
+	"sync"
 
 	"gopkg.in/src-d/go-git.v4/plumbing/transport"
 	"gopkg.in/src-d/go-git.v4/plumbing/transport/test"
@@ -97,13 +98,20 @@
 		io.Copy(stdin, s)
 	}()
 
+	var wg sync.WaitGroup
+	wg.Add(2)
+
 	go func() {
-		defer stderr.Close()
+		defer wg.Done()
 		io.Copy(s.Stderr(), stderr)
 	}()
 
-	defer stdout.Close()
-	io.Copy(s, stdout)
+	go func() {
+		defer wg.Done()
+		io.Copy(s, stdout)
+	}()
+
+	wg.Wait()
 
 	if err := cmd.Wait(); err != nil {
 		return
diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go
index 5aea1c0..8dcde8b 100644
--- a/plumbing/transport/test/receive_pack.go
+++ b/plumbing/transport/test/receive_pack.go
@@ -262,11 +262,7 @@
 		req.Packfile = s.emptyPackfile()
 	}
 
-	if s, err := r.ReceivePack(context.Background(), req); err != nil {
-		return s, err
-	} else {
-		return s, err
-	}
+	return r.ReceivePack(context.Background(), req)
 }
 
 func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
diff --git a/remote.go b/remote.go
index 0556b98..de537ce 100644
--- a/remote.go
+++ b/remote.go
@@ -6,8 +6,10 @@
 	"fmt"
 	"io"
 
+	"gopkg.in/src-d/go-billy.v4/osfs"
 	"gopkg.in/src-d/go-git.v4/config"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
@@ -18,6 +20,7 @@
 	"gopkg.in/src-d/go-git.v4/plumbing/transport"
 	"gopkg.in/src-d/go-git.v4/plumbing/transport/client"
 	"gopkg.in/src-d/go-git.v4/storage"
+	"gopkg.in/src-d/go-git.v4/storage/filesystem"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
 	"gopkg.in/src-d/go-git.v4/utils/ioutil"
 )
@@ -149,13 +152,23 @@
 	var hashesToPush []plumbing.Hash
 	// Avoid the expensive revlist operation if we're only doing deletes.
 	if !allDelete {
-		hashesToPush, err = revlist.Objects(r.s, objects, haves)
+		if r.c.IsFirstURLLocal() {
+		// If we are pushing to a local repo, it might be much
+			// faster to use a local storage layer to get the commits
+			// to ignore, when calculating the object revlist.
+			localStorer := filesystem.NewStorage(
+				osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
+			hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
+				r.s, localStorer, objects, haves)
+		} else {
+			hashesToPush, err = revlist.Objects(r.s, objects, haves)
+		}
 		if err != nil {
 			return err
 		}
 	}
 
-	rs, err := pushHashes(ctx, s, r.s, req, hashesToPush)
+	rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar))
 	if err != nil {
 		return err
 	}
@@ -167,6 +180,10 @@
 	return r.updateRemoteReferenceStorage(req, rs)
 }
 
+func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
+	return !ar.Capabilities.Supports(capability.OFSDelta)
+}
+
 func (r *Remote) newReferenceUpdateRequest(
 	o *PushOptions,
 	localRefs []*plumbing.Reference,
@@ -994,6 +1011,7 @@
 	s storage.Storer,
 	req *packp.ReferenceUpdateRequest,
 	hs []plumbing.Hash,
+	useRefDeltas bool,
 ) (*packp.ReportStatus, error) {
 
 	rd, wr := io.Pipe()
@@ -1004,7 +1022,7 @@
 	}
 	done := make(chan error)
 	go func() {
-		e := packfile.NewEncoder(wr, s, false)
+		e := packfile.NewEncoder(wr, s, useRefDeltas)
 		if _, err := e.Encode(hs, config.Pack.Window); err != nil {
 			done <- wr.CloseWithError(err)
 			return
diff --git a/remote_test.go b/remote_test.go
index 175faed..28b0a3a 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -11,6 +11,7 @@
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
+	"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"gopkg.in/src-d/go-git.v4/storage"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem"
@@ -798,3 +799,25 @@
 		c.Assert(shallow, DeepEquals, t.result)
 	}
 }
+
+func (s *RemoteSuite) TestUseRefDeltas(c *C) {
+	url := c.MkDir()
+	_, err := PlainInit(url, true)
+	c.Assert(err, IsNil)
+
+	fs := fixtures.ByURL("https://github.com/git-fixtures/tags.git").One().DotGit()
+	sto := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
+
+	r := newRemote(sto, &config.RemoteConfig{
+		Name: DefaultRemoteName,
+		URLs: []string{url},
+	})
+
+	ar := packp.NewAdvRefs()
+
+	ar.Capabilities.Add(capability.OFSDelta)
+	c.Assert(r.useRefDeltas(ar), Equals, false)
+
+	ar.Capabilities.Delete(capability.OFSDelta)
+	c.Assert(r.useRefDeltas(ar), Equals, true)
+}
diff --git a/repository.go b/repository.go
index 507ff44..de92d64 100644
--- a/repository.go
+++ b/repository.go
@@ -5,6 +5,7 @@
 	"context"
 	"errors"
 	"fmt"
+	"io"
 	stdioutil "io/ioutil"
 	"os"
 	"path"
@@ -40,6 +41,8 @@
 	ErrTagExists = errors.New("tag already exists")
 	// ErrTagNotFound an error stating the specified tag does not exist
 	ErrTagNotFound = errors.New("tag not found")
+	// ErrFetching is returned when the packfile could not be downloaded
+	ErrFetching = errors.New("unable to fetch packfile")
 
 	ErrInvalidReference          = errors.New("invalid reference, should be a tag or a branch")
 	ErrRepositoryNotExists       = errors.New("repository does not exist")
@@ -341,13 +344,26 @@
 // transport operations.
 //
 // TODO(mcuadros): move isBare to CloneOptions in v5
+// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027
 func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) {
+	cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path)
+	if err != nil {
+		return nil, err
+	}
+
 	r, err := PlainInit(path, isBare)
 	if err != nil {
 		return nil, err
 	}
 
-	return r, r.clone(ctx, o)
+	err = r.clone(ctx, o)
+	if err != nil && err != ErrRepositoryAlreadyExists {
+		if cleanup {
+			cleanUpDir(path, cleanupParent)
+		}
+	}
+
+	return r, err
 }
 
 func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository {
@@ -358,6 +374,65 @@
 	}
 }
 
+func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return true, true, nil
+		}
+
+		return false, false, err
+	}
+
+	if !fi.IsDir() {
+		return false, false, fmt.Errorf("path is not a directory: %s", path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return false, false, err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	_, err = f.Readdirnames(1)
+	if err == io.EOF {
+		return true, false, nil
+	}
+
+	if err != nil {
+		return false, false, err
+	}
+
+	return false, false, nil
+}
+
+func cleanUpDir(path string, all bool) error {
+	if all {
+		return os.RemoveAll(path)
+	}
+
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(f, &err)
+
+	names, err := f.Readdirnames(-1)
+	if err != nil {
+		return err
+	}
+
+	for _, name := range names {
+		if err := os.RemoveAll(filepath.Join(path, name)); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
 // Config return the repository config
 func (r *Repository) Config() (*config.Config, error) {
 	return r.Storer.Config()
@@ -640,8 +715,9 @@
 	}
 
 	c := &config.RemoteConfig{
-		Name: o.RemoteName,
-		URLs: []string{o.URL},
+		Name:  o.RemoteName,
+		URLs:  []string{o.URL},
+		Fetch: r.cloneRefSpec(o),
 	}
 
 	if _, err := r.CreateRemote(c); err != nil {
@@ -649,7 +725,7 @@
 	}
 
 	ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{
-		RefSpecs:   r.cloneRefSpec(o, c),
+		RefSpecs:   c.Fetch,
 		Depth:      o.Depth,
 		Auth:       o.Auth,
 		Progress:   o.Progress,
@@ -719,21 +795,26 @@
 	refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
 )
 
-func (r *Repository) cloneRefSpec(o *CloneOptions, c *config.RemoteConfig) []config.RefSpec {
-	var rs string
-
+func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec {
 	switch {
 	case o.ReferenceName.IsTag():
-		rs = fmt.Sprintf(refspecTag, o.ReferenceName.Short())
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())),
+		}
 	case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
-		rs = fmt.Sprintf(refspecSingleBranchHEAD, c.Name)
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)),
+			config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)),
+		}
 	case o.SingleBranch:
-		rs = fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), c.Name)
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)),
+		}
 	default:
-		return c.Fetch
+		return []config.RefSpec{
+			config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)),
+		}
 	}
-
-	return []config.RefSpec{config.RefSpec(rs)}
 }
 
 func (r *Repository) setIsBare(isBare bool) error {
@@ -751,9 +832,7 @@
 		return nil
 	}
 
-	c.Fetch = []config.RefSpec{config.RefSpec(fmt.Sprintf(
-		refspecSingleBranch, head.Name().Short(), c.Name,
-	))}
+	c.Fetch = r.cloneRefSpec(o)
 
 	cfg, err := r.Storer.Config()
 	if err != nil {
@@ -781,6 +860,8 @@
 	remoteRefs, err := remote.fetch(ctx, o)
 	if err == NoErrAlreadyUpToDate {
 		objsUpdated = false
+	} else if err == packfile.ErrEmptyPackfile {
+		return nil, ErrFetching
 	} else if err != nil {
 		return nil, err
 	}
@@ -946,8 +1027,36 @@
 
 // Log returns the commit history from the given LogOptions.
 func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) {
-	h := o.From
-	if o.From == plumbing.ZeroHash {
+	fn := commitIterFunc(o.Order)
+	if fn == nil {
+		return nil, fmt.Errorf("invalid Order=%v", o.Order)
+	}
+
+	var (
+		it  object.CommitIter
+		err error
+	)
+	if o.All {
+		it, err = r.logAll(fn)
+	} else {
+		it, err = r.log(o.From, fn)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if o.FileName != nil {
+		// for `git log --all` also check parent (if the next commit comes from the real parent)
+		it = r.logWithFile(*o.FileName, it, o.All)
+	}
+
+	return it, nil
+}
+
+func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+	h := from
+	if from == plumbing.ZeroHash {
 		head, err := r.Head()
 		if err != nil {
 			return nil, err
@@ -960,27 +1069,41 @@
 	if err != nil {
 		return nil, err
 	}
+	return commitIterFunc(commit), nil
+}
 
-	var commitIter object.CommitIter
-	switch o.Order {
+func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) {
+	return object.NewCommitAllIter(r.Storer, commitIterFunc)
+}
+
+func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter {
+	return object.NewCommitFileIterFromIter(fileName, commitIter, checkParent)
+}
+
+func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter {
+	switch order {
 	case LogOrderDefault:
-		commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPreorderIter(c, nil, nil)
+		}
 	case LogOrderDFS:
-		commitIter = object.NewCommitPreorderIter(commit, nil, nil)
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPreorderIter(c, nil, nil)
+		}
 	case LogOrderDFSPost:
-		commitIter = object.NewCommitPostorderIter(commit, nil)
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitPostorderIter(c, nil)
+		}
 	case LogOrderBSF:
-		commitIter = object.NewCommitIterBSF(commit, nil, nil)
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitIterBSF(c, nil, nil)
+		}
 	case LogOrderCommitterTime:
-		commitIter = object.NewCommitIterCTime(commit, nil, nil)
-	default:
-		return nil, fmt.Errorf("invalid Order=%v", o.Order)
+		return func(c *object.Commit) object.CommitIter {
+			return object.NewCommitIterCTime(c, nil, nil)
+		}
 	}
-
-	if o.FileName == nil {
-		return commitIter, nil
-	}
-	return object.NewCommitFileIterFromIter(*o.FileName, commitIter), nil
+	return nil
 }
 
 // Tags returns all the tag References in a repository.
diff --git a/repository_test.go b/repository_test.go
index 07c3570..1549737 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -21,6 +21,7 @@
 	"gopkg.in/src-d/go-git.v4/plumbing/cache"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/plumbing/transport"
 	"gopkg.in/src-d/go-git.v4/storage"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
@@ -177,11 +178,12 @@
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 
-	_, err := CloneContext(ctx, memory.NewStorage(), nil, &CloneOptions{
+	r, err := CloneContext(ctx, memory.NewStorage(), nil, &CloneOptions{
 		URL: s.GetBasicLocalRepositoryURL(),
 	})
 
-	c.Assert(err, NotNil)
+	c.Assert(r, NotNil)
+	c.Assert(err, ErrorMatches, ".* context canceled")
 }
 
 func (s *RepositorySuite) TestCloneWithTags(c *C) {
@@ -581,15 +583,128 @@
 	c.Assert(remote, NotNil)
 }
 
-func (s *RepositorySuite) TestPlainCloneContext(c *C) {
+func (s *RepositorySuite) TestPlainCloneOverExistingGitDirectory(c *C) {
+	tmpDir := c.MkDir()
+	r, err := PlainInit(tmpDir, false)
+	c.Assert(r, NotNil)
+	c.Assert(err, IsNil)
+
+	r, err = PlainClone(tmpDir, false, &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(r, IsNil)
+	c.Assert(err, Equals, ErrRepositoryAlreadyExists)
+}
+
+func (s *RepositorySuite) TestPlainCloneContextCancel(c *C) {
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
 
-	_, err := PlainCloneContext(ctx, c.MkDir(), false, &CloneOptions{
+	r, err := PlainCloneContext(ctx, c.MkDir(), false, &CloneOptions{
 		URL: s.GetBasicLocalRepositoryURL(),
 	})
 
-	c.Assert(err, NotNil)
+	c.Assert(r, NotNil)
+	c.Assert(err, ErrorMatches, ".* context canceled")
+}
+
+func (s *RepositorySuite) TestPlainCloneContextNonExistentWithExistentDir(c *C) {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	tmpDir := c.MkDir()
+	repoDir := tmpDir
+
+	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
+		URL: "incorrectOnPurpose",
+	})
+	c.Assert(r, NotNil)
+	c.Assert(err, Equals, transport.ErrRepositoryNotFound)
+
+	_, err = os.Stat(repoDir)
+	c.Assert(os.IsNotExist(err), Equals, false)
+
+	names, err := ioutil.ReadDir(repoDir)
+	c.Assert(err, IsNil)
+	c.Assert(names, HasLen, 0)
+}
+
+func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNonExistentDir(c *C) {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	tmpDir := c.MkDir()
+	repoDir := filepath.Join(tmpDir, "repoDir")
+
+	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
+		URL: "incorrectOnPurpose",
+	})
+	c.Assert(r, NotNil)
+	c.Assert(err, Equals, transport.ErrRepositoryNotFound)
+
+	_, err = os.Stat(repoDir)
+	c.Assert(os.IsNotExist(err), Equals, true)
+}
+
+func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNotDir(c *C) {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	tmpDir := c.MkDir()
+	repoDir := filepath.Join(tmpDir, "repoDir")
+	f, err := os.Create(repoDir)
+	c.Assert(err, IsNil)
+	c.Assert(f.Close(), IsNil)
+
+	r, err := PlainCloneContext(ctx, repoDir, false, &CloneOptions{
+		URL: "incorrectOnPurpose",
+	})
+	c.Assert(r, IsNil)
+	c.Assert(err, ErrorMatches, ".*not a directory.*")
+
+	fi, err := os.Stat(repoDir)
+	c.Assert(err, IsNil)
+	c.Assert(fi.IsDir(), Equals, false)
+}
+
+func (s *RepositorySuite) TestPlainCloneContextNonExistentWithNotEmptyDir(c *C) {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	tmpDir := c.MkDir()
+	repoDirPath := filepath.Join(tmpDir, "repoDir")
+	err := os.Mkdir(repoDirPath, 0777)
+	c.Assert(err, IsNil)
+
+	dummyFile := filepath.Join(repoDirPath, "dummyFile")
+	err = ioutil.WriteFile(dummyFile, []byte(fmt.Sprint("dummyContent")), 0644)
+	c.Assert(err, IsNil)
+
+	r, err := PlainCloneContext(ctx, repoDirPath, false, &CloneOptions{
+		URL: "incorrectOnPurpose",
+	})
+	c.Assert(r, NotNil)
+	c.Assert(err, Equals, transport.ErrRepositoryNotFound)
+
+	_, err = os.Stat(dummyFile)
+	c.Assert(err, IsNil)
+
+}
+
+func (s *RepositorySuite) TestPlainCloneContextNonExistingOverExistingGitDirectory(c *C) {
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+
+	tmpDir := c.MkDir()
+	r, err := PlainInit(tmpDir, false)
+	c.Assert(r, NotNil)
+	c.Assert(err, IsNil)
+
+	r, err = PlainCloneContext(ctx, tmpDir, false, &CloneOptions{
+		URL: "incorrectOnPurpose",
+	})
+	c.Assert(r, IsNil)
+	c.Assert(err, Equals, ErrRepositoryAlreadyExists)
 }
 
 func (s *RepositorySuite) TestPlainCloneWithRecurseSubmodules(c *C) {
@@ -839,6 +954,32 @@
 	c.Assert(branch.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
 }
 
+func (s *RepositorySuite) TestCloneSingleTag(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+
+	url := s.GetLocalRepositoryURL(
+		fixtures.ByURL("https://github.com/git-fixtures/tags.git").One(),
+	)
+
+	err := r.clone(context.Background(), &CloneOptions{
+		URL:           url,
+		SingleBranch:  true,
+		ReferenceName: plumbing.ReferenceName("refs/tags/commit-tag"),
+	})
+	c.Assert(err, IsNil)
+
+	branch, err := r.Reference("refs/tags/commit-tag", false)
+	c.Assert(err, IsNil)
+	c.Assert(branch, NotNil)
+
+	conf, err := r.Config()
+	c.Assert(err, IsNil)
+	originRemote := conf.Remotes["origin"]
+	c.Assert(originRemote, NotNil)
+	c.Assert(originRemote.Fetch, HasLen, 1)
+	c.Assert(originRemote.Fetch[0].String(), Equals, "+refs/tags/commit-tag:refs/tags/commit-tag")
+}
+
 func (s *RepositorySuite) TestCloneDetachedHEAD(c *C) {
 	r, _ := Init(memory.NewStorage(), nil)
 	err := r.clone(context.Background(), &CloneOptions{
@@ -1110,6 +1251,139 @@
 	c.Assert(err, Equals, io.EOF)
 }
 
+func (s *RepositorySuite) TestLogAll(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+
+	rIter, err := r.Storer.IterReferences()
+	c.Assert(err, IsNil)
+
+	refCount := 0
+	err = rIter.ForEach(func(ref *plumbing.Reference) error {
+		refCount++
+		return nil
+	})
+	c.Assert(err, IsNil)
+	c.Assert(refCount, Equals, 5)
+
+	cIter, err := r.Log(&LogOptions{
+		All: true,
+	})
+	c.Assert(err, IsNil)
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
+		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+		plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
+		plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"),
+		plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
+		plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"),
+		plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
+	}
+
+	for _, o := range commitOrder {
+		commit, err := cIter.Next()
+		c.Assert(err, IsNil)
+		c.Assert(commit.Hash, Equals, o)
+	}
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+	cIter.Close()
+}
+
+func (s *RepositorySuite) TestLogAllMissingReferences(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+	c.Assert(err, IsNil)
+	err = r.Storer.RemoveReference(plumbing.HEAD)
+	c.Assert(err, IsNil)
+
+	rIter, err := r.Storer.IterReferences()
+	c.Assert(err, IsNil)
+
+	refCount := 0
+	err = rIter.ForEach(func(ref *plumbing.Reference) error {
+		refCount++
+		return nil
+	})
+	c.Assert(err, IsNil)
+	c.Assert(refCount, Equals, 4)
+
+	err = r.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName("DUMMY"), plumbing.NewHash("DUMMY")))
+	c.Assert(err, IsNil)
+
+	rIter, err = r.Storer.IterReferences()
+	c.Assert(err, IsNil)
+
+	refCount = 0
+	err = rIter.ForEach(func(ref *plumbing.Reference) error {
+		refCount++
+		return nil
+	})
+	c.Assert(err, IsNil)
+	c.Assert(refCount, Equals, 5)
+
+	cIter, err := r.Log(&LogOptions{
+		All: true,
+	})
+	c.Assert(cIter, NotNil)
+	c.Assert(err, IsNil)
+
+	cCount := 0
+	cIter.ForEach(func(c *object.Commit) error {
+		cCount++
+		return nil
+	})
+	c.Assert(cCount, Equals, 9)
+
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+	cIter.Close()
+}
+
+func (s *RepositorySuite) TestLogAllOrderByTime(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+
+	c.Assert(err, IsNil)
+
+	cIter, err := r.Log(&LogOptions{
+		Order: LogOrderCommitterTime,
+		All:   true,
+	})
+	c.Assert(err, IsNil)
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
+		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
+		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
+		plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"),
+		plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"),
+		plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"),
+		plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
+		plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"),
+		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
+	}
+
+	for _, o := range commitOrder {
+		commit, err := cIter.Next()
+		c.Assert(err, IsNil)
+		c.Assert(commit.Hash, Equals, o)
+	}
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+	cIter.Close()
+}
+
 func (s *RepositorySuite) TestLogHead(c *C) {
 	r, _ := Init(memory.NewStorage(), nil)
 	err := r.clone(context.Background(), &CloneOptions{
@@ -1192,8 +1466,8 @@
 
 	fileName := "php/crappy.php"
 	cIter, err := r.Log(&LogOptions{FileName: &fileName})
-
 	c.Assert(err, IsNil)
+	defer cIter.Close()
 
 	commitOrder := []plumbing.Hash{
 		plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"),
@@ -1203,7 +1477,51 @@
 	cIter.ForEach(func(commit *object.Commit) error {
 		expectedCommitHash := commitOrder[expectedIndex]
 		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
-		expectedIndex += 1
+		expectedIndex++
+		return nil
+	})
+	c.Assert(expectedIndex, Equals, 1)
+}
+
+func (s *RepositorySuite) TestLogNonHeadFile(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+
+	c.Assert(err, IsNil)
+
+	fileName := "README"
+	cIter, err := r.Log(&LogOptions{FileName: &fileName})
+	c.Assert(err, IsNil)
+	defer cIter.Close()
+
+	_, err = cIter.Next()
+	c.Assert(err, Equals, io.EOF)
+}
+
+func (s *RepositorySuite) TestLogAllFileForEach(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL: s.GetBasicLocalRepositoryURL(),
+	})
+
+	c.Assert(err, IsNil)
+
+	fileName := "README"
+	cIter, err := r.Log(&LogOptions{FileName: &fileName, All: true})
+	c.Assert(err, IsNil)
+	defer cIter.Close()
+
+	commitOrder := []plumbing.Hash{
+		plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"),
+	}
+
+	expectedIndex := 0
+	cIter.ForEach(func(commit *object.Commit) error {
+		expectedCommitHash := commitOrder[expectedIndex]
+		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
+		expectedIndex++
 		return nil
 	})
 	c.Assert(expectedIndex, Equals, 1)
@@ -1221,6 +1539,7 @@
 	cIter, err := r.Log(&LogOptions{FileName: &fileName})
 	// Not raising an error since `git log -- vendor/foo12.go` responds silently
 	c.Assert(err, IsNil)
+	defer cIter.Close()
 
 	_, err = cIter.Next()
 	c.Assert(err, Equals, io.EOF)
@@ -1238,8 +1557,8 @@
 		Order:    LogOrderCommitterTime,
 		FileName: &fileName,
 	})
-
 	c.Assert(err, IsNil)
+	defer cIter.Close()
 
 	commitOrder := []plumbing.Hash{
 		plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"),
@@ -1249,7 +1568,7 @@
 	cIter.ForEach(func(commit *object.Commit) error {
 		expectedCommitHash := commitOrder[expectedIndex]
 		c.Assert(commit.Hash.String(), Equals, expectedCommitHash.String())
-		expectedIndex += 1
+		expectedIndex++
 		return nil
 	})
 	c.Assert(expectedIndex, Equals, 1)
@@ -1269,6 +1588,8 @@
 		From:     plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"),
 	})
 	c.Assert(err, IsNil)
+	defer cIter.Close()
+
 	_, iterErr := cIter.Next()
 	c.Assert(iterErr, Equals, io.EOF)
 }
@@ -2104,9 +2425,9 @@
 	c.Assert(err, IsNil)
 
 	datas := map[string]string{
-		"efs/heads/master~":                        "reference not found",
-		"HEAD^3":                                   `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
-		"HEAD^{/whatever}":                         `No commit message match regexp : "whatever"`,
+		"efs/heads/master~": "reference not found",
+		"HEAD^3":            `Revision invalid : "3" found must be 0, 1 or 2 after "^"`,
+		"HEAD^{/whatever}":  `No commit message match regexp : "whatever"`,
 		"4e1243bd22c66e76c2ba9eddc1f91394e57f9f83": "reference not found",
 		"918c48b83bd081e863dbe1b80f8998f058cd8294": `refname "918c48b83bd081e863dbe1b80f8998f058cd8294" is ambiguous`,
 	}
@@ -2202,8 +2523,6 @@
 	c.Stderr = buf
 	c.Stdout = buf
 
-	//defer func() { fmt.Println(buf.String()) }()
-
 	return c.Run()
 }
 
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index a58c248..ba9667e 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -14,6 +14,7 @@
 
 	"gopkg.in/src-d/go-billy.v4/osfs"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage"
 	"gopkg.in/src-d/go-git.v4/utils/ioutil"
 
 	"gopkg.in/src-d/go-billy.v4"
@@ -596,7 +597,7 @@
 		return err
 	}
 	if ref.Hash() != old.Hash() {
-		return fmt.Errorf("reference has changed concurrently")
+		return storage.ErrReferenceHasChanged
 	}
 	_, err = f.Seek(0, io.SeekStart)
 	if err != nil {
diff --git a/storage/filesystem/dotgit/dotgit_setref.go b/storage/filesystem/dotgit/dotgit_setref.go
index d27c1a3..9da2f31 100644
--- a/storage/filesystem/dotgit/dotgit_setref.go
+++ b/storage/filesystem/dotgit/dotgit_setref.go
@@ -1,15 +1,24 @@
-// +build !norwfs
-
 package dotgit
 
 import (
+	"fmt"
 	"os"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/utils/ioutil"
+
+	"gopkg.in/src-d/go-billy.v4"
 )
 
 func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) {
+	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+		return d.setRefRwfs(fileName, content, old)
+	}
+
+	return d.setRefNorwfs(fileName, content, old)
+}
+
+func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) {
 	// If we are not checking an old ref, just truncate the file.
 	mode := os.O_RDWR | os.O_CREATE
 	if old == nil {
@@ -41,3 +50,41 @@
 	_, err = f.Write([]byte(content))
 	return err
 }
+
+// There are some filesystems that don't support opening files in RDWR mode.
+// In these filesystems the standard SetRef function can not be used as it
+// reads the reference file to check that it's not modified before updating it.
+//
+// This version of the function writes the reference without extra checks
+// making it compatible with these simple filesystems. This is usually not
+// a problem as they should be accessed by only one process at a time.
+func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error {
+	_, err := d.fs.Stat(fileName)
+	if err == nil && old != nil {
+		fRead, err := d.fs.Open(fileName)
+		if err != nil {
+			return err
+		}
+
+		ref, err := d.readReferenceFrom(fRead, old.Name().String())
+		fRead.Close()
+
+		if err != nil {
+			return err
+		}
+
+		if ref.Hash() != old.Hash() {
+			return fmt.Errorf("reference has changed concurrently")
+		}
+	}
+
+	f, err := d.fs.Create(fileName)
+	if err != nil {
+		return err
+	}
+
+	defer f.Close()
+
+	_, err = f.Write([]byte(content))
+	return err
+}
diff --git a/storage/filesystem/dotgit/dotgit_setref_norwfs.go b/storage/filesystem/dotgit/dotgit_setref_norwfs.go
deleted file mode 100644
index 5695bd3..0000000
--- a/storage/filesystem/dotgit/dotgit_setref_norwfs.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build norwfs
-
-package dotgit
-
-import (
-	"fmt"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-)
-
-// There are some filesystems that don't support opening files in RDWD mode.
-// In these filesystems the standard SetRef function can not be used as i
-// reads the reference file to check that it's not modified before updating it.
-//
-// This version of the function writes the reference without extra checks
-// making it compatible with these simple filesystems. This is usually not
-// a problem as they should be accessed by only one process at a time.
-func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) error {
-	_, err := d.fs.Stat(fileName)
-	if err == nil && old != nil {
-		fRead, err := d.fs.Open(fileName)
-		if err != nil {
-			return err
-		}
-
-		ref, err := d.readReferenceFrom(fRead, old.Name().String())
-		fRead.Close()
-
-		if err != nil {
-			return err
-		}
-
-		if ref.Hash() != old.Hash() {
-			return fmt.Errorf("reference has changed concurrently")
-		}
-	}
-
-	f, err := d.fs.Create(fileName)
-	if err != nil {
-		return err
-	}
-
-	defer f.Close()
-
-	_, err = f.Write([]byte(content))
-	return err
-}
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 308c6b7..73b0291 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -57,11 +57,26 @@
 	fs := osfs.New(tmp)
 	dir := New(fs)
 
+	testSetRefs(c, dir)
+}
+
+func (s *SuiteDotGit) TestSetRefsNorwfs(c *C) {
+	tmp, err := ioutil.TempDir("", "dot-git")
+	c.Assert(err, IsNil)
+	defer os.RemoveAll(tmp)
+
+	fs := osfs.New(tmp)
+	dir := New(&norwfs{fs})
+
+	testSetRefs(c, dir)
+}
+
+func testSetRefs(c *C, dir *DotGit) {
 	firstFoo := plumbing.NewReferenceFromStrings(
 		"refs/heads/foo",
 		"e8d3ffab552895c19b9fcf7aa264d277cde33881",
 	)
-	err = dir.SetRef(firstFoo, nil)
+	err := dir.SetRef(firstFoo, nil)
 
 	c.Assert(err, IsNil)
 
@@ -795,3 +810,11 @@
 	}
 	c.Assert(dotgits[1].fs.Root(), Equals, expectedPath)
 }
+
+type norwfs struct {
+	billy.Filesystem
+}
+
+func (f *norwfs) Capabilities() billy.Capability {
+	return billy.Capabilities(f.Filesystem) &^ billy.ReadAndWriteCapability
+}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 6cd2d4c..3eb62a2 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -20,24 +20,25 @@
 type ObjectStorage struct {
 	options Options
 
-	// deltaBaseCache is an object cache uses to cache delta's bases when
-	deltaBaseCache cache.Object
+	// objectCache is an object cache used to cache delta bases and also
+	// recently loaded loose objects.
+	objectCache cache.Object
 
 	dir   *dotgit.DotGit
 	index map[plumbing.Hash]idxfile.Index
 }
 
 // NewObjectStorage creates a new ObjectStorage with the given .git directory and cache.
-func NewObjectStorage(dir *dotgit.DotGit, cache cache.Object) *ObjectStorage {
-	return NewObjectStorageWithOptions(dir, cache, Options{})
+func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage {
+	return NewObjectStorageWithOptions(dir, objectCache, Options{})
 }
 
 // NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options
-func NewObjectStorageWithOptions(dir *dotgit.DotGit, cache cache.Object, ops Options) *ObjectStorage {
+func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage {
 	return &ObjectStorage{
-		options:        ops,
-		deltaBaseCache: cache,
-		dir:            dir,
+		options:     ops,
+		objectCache: objectCache,
+		dir:         dir,
 	}
 }
 
@@ -61,6 +62,11 @@
 	return nil
 }
 
+// Reindex indexes all packfiles again. Useful if git changed packfiles externally.
+func (s *ObjectStorage) Reindex() {
+	s.index = nil
+}
+
 func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) {
 	f, err := s.dir.ObjectPackIdx(h)
 	if err != nil {
@@ -201,7 +207,7 @@
 	idx := s.index[pack]
 	hash, err := idx.FindHash(offset)
 	if err == nil {
-		obj, ok := s.deltaBaseCache.Get(hash)
+		obj, ok := s.objectCache.Get(hash)
 		if ok {
 			return obj.Size(), nil
 		}
@@ -210,8 +216,8 @@
 	}
 
 	var p *packfile.Packfile
-	if s.deltaBaseCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
 	} else {
 		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
 	}
@@ -236,9 +242,19 @@
 // EncodedObject returns the object with the given hash, by searching for it in
 // the packfile and the git object directories.
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
-	obj, err := s.getFromUnpacked(h)
-	if err == plumbing.ErrObjectNotFound {
+	var obj plumbing.EncodedObject
+	var err error
+
+	if s.index != nil {
 		obj, err = s.getFromPackfile(h, false)
+		if err == plumbing.ErrObjectNotFound {
+			obj, err = s.getFromUnpacked(h)
+		}
+	} else {
+		obj, err = s.getFromUnpacked(h)
+		if err == plumbing.ErrObjectNotFound {
+			obj, err = s.getFromPackfile(h, false)
+		}
 	}
 
 	// If the error is still object not found, check if it's a shared object
@@ -249,7 +265,7 @@
 			// Create a new object storage with the DotGit(s) and check for the
 			// required hash object. Skip when not found.
 			for _, dg := range dotgits {
-				o := NewObjectStorage(dg, s.deltaBaseCache)
+				o := NewObjectStorage(dg, s.objectCache)
 				enobj, enerr := o.EncodedObject(t, h)
 				if enerr != nil {
 					continue
@@ -299,9 +315,12 @@
 
 		return nil, err
 	}
-
 	defer ioutil.CheckClose(f, &err)
 
+	if cacheObj, found := s.objectCache.Get(h); found {
+		return cacheObj, nil
+	}
+
 	obj = s.NewEncodedObject()
 	r, err := objfile.NewReader(f)
 	if err != nil {
@@ -322,6 +341,8 @@
 		return nil, err
 	}
 
+	s.objectCache.Put(obj)
+
 	_, err = io.Copy(w, r)
 	return obj, err
 }
@@ -364,7 +385,7 @@
 ) (plumbing.EncodedObject, error) {
 	hash, err := idx.FindHash(offset)
 	if err == nil {
-		obj, ok := s.deltaBaseCache.Get(hash)
+		obj, ok := s.objectCache.Get(hash)
 		if ok {
 			return obj, nil
 		}
@@ -375,8 +396,8 @@
 	}
 
 	var p *packfile.Packfile
-	if s.deltaBaseCache != nil {
-		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	if s.objectCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
 	} else {
 		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
 	}
@@ -395,11 +416,7 @@
 	}
 
 	p := packfile.NewScanner(f)
-	if _, err := p.SeekFromStart(offset); err != nil {
-		return nil, err
-	}
-
-	header, err := p.NextObjectHeader()
+	header, err := p.SeekObjectHeader(offset)
 	if err != nil {
 		return nil, err
 	}
@@ -490,7 +507,7 @@
 			}
 			return newPackfileIter(
 				s.dir.Fs(), pack, t, seen, s.index[h],
-				s.deltaBaseCache, s.options.KeepDescriptors,
+				s.objectCache, s.options.KeepDescriptors,
 			)
 		},
 	}, nil
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index 4e6bbfb..5cfb227 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -1,8 +1,11 @@
 package filesystem
 
 import (
+	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
@@ -204,6 +207,59 @@
 	})
 }
 
+func copyFile(c *C, dstDir, dstFilename string, srcFile *os.File) {
+	_, err := srcFile.Seek(0, 0)
+	c.Assert(err, IsNil)
+
+	err = os.MkdirAll(dstDir, 0750|os.ModeDir)
+	c.Assert(err, IsNil)
+
+	dst, err := os.OpenFile(filepath.Join(dstDir, dstFilename), os.O_CREATE|os.O_WRONLY, 0666)
+	c.Assert(err, IsNil)
+	defer dst.Close()
+
+	_, err = io.Copy(dst, srcFile)
+	c.Assert(err, IsNil)
+}
+
+// TestPackfileReindex tests that externally-added packfiles are considered by go-git
+// after calling the Reindex method
+func (s *FsSuite) TestPackfileReindex(c *C) {
+	// obtain a standalone packfile that is not part of any other repository
+	// in the fixtures:
+	packFixture := fixtures.ByTag("packfile").ByTag("standalone").One()
+	packFile := packFixture.Packfile()
+	idxFile := packFixture.Idx()
+	packFilename := packFixture.PackfileHash.String()
+	testObjectHash := plumbing.NewHash("a771b1e94141480861332fd0e4684d33071306c6") // this is an object we know exists in the standalone packfile
+	fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		storer := NewStorage(fs, cache.NewObjectLRUDefault())
+
+		// check that our test object is NOT found
+		_, err := storer.EncodedObject(plumbing.CommitObject, testObjectHash)
+		c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+		// add the external packfile+idx to the packs folder
+		// this simulates a git bundle unbundle command, or a repack, for example.
+		copyFile(c, filepath.Join(storer.Filesystem().Root(), "objects", "pack"),
+			fmt.Sprintf("pack-%s.pack", packFilename), packFile)
+		copyFile(c, filepath.Join(storer.Filesystem().Root(), "objects", "pack"),
+			fmt.Sprintf("pack-%s.idx", packFilename), idxFile)
+
+		// check that we cannot still retrieve the test object
+		_, err = storer.EncodedObject(plumbing.CommitObject, testObjectHash)
+		c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+		storer.Reindex() // actually reindex
+
+		// Now check that the test object can be retrieved
+		_, err = storer.EncodedObject(plumbing.CommitObject, testObjectHash)
+		c.Assert(err, IsNil)
+
+	})
+}
+
 func (s *FsSuite) TestPackfileIterKeepDescriptors(c *C) {
 	fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
 		fs := f.DotGit()
@@ -241,6 +297,23 @@
 	})
 }
 
+func (s *FsSuite) TestGetFromObjectFileSharedCache(c *C) {
+	f1 := fixtures.ByTag("worktree").One().DotGit()
+	f2 := fixtures.ByTag("worktree").ByTag("submodule").One().DotGit()
+
+	ch := cache.NewObjectLRUDefault()
+	o1 := NewObjectStorage(dotgit.New(f1), ch)
+	o2 := NewObjectStorage(dotgit.New(f2), ch)
+
+	expected := plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a")
+	obj, err := o1.EncodedObject(plumbing.CommitObject, expected)
+	c.Assert(err, IsNil)
+	c.Assert(obj.Hash(), Equals, expected)
+
+	obj, err = o2.EncodedObject(plumbing.CommitObject, expected)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
 func BenchmarkPackfileIter(b *testing.B) {
 	if err := fixtures.Init(); err != nil {
 		b.Fatal(err)
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 14a772a..370f7bd 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -51,11 +51,7 @@
 		fs:  fs,
 		dir: dir,
 
-		ObjectStorage: ObjectStorage{
-			options:        ops,
-			deltaBaseCache: cache,
-			dir:            dir,
-		},
+		ObjectStorage:    *NewObjectStorageWithOptions(dir, cache, ops),
 		ReferenceStorage: ReferenceStorage{dir: dir},
 		IndexStorage:     IndexStorage{dir: dir},
 		ShallowStorage:   ShallowStorage{dir: dir},
diff --git a/storage/memory/storage.go b/storage/memory/storage.go
index 6e11742..f240f2a 100644
--- a/storage/memory/storage.go
+++ b/storage/memory/storage.go
@@ -13,7 +13,6 @@
 )
 
 var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type")
-var ErrRefHasChanged = fmt.Errorf("reference has changed concurrently")
 
 // Storage is an implementation of git.Storer that stores data on memory, being
 // ephemeral. The use of this storage should be done in controlled envoriments,
@@ -258,7 +257,7 @@
 	if old != nil {
 		tmp := r[ref.Name()]
 		if tmp != nil && tmp.Hash() != old.Hash() {
-			return ErrRefHasChanged
+			return storage.ErrReferenceHasChanged
 		}
 	}
 	r[ref.Name()] = ref
diff --git a/storage/storer.go b/storage/storer.go
index d1a94f2..5de0cfb 100644
--- a/storage/storer.go
+++ b/storage/storer.go
@@ -1,10 +1,14 @@
 package storage
 
 import (
+	"errors"
+
 	"gopkg.in/src-d/go-git.v4/config"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 )
 
+var ErrReferenceHasChanged = errors.New("reference has changed concurrently")
+
 // Storer is a generic storage of objects, references and any information
 // related to a particular repository. The package gopkg.in/src-d/go-git.v4/storage
 // contains two implementation a filesystem base implementation (such as `.git`)
diff --git a/storage/test/storage_suite.go b/storage/test/storage_suite.go
index 79a12c5..e050b73 100644
--- a/storage/test/storage_suite.go
+++ b/storage/test/storage_suite.go
@@ -280,6 +280,57 @@
 	c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
 }
 
+func (s *BaseStorageSuite) TestCheckAndSetReference(c *C) {
+	err := s.Storer.SetReference(
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, IsNil)
+
+	err = s.Storer.CheckAndSetReference(
+		plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"),
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, IsNil)
+
+	e, err := s.Storer.Reference(plumbing.ReferenceName("foo"))
+	c.Assert(err, IsNil)
+	c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+}
+
+func (s *BaseStorageSuite) TestCheckAndSetReferenceNil(c *C) {
+	err := s.Storer.SetReference(
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, IsNil)
+
+	err = s.Storer.CheckAndSetReference(
+		plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"),
+		nil,
+	)
+	c.Assert(err, IsNil)
+
+	e, err := s.Storer.Reference(plumbing.ReferenceName("foo"))
+	c.Assert(err, IsNil)
+	c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+}
+
+func (s *BaseStorageSuite) TestCheckAndSetReferenceError(c *C) {
+	err := s.Storer.SetReference(
+		plumbing.NewReferenceFromStrings("foo", "c3f4688a08fd86f1bf8e055724c84b7a40a09733"),
+	)
+	c.Assert(err, IsNil)
+
+	err = s.Storer.CheckAndSetReference(
+		plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"),
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, Equals, storage.ErrReferenceHasChanged)
+
+	e, err := s.Storer.Reference(plumbing.ReferenceName("foo"))
+	c.Assert(err, IsNil)
+	c.Assert(e.Hash().String(), Equals, "c3f4688a08fd86f1bf8e055724c84b7a40a09733")
+}
+
 func (s *BaseStorageSuite) TestRemoveReference(c *C) {
 	err := s.Storer.SetReference(
 		plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"),
diff --git a/storage/transactional/config.go b/storage/transactional/config.go
new file mode 100644
index 0000000..4d8efe1
--- /dev/null
+++ b/storage/transactional/config.go
@@ -0,0 +1,50 @@
+package transactional
+
+import "gopkg.in/src-d/go-git.v4/config"
+
+// ConfigStorage implements the storer.ConfigStorage for the transactional package.
+type ConfigStorage struct {
+	config.ConfigStorer
+	temporal config.ConfigStorer
+
+	set bool
+}
+
+// NewConfigStorage returns a new ConfigStorer based on a base storer and a
+// temporal storer.
+func NewConfigStorage(s, temporal config.ConfigStorer) *ConfigStorage {
+	return &ConfigStorage{ConfigStorer: s, temporal: temporal}
+}
+
+// SetConfig honors the storer.ConfigStorer interface.
+func (c *ConfigStorage) SetConfig(cfg *config.Config) error {
+	if err := c.temporal.SetConfig(cfg); err != nil {
+		return err
+	}
+
+	c.set = true
+	return nil
+}
+
+// Config honors the storer.ConfigStorer interface.
+func (c *ConfigStorage) Config() (*config.Config, error) {
+	if !c.set {
+		return c.ConfigStorer.Config()
+	}
+
+	return c.temporal.Config()
+}
+
+// Commit it copies the config from the temporal storage into the base storage.
+func (c *ConfigStorage) Commit() error {
+	if !c.set {
+		return nil
+	}
+
+	cfg, err := c.temporal.Config()
+	if err != nil {
+		return err
+	}
+
+	return c.ConfigStorer.SetConfig(cfg)
+}
diff --git a/storage/transactional/config_test.go b/storage/transactional/config_test.go
new file mode 100644
index 0000000..5d1e019
--- /dev/null
+++ b/storage/transactional/config_test.go
@@ -0,0 +1,82 @@
+package transactional
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/config"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var _ = Suite(&ConfigSuite{})
+
+type ConfigSuite struct{}
+
+func (s *ConfigSuite) TestSetConfigBase(c *C) {
+	cfg := config.NewConfig()
+	cfg.Core.Worktree = "foo"
+
+	base := memory.NewStorage()
+	err := base.SetConfig(cfg)
+	c.Assert(err, IsNil)
+
+	temporal := memory.NewStorage()
+	cs := NewConfigStorage(base, temporal)
+
+	cfg, err = cs.Config()
+	c.Assert(err, IsNil)
+	c.Assert(cfg.Core.Worktree, Equals, "foo")
+}
+
+func (s *ConfigSuite) TestSetConfigTemporal(c *C) {
+	cfg := config.NewConfig()
+	cfg.Core.Worktree = "foo"
+
+	base := memory.NewStorage()
+	err := base.SetConfig(cfg)
+	c.Assert(err, IsNil)
+
+	temporal := memory.NewStorage()
+
+	cfg = config.NewConfig()
+	cfg.Core.Worktree = "bar"
+
+	cs := NewConfigStorage(base, temporal)
+	err = cs.SetConfig(cfg)
+	c.Assert(err, IsNil)
+
+	baseCfg, err := base.Config()
+	c.Assert(err, IsNil)
+	c.Assert(baseCfg.Core.Worktree, Equals, "foo")
+
+	temporalCfg, err := temporal.Config()
+	c.Assert(err, IsNil)
+	c.Assert(temporalCfg.Core.Worktree, Equals, "bar")
+
+	cfg, err = cs.Config()
+	c.Assert(err, IsNil)
+	c.Assert(temporalCfg.Core.Worktree, Equals, "bar")
+}
+
+func (s *ConfigSuite) TestCommit(c *C) {
+	cfg := config.NewConfig()
+	cfg.Core.Worktree = "foo"
+
+	base := memory.NewStorage()
+	err := base.SetConfig(cfg)
+	c.Assert(err, IsNil)
+
+	temporal := memory.NewStorage()
+
+	cfg = config.NewConfig()
+	cfg.Core.Worktree = "bar"
+
+	cs := NewConfigStorage(base, temporal)
+	err = cs.SetConfig(cfg)
+	c.Assert(err, IsNil)
+
+	err = cs.Commit()
+	c.Assert(err, IsNil)
+
+	baseCfg, err := base.Config()
+	c.Assert(err, IsNil)
+	c.Assert(baseCfg.Core.Worktree, Equals, "bar")
+}
diff --git a/storage/transactional/doc.go b/storage/transactional/doc.go
new file mode 100644
index 0000000..3a68f5f
--- /dev/null
+++ b/storage/transactional/doc.go
@@ -0,0 +1,7 @@
+// Package transactional is a transactional implementation of git.Storer, it
+// demux the write and read operation of two separate storers, allowing to merge
+// content calling Storage.Commit.
+//
+// The API and functionality of this package are considered EXPERIMENTAL and is
+// not considered stable nor production ready.
+package transactional
diff --git a/storage/transactional/index.go b/storage/transactional/index.go
new file mode 100644
index 0000000..84e0e2f
--- /dev/null
+++ b/storage/transactional/index.go
@@ -0,0 +1,56 @@
+package transactional
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// IndexStorage implements the storer.IndexStorage for the transactional package.
+type IndexStorage struct {
+	storer.IndexStorer
+	temporal storer.IndexStorer
+
+	set bool
+}
+
+// NewIndexStorage returns a new IndexStorer based on a base storer and a
+// temporal storer.
+func NewIndexStorage(s, temporal storer.IndexStorer) *IndexStorage {
+	return &IndexStorage{
+		IndexStorer: s,
+		temporal:    temporal,
+	}
+}
+
+// SetIndex honors the storer.IndexStorer interface.
+func (s *IndexStorage) SetIndex(idx *index.Index) (err error) {
+	if err := s.temporal.SetIndex(idx); err != nil {
+		return err
+	}
+
+	s.set = true
+	return nil
+}
+
+// Index honors the storer.IndexStorer interface.
+func (s *IndexStorage) Index() (*index.Index, error) {
+	if !s.set {
+		return s.IndexStorer.Index()
+	}
+
+	return s.temporal.Index()
+}
+
+// Commit it copies the index from the temporal storage into the base storage.
+func (s *IndexStorage) Commit() error {
+	if !s.set {
+		return nil
+	}
+
+	idx, err := s.temporal.Index()
+	if err != nil {
+		return err
+	}
+
+	return s.IndexStorer.SetIndex(idx)
+}
diff --git a/storage/transactional/index_test.go b/storage/transactional/index_test.go
new file mode 100644
index 0000000..e1c571a
--- /dev/null
+++ b/storage/transactional/index_test.go
@@ -0,0 +1,52 @@
+package transactional
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var _ = Suite(&IndexSuite{})
+
+type IndexSuite struct{}
+
+func (s *IndexSuite) TestSetIndexBase(c *C) {
+	idx := &index.Index{}
+	idx.Version = 2
+
+	base := memory.NewStorage()
+	err := base.SetIndex(idx)
+	c.Assert(err, IsNil)
+
+	temporal := memory.NewStorage()
+	cs := NewIndexStorage(base, temporal)
+
+	idx, err = cs.Index()
+	c.Assert(err, IsNil)
+	c.Assert(idx.Version, Equals, uint32(2))
+}
+
+func (s *IndexSuite) TestCommit(c *C) {
+	idx := &index.Index{}
+	idx.Version = 2
+
+	base := memory.NewStorage()
+	err := base.SetIndex(idx)
+	c.Assert(err, IsNil)
+
+	temporal := memory.NewStorage()
+
+	idx = &index.Index{}
+	idx.Version = 3
+
+	is := NewIndexStorage(base, temporal)
+	err = is.SetIndex(idx)
+	c.Assert(err, IsNil)
+
+	err = is.Commit()
+	c.Assert(err, IsNil)
+
+	baseIndex, err := base.Index()
+	c.Assert(err, IsNil)
+	c.Assert(baseIndex.Version, Equals, uint32(3))
+}
diff --git a/storage/transactional/object.go b/storage/transactional/object.go
new file mode 100644
index 0000000..beb63d6
--- /dev/null
+++ b/storage/transactional/object.go
@@ -0,0 +1,84 @@
+package transactional
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// ObjectStorage implements the storer.EncodedObjectStorer for the transactional package.
+type ObjectStorage struct {
+	storer.EncodedObjectStorer
+	temporal storer.EncodedObjectStorer
+}
+
+// NewObjectStorage returns a new EncodedObjectStorer based on a base storer and
+// a temporal storer.
+func NewObjectStorage(base, temporal storer.EncodedObjectStorer) *ObjectStorage {
+	return &ObjectStorage{EncodedObjectStorer: base, temporal: temporal}
+}
+
+// SetEncodedObject honors the storer.EncodedObjectStorer interface.
+func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) {
+	return o.temporal.SetEncodedObject(obj)
+}
+
+// HasEncodedObject honors the storer.EncodedObjectStorer interface.
+func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) error {
+	err := o.EncodedObjectStorer.HasEncodedObject(h)
+	if err == plumbing.ErrObjectNotFound {
+		return o.temporal.HasEncodedObject(h)
+	}
+
+	return err
+}
+
+// EncodedObjectSize honors the storer.EncodedObjectStorer interface.
+func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) (int64, error) {
+	sz, err := o.EncodedObjectStorer.EncodedObjectSize(h)
+	if err == plumbing.ErrObjectNotFound {
+		return o.temporal.EncodedObjectSize(h)
+	}
+
+	return sz, err
+}
+
+// EncodedObject honors the storer.EncodedObjectStorer interface.
+func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
+	obj, err := o.EncodedObjectStorer.EncodedObject(t, h)
+	if err == plumbing.ErrObjectNotFound {
+		return o.temporal.EncodedObject(t, h)
+	}
+
+	return obj, err
+}
+
+// IterEncodedObjects honors the storer.EncodedObjectStorer interface.
+func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
+	baseIter, err := o.EncodedObjectStorer.IterEncodedObjects(t)
+	if err != nil {
+		return nil, err
+	}
+
+	temporalIter, err := o.temporal.IterEncodedObjects(t)
+	if err != nil {
+		return nil, err
+	}
+
+	return storer.NewMultiEncodedObjectIter([]storer.EncodedObjectIter{
+		baseIter,
+		temporalIter,
+	}), nil
+}
+
+// Commit it copies the objects of the temporal storage into the base storage.
+func (o *ObjectStorage) Commit() error {
+	iter, err := o.temporal.IterEncodedObjects(plumbing.AnyObject)
+	if err != nil {
+		return err
+	}
+
+	return iter.ForEach(func(obj plumbing.EncodedObject) error {
+		_, err := o.EncodedObjectStorer.SetEncodedObject(obj)
+		return err
+	})
+}
diff --git a/storage/transactional/object_test.go b/storage/transactional/object_test.go
new file mode 100644
index 0000000..10b6318
--- /dev/null
+++ b/storage/transactional/object_test.go
@@ -0,0 +1,153 @@
+package transactional
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var _ = Suite(&ObjectSuite{})
+
+type ObjectSuite struct{}
+
+func (s *ObjectSuite) TestHasEncodedObject(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	os := NewObjectStorage(base, temporal)
+
+	commit := base.NewEncodedObject()
+	commit.SetType(plumbing.CommitObject)
+
+	ch, err := base.SetEncodedObject(commit)
+	c.Assert(ch.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	tree := base.NewEncodedObject()
+	tree.SetType(plumbing.TreeObject)
+
+	th, err := os.SetEncodedObject(tree)
+	c.Assert(th.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	err = os.HasEncodedObject(th)
+	c.Assert(err, IsNil)
+
+	err = os.HasEncodedObject(ch)
+	c.Assert(err, IsNil)
+
+	err = base.HasEncodedObject(th)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *ObjectSuite) TestEncodedObjectAndEncodedObjectSize(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	os := NewObjectStorage(base, temporal)
+
+	commit := base.NewEncodedObject()
+	commit.SetType(plumbing.CommitObject)
+
+	ch, err := base.SetEncodedObject(commit)
+	c.Assert(ch.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	tree := base.NewEncodedObject()
+	tree.SetType(plumbing.TreeObject)
+
+	th, err := os.SetEncodedObject(tree)
+	c.Assert(th.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	otree, err := os.EncodedObject(plumbing.TreeObject, th)
+	c.Assert(err, IsNil)
+	c.Assert(otree.Hash(), Equals, tree.Hash())
+
+	treeSz, err := os.EncodedObjectSize(th)
+	c.Assert(err, IsNil)
+	c.Assert(treeSz, Equals, int64(0))
+
+	ocommit, err := os.EncodedObject(plumbing.CommitObject, ch)
+	c.Assert(err, IsNil)
+	c.Assert(ocommit.Hash(), Equals, commit.Hash())
+
+	commitSz, err := os.EncodedObjectSize(ch)
+	c.Assert(err, IsNil)
+	c.Assert(commitSz, Equals, int64(0))
+
+	_, err = base.EncodedObject(plumbing.TreeObject, th)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+
+	_, err = base.EncodedObjectSize(th)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *ObjectSuite) TestIterEncodedObjects(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	os := NewObjectStorage(base, temporal)
+
+	commit := base.NewEncodedObject()
+	commit.SetType(plumbing.CommitObject)
+
+	ch, err := base.SetEncodedObject(commit)
+	c.Assert(ch.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	tree := base.NewEncodedObject()
+	tree.SetType(plumbing.TreeObject)
+
+	th, err := os.SetEncodedObject(tree)
+	c.Assert(th.IsZero(), Equals, false)
+	c.Assert(err, IsNil)
+
+	iter, err := os.IterEncodedObjects(plumbing.AnyObject)
+	c.Assert(err, IsNil)
+
+	var hashes []plumbing.Hash
+	err = iter.ForEach(func(obj plumbing.EncodedObject) error {
+		hashes = append(hashes, obj.Hash())
+		return nil
+	})
+
+	c.Assert(err, IsNil)
+	c.Assert(hashes, HasLen, 2)
+	c.Assert(hashes[0], Equals, ch)
+	c.Assert(hashes[1], Equals, th)
+}
+
+func (s *ObjectSuite) TestCommit(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	os := NewObjectStorage(base, temporal)
+
+	commit := base.NewEncodedObject()
+	commit.SetType(plumbing.CommitObject)
+
+	_, err := os.SetEncodedObject(commit)
+	c.Assert(err, IsNil)
+
+	tree := base.NewEncodedObject()
+	tree.SetType(plumbing.TreeObject)
+
+	_, err = os.SetEncodedObject(tree)
+	c.Assert(err, IsNil)
+
+	err = os.Commit()
+	c.Assert(err, IsNil)
+
+	iter, err := base.IterEncodedObjects(plumbing.AnyObject)
+	c.Assert(err, IsNil)
+
+	var hashes []plumbing.Hash
+	err = iter.ForEach(func(obj plumbing.EncodedObject) error {
+		hashes = append(hashes, obj.Hash())
+		return nil
+	})
+
+	c.Assert(err, IsNil)
+	c.Assert(hashes, HasLen, 2)
+}
diff --git a/storage/transactional/reference.go b/storage/transactional/reference.go
new file mode 100644
index 0000000..a7be532
--- /dev/null
+++ b/storage/transactional/reference.go
@@ -0,0 +1,138 @@
+package transactional
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage"
+)
+
+// ReferenceStorage implements the storer.ReferenceStorage for the transactional package.
+type ReferenceStorage struct {
+	storer.ReferenceStorer
+	temporal storer.ReferenceStorer
+
+	// deleted, remaining references at this maps are going to be deleted when
+	// commit is requested, the entries are added when RemoveReference is called
+	// and deleted if SetReference is called.
+	deleted map[plumbing.ReferenceName]struct{}
+	// packRefs if true PackRefs is going to be called in the based storer when
+	// commit is called.
+	packRefs bool
+}
+
+// NewReferenceStorage returns a new ReferenceStorer based on a base storer and
+// a temporal storer.
+func NewReferenceStorage(base, temporal storer.ReferenceStorer) *ReferenceStorage {
+	return &ReferenceStorage{
+		ReferenceStorer: base,
+		temporal:        temporal,
+
+		deleted: make(map[plumbing.ReferenceName]struct{}, 0),
+	}
+}
+
+// SetReference honors the storer.ReferenceStorer interface.
+func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error {
+	delete(r.deleted, ref.Name())
+	return r.temporal.SetReference(ref)
+}
+
+// SetReference honors the storer.ReferenceStorer interface.
+func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error {
+	if old == nil {
+		return r.SetReference(ref)
+	}
+
+	tmp, err := r.temporal.Reference(old.Name())
+	if err == plumbing.ErrReferenceNotFound {
+		tmp, err = r.ReferenceStorer.Reference(old.Name())
+	}
+
+	if err != nil {
+		return err
+	}
+
+	if tmp.Hash() != old.Hash() {
+		return storage.ErrReferenceHasChanged
+	}
+
+	return r.SetReference(ref)
+}
+
+// Reference honors the storer.ReferenceStorer interface.
+func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) {
+	if _, deleted := r.deleted[n]; deleted {
+		return nil, plumbing.ErrReferenceNotFound
+	}
+
+	ref, err := r.temporal.Reference(n)
+	if err == plumbing.ErrReferenceNotFound {
+		return r.ReferenceStorer.Reference(n)
+	}
+
+	return ref, err
+}
+
+// IterReferences honors the storer.ReferenceStorer interface.
+func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) {
+	baseIter, err := r.ReferenceStorer.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+
+	temporalIter, err := r.temporal.IterReferences()
+	if err != nil {
+		return nil, err
+	}
+
+	return storer.NewMultiReferenceIter([]storer.ReferenceIter{
+		baseIter,
+		temporalIter,
+	}), nil
+}
+
+// CountLooseRefs honors the storer.ReferenceStorer interface.
+func (r ReferenceStorage) CountLooseRefs() (int, error) {
+	tc, err := r.temporal.CountLooseRefs()
+	if err != nil {
+		return -1, err
+	}
+
+	bc, err := r.ReferenceStorer.CountLooseRefs()
+	if err != nil {
+		return -1, err
+	}
+
+	return tc + bc, nil
+}
+
+// PackRefs honors the storer.ReferenceStorer interface.
+func (r ReferenceStorage) PackRefs() error {
+	r.packRefs = true
+	return nil
+}
+
+// RemoveReference honors the storer.ReferenceStorer interface.
+func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error {
+	r.deleted[n] = struct{}{}
+	return r.temporal.RemoveReference(n)
+}
+
+// Commit it copies the reference information of the temporal storage into the
+// base storage.
+func (r ReferenceStorage) Commit() error {
+	for name := range r.deleted {
+		if err := r.ReferenceStorer.RemoveReference(name); err != nil {
+			return err
+		}
+	}
+
+	iter, err := r.temporal.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	return iter.ForEach(func(ref *plumbing.Reference) error {
+		return r.ReferenceStorer.SetReference(ref)
+	})
+}
diff --git a/storage/transactional/reference_test.go b/storage/transactional/reference_test.go
new file mode 100644
index 0000000..5793549
--- /dev/null
+++ b/storage/transactional/reference_test.go
@@ -0,0 +1,157 @@
+package transactional
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var _ = Suite(&ReferenceSuite{})
+
+type ReferenceSuite struct{}
+
+func (s *ReferenceSuite) TestReference(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	rs := NewReferenceStorage(base, temporal)
+
+	refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+	refB := plumbing.NewReferenceFromStrings("refs/b", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+
+	err := base.SetReference(refA)
+	c.Assert(err, IsNil)
+
+	err = rs.SetReference(refB)
+	c.Assert(err, IsNil)
+
+	_, err = rs.Reference("refs/a")
+	c.Assert(err, IsNil)
+
+	_, err = rs.Reference("refs/b")
+	c.Assert(err, IsNil)
+
+	_, err = base.Reference("refs/b")
+	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
+}
+
+func (s *ReferenceSuite) TestRemoveReferenceTemporal(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	ref := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+
+	rs := NewReferenceStorage(base, temporal)
+	err := rs.SetReference(ref)
+	c.Assert(err, IsNil)
+
+	err = rs.RemoveReference("refs/a")
+	c.Assert(err, IsNil)
+
+	_, err = rs.Reference("refs/a")
+	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
+}
+
+func (s *ReferenceSuite) TestRemoveReferenceBase(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	ref := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+
+	rs := NewReferenceStorage(base, temporal)
+	err := base.SetReference(ref)
+	c.Assert(err, IsNil)
+
+	err = rs.RemoveReference("refs/a")
+	c.Assert(err, IsNil)
+
+	_, err = rs.Reference("refs/a")
+	c.Assert(err, Equals, plumbing.ErrReferenceNotFound)
+}
+
+func (s *ReferenceSuite) TestCheckAndSetReferenceInBase(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+	rs := NewReferenceStorage(base, temporal)
+
+	err := base.SetReference(
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, IsNil)
+
+	err = rs.CheckAndSetReference(
+		plumbing.NewReferenceFromStrings("foo", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52"),
+		plumbing.NewReferenceFromStrings("foo", "482e0eada5de4039e6f216b45b3c9b683b83bfa"),
+	)
+	c.Assert(err, IsNil)
+
+	e, err := rs.Reference(plumbing.ReferenceName("foo"))
+	c.Assert(err, IsNil)
+	c.Assert(e.Hash().String(), Equals, "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+}
+
+func (s *ReferenceSuite) TestCommit(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+	refB := plumbing.NewReferenceFromStrings("refs/b", "b66c08ba28aa1f81eb06a1127aa3936ff77e5e2c")
+	refC := plumbing.NewReferenceFromStrings("refs/c", "c3f4688a08fd86f1bf8e055724c84b7a40a09733")
+
+	rs := NewReferenceStorage(base, temporal)
+	c.Assert(rs.SetReference(refA), IsNil)
+	c.Assert(rs.SetReference(refB), IsNil)
+	c.Assert(rs.SetReference(refC), IsNil)
+
+	err := rs.Commit()
+	c.Assert(err, IsNil)
+
+	iter, err := base.IterReferences()
+	c.Assert(err, IsNil)
+
+	var count int
+	iter.ForEach(func(ref *plumbing.Reference) error {
+		count++
+		return nil
+	})
+
+	c.Assert(count, Equals, 3)
+}
+
+func (s *ReferenceSuite) TestCommitDelete(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	refA := plumbing.NewReferenceFromStrings("refs/a", "bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+	refB := plumbing.NewReferenceFromStrings("refs/b", "b66c08ba28aa1f81eb06a1127aa3936ff77e5e2c")
+	refC := plumbing.NewReferenceFromStrings("refs/c", "c3f4688a08fd86f1bf8e055724c84b7a40a09733")
+
+	rs := NewReferenceStorage(base, temporal)
+	c.Assert(base.SetReference(refA), IsNil)
+	c.Assert(base.SetReference(refB), IsNil)
+	c.Assert(base.SetReference(refC), IsNil)
+
+	c.Assert(rs.RemoveReference(refA.Name()), IsNil)
+	c.Assert(rs.RemoveReference(refB.Name()), IsNil)
+	c.Assert(rs.RemoveReference(refC.Name()), IsNil)
+	c.Assert(rs.SetReference(refC), IsNil)
+
+	err := rs.Commit()
+	c.Assert(err, IsNil)
+
+	iter, err := base.IterReferences()
+	c.Assert(err, IsNil)
+
+	var count int
+	iter.ForEach(func(ref *plumbing.Reference) error {
+		count++
+		return nil
+	})
+
+	c.Assert(count, Equals, 1)
+
+	ref, err := rs.Reference(refC.Name())
+	c.Assert(err, IsNil)
+	c.Assert(ref.Hash().String(), Equals, "c3f4688a08fd86f1bf8e055724c84b7a40a09733")
+
+}
diff --git a/storage/transactional/shallow.go b/storage/transactional/shallow.go
new file mode 100644
index 0000000..bedc325
--- /dev/null
+++ b/storage/transactional/shallow.go
@@ -0,0 +1,51 @@
+package transactional
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+// ShallowStorage implements the storer.ShallowStorer interface for the transactional package.
+type ShallowStorage struct {
+	storer.ShallowStorer
+	temporal storer.ShallowStorer
+}
+
+// NewShallowStorage returns a new ShallowStorage based on a base storer and
+// a temporal storer.
+func NewShallowStorage(base, temporal storer.ShallowStorer) *ShallowStorage {
+	return &ShallowStorage{
+		ShallowStorer: base,
+		temporal:      temporal,
+	}
+}
+
+// SetShallow honors the storer.ShallowStorer interface.
+func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
+	return s.temporal.SetShallow(commits)
+}
+
+// Shallow honors the storer.ShallowStorer interface.
+func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
+	shallow, err := s.temporal.Shallow()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(shallow) != 0 {
+		return shallow, nil
+	}
+
+	return s.ShallowStorer.Shallow()
+}
+
+// Commit copies the shallow information from the temporal storage into the
+// base storage.
+func (s *ShallowStorage) Commit() error {
+	commits, err := s.temporal.Shallow()
+	if err != nil || len(commits) == 0 {
+		return err
+	}
+
+	return s.ShallowStorer.SetShallow(commits)
+}
diff --git a/storage/transactional/shallow_test.go b/storage/transactional/shallow_test.go
new file mode 100644
index 0000000..5141782
--- /dev/null
+++ b/storage/transactional/shallow_test.go
@@ -0,0 +1,62 @@
+package transactional
+
+import (
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+)
+
+var _ = Suite(&ShallowSuite{})
+
+type ShallowSuite struct{}
+
+func (s *ShallowSuite) TestShallow(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	rs := NewShallowStorage(base, temporal)
+
+	commitA := plumbing.NewHash("bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+	commitB := plumbing.NewHash("aa9968d75e48de59f0870ffb71f5e160bbbdcf52")
+
+	err := base.SetShallow([]plumbing.Hash{commitA})
+	c.Assert(err, IsNil)
+
+	err = rs.SetShallow([]plumbing.Hash{commitB})
+	c.Assert(err, IsNil)
+
+	commits, err := rs.Shallow()
+	c.Assert(err, IsNil)
+	c.Assert(commits, HasLen, 1)
+	c.Assert(commits[0], Equals, commitB)
+
+	commits, err = base.Shallow()
+	c.Assert(err, IsNil)
+	c.Assert(commits, HasLen, 1)
+	c.Assert(commits[0], Equals, commitA)
+}
+
+func (s *ShallowSuite) TestCommit(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	rs := NewShallowStorage(base, temporal)
+
+	commitA := plumbing.NewHash("bc9968d75e48de59f0870ffb71f5e160bbbdcf52")
+	commitB := plumbing.NewHash("aa9968d75e48de59f0870ffb71f5e160bbbdcf52")
+
+	c.Assert(base.SetShallow([]plumbing.Hash{commitA}), IsNil)
+	c.Assert(rs.SetShallow([]plumbing.Hash{commitB}), IsNil)
+
+	c.Assert(rs.Commit(), IsNil)
+
+	commits, err := rs.Shallow()
+	c.Assert(err, IsNil)
+	c.Assert(commits, HasLen, 1)
+	c.Assert(commits[0], Equals, commitB)
+
+	commits, err = base.Shallow()
+	c.Assert(err, IsNil)
+	c.Assert(commits, HasLen, 1)
+	c.Assert(commits[0], Equals, commitB)
+}
diff --git a/storage/transactional/storage.go b/storage/transactional/storage.go
new file mode 100644
index 0000000..fbb3d35
--- /dev/null
+++ b/storage/transactional/storage.go
@@ -0,0 +1,69 @@
+package transactional
+
+import (
+	"gopkg.in/src-d/go-git.v4/storage"
+)
+
+// Storage is a transactional implementation of git.Storer: it demuxes the write
+// and read operations across two separate storers, allowing their content to be
+// merged by calling Storage.Commit.
+//
+// The API and functionality of this package are considered EXPERIMENTAL and are
+// neither stable nor production ready.
+type Storage struct {
+	s, temporal storage.Storer
+
+	*ObjectStorage
+	*ReferenceStorage
+	*IndexStorage
+	*ShallowStorage
+	*ConfigStorage
+}
+
+// NewStorage returns a new Storage based on two repositories: base is the base
+// repository where the read operations are performed, and temporal is where all
+// the write operations are stored.
+func NewStorage(base, temporal storage.Storer) *Storage {
+	return &Storage{
+		s:        base,
+		temporal: temporal,
+
+		ObjectStorage:    NewObjectStorage(base, temporal),
+		ReferenceStorage: NewReferenceStorage(base, temporal),
+		IndexStorage:     NewIndexStorage(base, temporal),
+		ShallowStorage:   NewShallowStorage(base, temporal),
+		ConfigStorage:    NewConfigStorage(base, temporal),
+	}
+}
+
+// Module honors the storage.ModuleStorer interface.
+func (s *Storage) Module(name string) (storage.Storer, error) {
+	base, err := s.s.Module(name)
+	if err != nil {
+		return nil, err
+	}
+
+	temporal, err := s.temporal.Module(name)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewStorage(base, temporal), nil
+}
+
+// Commit copies the content of the temporal storage into the base storage.
+func (s *Storage) Commit() error {
+	for _, c := range []interface{ Commit() error }{
+		s.ObjectStorage,
+		s.ReferenceStorage,
+		s.IndexStorage,
+		s.ShallowStorage,
+		s.ConfigStorage,
+	} {
+		if err := c.Commit(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/storage/transactional/storage_test.go b/storage/transactional/storage_test.go
new file mode 100644
index 0000000..6aaea0d
--- /dev/null
+++ b/storage/transactional/storage_test.go
@@ -0,0 +1,52 @@
+package transactional
+
+import (
+	"testing"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+	"gopkg.in/src-d/go-git.v4/storage/test"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type StorageSuite struct {
+	test.BaseStorageSuite
+}
+
+var _ = Suite(&StorageSuite{})
+
+func (s *StorageSuite) SetUpTest(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+
+	s.BaseStorageSuite = test.NewBaseStorageSuite(NewStorage(base, temporal))
+	s.BaseStorageSuite.SetUpTest(c)
+}
+
+func (s *StorageSuite) TestCommit(c *C) {
+	base := memory.NewStorage()
+	temporal := memory.NewStorage()
+	st := NewStorage(base, temporal)
+
+	commit := base.NewEncodedObject()
+	commit.SetType(plumbing.CommitObject)
+
+	_, err := st.SetEncodedObject(commit)
+	c.Assert(err, IsNil)
+
+	ref := plumbing.NewHashReference("refs/a", commit.Hash())
+	c.Assert(st.SetReference(ref), IsNil)
+
+	err = st.Commit()
+	c.Assert(err, IsNil)
+
+	ref, err = base.Reference(ref.Name())
+	c.Assert(err, IsNil)
+	c.Assert(ref.Hash(), Equals, commit.Hash())
+
+	obj, err := base.EncodedObject(plumbing.AnyObject, commit.Hash())
+	c.Assert(err, IsNil)
+	c.Assert(obj.Hash(), Equals, commit.Hash())
+}
diff --git a/worktree.go b/worktree.go
index e45d815..a14fd8d 100644
--- a/worktree.go
+++ b/worktree.go
@@ -25,10 +25,11 @@
 )
 
 var (
-	ErrWorktreeNotClean  = errors.New("worktree is not clean")
-	ErrSubmoduleNotFound = errors.New("submodule not found")
-	ErrUnstagedChanges   = errors.New("worktree contains unstaged changes")
-	ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
+	ErrWorktreeNotClean     = errors.New("worktree is not clean")
+	ErrSubmoduleNotFound    = errors.New("submodule not found")
+	ErrUnstagedChanges      = errors.New("worktree contains unstaged changes")
+	ErrGitModulesSymlink    = errors.New(gitmodulesFile + " is a symlink")
+	ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
 )
 
 // Worktree represents a git worktree.
@@ -101,7 +102,7 @@
 		}
 
 		if !ff {
-			return fmt.Errorf("non-fast-forward update")
+			return ErrNonFastForwardUpdate
 		}
 	}
 
diff --git a/worktree_bsd.go b/worktree_bsd.go
index 3b374c7..9ff670e 100644
--- a/worktree_bsd.go
+++ b/worktree_bsd.go
@@ -1,4 +1,4 @@
-// +build darwin freebsd netbsd openbsd
+// +build darwin freebsd netbsd
 
 package git
 
diff --git a/worktree_test.go b/worktree_test.go
index 5c5aef9..872cd82 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -119,7 +119,7 @@
 	c.Assert(err, IsNil)
 
 	err = w.Pull(&PullOptions{})
-	c.Assert(err, ErrorMatches, "non-fast-forward update")
+	c.Assert(err, Equals, ErrNonFastForwardUpdate)
 }
 
 func (s *WorktreeSuite) TestPullUpdateReferencesIfNeeded(c *C) {
diff --git a/worktree_unix_other.go b/worktree_unix_other.go
new file mode 100644
index 0000000..d632767
--- /dev/null
+++ b/worktree_unix_other.go
@@ -0,0 +1,26 @@
+// +build openbsd dragonfly solaris
+
+package git
+
+import (
+	"syscall"
+	"time"
+
+	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
+)
+
+func init() {
+	fillSystemInfo = func(e *index.Entry, sys interface{}) {
+		if os, ok := sys.(*syscall.Stat_t); ok {
+			e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec))
+			e.Dev = uint32(os.Dev)
+			e.Inode = uint32(os.Ino)
+			e.GID = os.Gid
+			e.UID = os.Uid
+		}
+	}
+}
+
+func isSymlinkWindowsNonAdmin(err error) bool {
+	return false
+}