worktree: resolve merge conflicts
diff --git a/common_test.go b/common_test.go
index f8f4e61..efe1ecc 100644
--- a/common_test.go
+++ b/common_test.go
@@ -113,14 +113,7 @@
 	p := f.Packfile()
 	defer p.Close()
 
-	n := packfile.NewScanner(p)
-	d, err := packfile.NewDecoder(n, storer)
-	if err != nil {
-		panic(err)
-	}
-
-	_, err = d.Decode()
-	if err != nil {
+	if err := packfile.UpdateObjectStorage(storer, p); err != nil {
 		panic(err)
 	}
 
diff --git a/config/config.go b/config/config.go
index ce6506d..a637f6d 100644
--- a/config/config.go
+++ b/config/config.go
@@ -40,6 +40,9 @@
 		IsBare bool
 		// Worktree is the path to the root of the working tree.
 		Worktree string
+		// CommentChar is the character indicating the start of a
+		// comment for commands like commit and tag.
+		CommentChar string
 	}
 
 	Pack struct {
@@ -113,6 +116,7 @@
 	urlKey           = "url"
 	bareKey          = "bare"
 	worktreeKey      = "worktree"
+	commentCharKey   = "commentChar"
 	windowKey        = "window"
 	mergeKey         = "merge"
 
@@ -151,6 +155,7 @@
 	}
 
 	c.Core.Worktree = s.Options.Get(worktreeKey)
+	c.Core.CommentChar = s.Options.Get(commentCharKey)
 }
 
 func (c *Config) unmarshalPack() error {
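For reference, the new option is read back through the existing `Unmarshal` API; a minimal sketch (go-git v4 import paths as used throughout this patch):

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/config"
)

func main() {
	raw := []byte("[core]\n\tcommentChar = ;\n")

	cfg := config.NewConfig()
	if err := cfg.Unmarshal(raw); err != nil {
		panic(err)
	}

	// CommentChar is populated by unmarshalCore, just like Worktree above.
	fmt.Println(cfg.Core.CommentChar) // ";"
}
```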
diff --git a/config/config_test.go b/config/config_test.go
index 5cd713e..fe73de8 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -13,6 +13,7 @@
 	input := []byte(`[core]
         bare = true
 		worktree = foo
+		commentchar = bar
 [pack]
 		window = 20
 [remote "origin"]
@@ -38,6 +39,7 @@
 
 	c.Assert(cfg.Core.IsBare, Equals, true)
 	c.Assert(cfg.Core.Worktree, Equals, "foo")
+	c.Assert(cfg.Core.CommentChar, Equals, "bar")
 	c.Assert(cfg.Pack.Window, Equals, uint(20))
 	c.Assert(cfg.Remotes, HasLen, 2)
 	c.Assert(cfg.Remotes["origin"].Name, Equals, "origin")
diff --git a/options.go b/options.go
index 885980e..7b1570f 100644
--- a/options.go
+++ b/options.go
@@ -4,6 +4,7 @@
 	"errors"
 	"regexp"
 
+	"golang.org/x/crypto/openpgp"
 	"gopkg.in/src-d/go-git.v4/config"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
@@ -347,6 +348,9 @@
 	// Parents are the parents commits for the new commit, by default when
 	// len(Parents) is zero, the hash of HEAD reference is used.
 	Parents []plumbing.Hash
+	// A key to sign the commit with. A nil value here means the commit will not
+	// be signed. The private key must be present and already decrypted.
+	SignKey *openpgp.Entity
 }
 
 // Validate validates the fields and sets the default values.
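A hedged sketch of how the new SignKey field plugs into Worktree.Commit. The keyring path and author identity are illustrative only; as the field comment above says, the private key must already be decrypted:

```go
package main

import (
	"os"
	"time"

	"golang.org/x/crypto/openpgp"
	git "gopkg.in/src-d/go-git.v4"
	"gopkg.in/src-d/go-git.v4/plumbing/object"
)

func commitSigned(repo *git.Repository) error {
	// hypothetical armored keyring on disk
	f, err := os.Open("private.asc")
	if err != nil {
		return err
	}
	defer f.Close()

	entities, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		return err
	}

	w, err := repo.Worktree()
	if err != nil {
		return err
	}

	// the first entity's private key is assumed to be decrypted already
	_, err = w.Commit("signed commit", &git.CommitOptions{
		Author: &object.Signature{
			Name:  "Alice",
			Email: "alice@example.com",
			When:  time.Now(),
		},
		SignKey: entities[0],
	})
	return err
}

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	if err := commitSigned(repo); err != nil {
		panic(err)
	}
}
```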
diff --git a/plumbing/cache/buffer_lru.go b/plumbing/cache/buffer_lru.go
new file mode 100644
index 0000000..f2c0f90
--- /dev/null
+++ b/plumbing/cache/buffer_lru.go
@@ -0,0 +1,98 @@
+package cache
+
+import (
+	"container/list"
+	"sync"
+)
+
+// BufferLRU implements an object cache with an LRU eviction policy and a
+// maximum size (measured in object size).
+type BufferLRU struct {
+	MaxSize FileSize
+
+	actualSize FileSize
+	ll         *list.List
+	cache      map[int64]*list.Element
+	mut        sync.Mutex
+}
+
+// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum
+// size will never be exceeded.
+func NewBufferLRU(maxSize FileSize) *BufferLRU {
+	return &BufferLRU{MaxSize: maxSize}
+}
+
+// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
+func NewBufferLRUDefault() *BufferLRU {
+	return &BufferLRU{MaxSize: DefaultMaxSize}
+}
+
+type buffer struct {
+	Key   int64
+	Slice []byte
+}
+
+// Put puts a buffer into the cache. If the buffer is already in the cache, it
+// will be marked as used. Otherwise, it will be inserted. Buffers might
+// be evicted to make room for the new one.
+func (c *BufferLRU) Put(key int64, slice []byte) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	if c.cache == nil {
+		c.actualSize = 0
+		c.cache = make(map[int64]*list.Element, 1000)
+		c.ll = list.New()
+	}
+
+	if ee, ok := c.cache[key]; ok {
+		c.ll.MoveToFront(ee)
+		ee.Value = buffer{key, slice}
+		return
+	}
+
+	objSize := FileSize(len(slice))
+
+	if objSize > c.MaxSize {
+		return
+	}
+
+	for c.actualSize+objSize > c.MaxSize {
+		last := c.ll.Back()
+		lastObj := last.Value.(buffer)
+		lastSize := FileSize(len(lastObj.Slice))
+
+		c.ll.Remove(last)
+		delete(c.cache, lastObj.Key)
+		c.actualSize -= lastSize
+	}
+
+	ee := c.ll.PushFront(buffer{key, slice})
+	c.cache[key] = ee
+	c.actualSize += objSize
+}
+
+// Get returns a buffer by its key. It marks the buffer as used. If the buffer
+// is not in the cache, (nil, false) will be returned.
+func (c *BufferLRU) Get(key int64) ([]byte, bool) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	ee, ok := c.cache[key]
+	if !ok {
+		return nil, false
+	}
+
+	c.ll.MoveToFront(ee)
+	return ee.Value.(buffer).Slice, true
+}
+
+// Clear clears the content of this buffer cache.
+func (c *BufferLRU) Clear() {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	c.ll = nil
+	c.cache = nil
+	c.actualSize = 0
+}
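A short usage sketch for the new cache. Keys are pack offsets in practice, and the 2*Byte budget mirrors the tests below (Byte is one of the FileSize constants this package already defines):

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/cache"
)

func main() {
	bc := cache.NewBufferLRU(2 * cache.Byte)

	bc.Put(12, []byte("a"))
	bc.Put(142, []byte("b")) // the cache is now full; another Put evicts LRU entries

	if buf, ok := bc.Get(12); ok {
		fmt.Printf("cached: %s\n", buf)
	}
}
```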
diff --git a/plumbing/cache/buffer_test.go b/plumbing/cache/buffer_test.go
new file mode 100644
index 0000000..262138a
--- /dev/null
+++ b/plumbing/cache/buffer_test.go
@@ -0,0 +1,128 @@
+package cache
+
+import (
+	"sync"
+
+	. "gopkg.in/check.v1"
+)
+
+type BufferSuite struct {
+	c       map[string]Buffer
+	aBuffer []byte
+	bBuffer []byte
+	cBuffer []byte
+	dBuffer []byte
+	eBuffer []byte
+}
+
+var _ = Suite(&BufferSuite{})
+
+func (s *BufferSuite) SetUpTest(c *C) {
+	s.aBuffer = []byte("a")
+	s.bBuffer = []byte("bbb")
+	s.cBuffer = []byte("c")
+	s.dBuffer = []byte("d")
+	s.eBuffer = []byte("ee")
+
+	s.c = make(map[string]Buffer)
+	s.c["two_bytes"] = NewBufferLRU(2 * Byte)
+	s.c["default_lru"] = NewBufferLRUDefault()
+}
+
+func (s *BufferSuite) TestPutSameBuffer(c *C) {
+	for _, o := range s.c {
+		o.Put(1, s.aBuffer)
+		o.Put(1, s.aBuffer)
+		_, ok := o.Get(1)
+		c.Assert(ok, Equals, true)
+	}
+}
+
+func (s *BufferSuite) TestPutBigBuffer(c *C) {
+	for _, o := range s.c {
+		o.Put(1, s.bBuffer)
+		_, ok := o.Get(2)
+		c.Assert(ok, Equals, false)
+	}
+}
+
+func (s *BufferSuite) TestPutCacheOverflow(c *C) {
+	// this test only works with a specific size
+	o := s.c["two_bytes"]
+
+	o.Put(1, s.aBuffer)
+	o.Put(2, s.cBuffer)
+	o.Put(3, s.dBuffer)
+
+	obj, ok := o.Get(1)
+	c.Assert(ok, Equals, false)
+	c.Assert(obj, IsNil)
+	obj, ok = o.Get(2)
+	c.Assert(ok, Equals, true)
+	c.Assert(obj, NotNil)
+	obj, ok = o.Get(3)
+	c.Assert(ok, Equals, true)
+	c.Assert(obj, NotNil)
+}
+
+func (s *BufferSuite) TestEvictMultipleBuffers(c *C) {
+	o := s.c["two_bytes"]
+
+	o.Put(1, s.cBuffer)
+	o.Put(2, s.dBuffer) // now cache is full with two objects
+	o.Put(3, s.eBuffer) // this put should evict all previous objects
+
+	obj, ok := o.Get(1)
+	c.Assert(ok, Equals, false)
+	c.Assert(obj, IsNil)
+	obj, ok = o.Get(2)
+	c.Assert(ok, Equals, false)
+	c.Assert(obj, IsNil)
+	obj, ok = o.Get(3)
+	c.Assert(ok, Equals, true)
+	c.Assert(obj, NotNil)
+}
+
+func (s *BufferSuite) TestClear(c *C) {
+	for _, o := range s.c {
+		o.Put(1, s.aBuffer)
+		o.Clear()
+		obj, ok := o.Get(1)
+		c.Assert(ok, Equals, false)
+		c.Assert(obj, IsNil)
+	}
+}
+
+func (s *BufferSuite) TestConcurrentAccess(c *C) {
+	for _, o := range s.c {
+		var wg sync.WaitGroup
+
+		for i := 0; i < 1000; i++ {
+			wg.Add(3)
+			go func(i int) {
+				o.Put(int64(i), []byte{00})
+				wg.Done()
+			}(i)
+
+			go func(i int) {
+				if i%30 == 0 {
+					o.Clear()
+				}
+				wg.Done()
+			}(i)
+
+			go func(i int) {
+				o.Get(int64(i))
+				wg.Done()
+			}(i)
+		}
+
+		wg.Wait()
+	}
+}
+
+func (s *BufferSuite) TestDefaultLRU(c *C) {
+	defaultLRU := s.c["default_lru"].(*BufferLRU)
+
+	c.Assert(defaultLRU.MaxSize, Equals, DefaultMaxSize)
+}
diff --git a/plumbing/cache/common.go b/plumbing/cache/common.go
index e77baf0..2b7f36a 100644
--- a/plumbing/cache/common.go
+++ b/plumbing/cache/common.go
@@ -24,3 +24,16 @@
 	// Clear clears every object from the cache.
 	Clear()
 }
+
+// Buffer is an interface to a buffer cache.
+type Buffer interface {
+	// Put puts a buffer into the cache. If the buffer is already in the cache,
+	// it will be marked as used. Otherwise, it will be inserted. Buffers might
+	// be evicted to make room for the new one.
+	Put(key int64, slice []byte)
+	// Get returns a buffer by its key. It marks the buffer as used. If the
+	// buffer is not in the cache, (nil, false) will be returned.
+	Get(key int64) ([]byte, bool)
+	// Clear clears every object from the cache.
+	Clear()
+}
diff --git a/plumbing/format/gitignore/pattern.go b/plumbing/format/gitignore/pattern.go
index 2603352..098cb50 100644
--- a/plumbing/format/gitignore/pattern.go
+++ b/plumbing/format/gitignore/pattern.go
@@ -133,6 +133,9 @@
 				} else if match {
 					matched = true
 					break
+				} else if len(path) == 0 {
+					// if nothing is left of the path, the match fails
+					matched = false
 				}
 			}
 		} else {
diff --git a/plumbing/format/gitignore/pattern_test.go b/plumbing/format/gitignore/pattern_test.go
index f94cef3..c410442 100644
--- a/plumbing/format/gitignore/pattern_test.go
+++ b/plumbing/format/gitignore/pattern_test.go
@@ -281,3 +281,9 @@
 	r := p.Match([]string{"value", "head", "vol["}, false)
 	c.Assert(r, Equals, NoMatch)
 }
+
+func (s *PatternSuite) TestGlobMatch_issue_923(c *C) {
+	p := ParsePattern("**/android/**/GeneratedPluginRegistrant.java", nil)
+	r := p.Match([]string{"packages", "flutter_tools", "lib", "src", "android", "gradle.dart"}, false)
+	c.Assert(r, Equals, NoMatch)
+}
diff --git a/plumbing/format/idxfile/decoder.go b/plumbing/format/idxfile/decoder.go
index 45afb1e..5b92782 100644
--- a/plumbing/format/idxfile/decoder.go
+++ b/plumbing/format/idxfile/decoder.go
@@ -17,6 +17,11 @@
 	ErrMalformedIdxFile = errors.New("Malformed IDX file")
 )
 
+const (
+	fanout         = 256
+	objectIDLength = 20
+)
+
 // Decoder reads and decodes idx files from an input stream.
 type Decoder struct {
 	*bufio.Reader
@@ -27,13 +32,13 @@
 	return &Decoder{bufio.NewReader(r)}
 }
 
-// Decode reads from the stream and decode the content into the Idxfile struct.
-func (d *Decoder) Decode(idx *Idxfile) error {
+// Decode reads from the stream and decodes the content into the MemoryIndex struct.
+func (d *Decoder) Decode(idx *MemoryIndex) error {
 	if err := validateHeader(d); err != nil {
 		return err
 	}
 
-	flow := []func(*Idxfile, io.Reader) error{
+	flow := []func(*MemoryIndex, io.Reader) error{
 		readVersion,
 		readFanout,
 		readObjectNames,
@@ -48,10 +53,6 @@
 		}
 	}
 
-	if !idx.isValid() {
-		return ErrMalformedIdxFile
-	}
-
 	return nil
 }
 
@@ -68,7 +69,7 @@
 	return nil
 }
 
-func readVersion(idx *Idxfile, r io.Reader) error {
+func readVersion(idx *MemoryIndex, r io.Reader) error {
 	v, err := binary.ReadUint32(r)
 	if err != nil {
 		return err
@@ -82,74 +83,92 @@
 	return nil
 }
 
-func readFanout(idx *Idxfile, r io.Reader) error {
-	var err error
-	for i := 0; i < 255; i++ {
-		idx.Fanout[i], err = binary.ReadUint32(r)
+func readFanout(idx *MemoryIndex, r io.Reader) error {
+	for k := 0; k < fanout; k++ {
+		n, err := binary.ReadUint32(r)
 		if err != nil {
 			return err
 		}
-	}
 
-	idx.ObjectCount, err = binary.ReadUint32(r)
-	return err
-}
-
-func readObjectNames(idx *Idxfile, r io.Reader) error {
-	c := int(idx.ObjectCount)
-	new := make([]Entry, c)
-	for i := 0; i < c; i++ {
-		e := &new[i]
-		if _, err := io.ReadFull(r, e.Hash[:]); err != nil {
-			return err
-		}
-
-		idx.Entries = append(idx.Entries, e)
+		idx.Fanout[k] = n
+		idx.FanoutMapping[k] = noMapping
 	}
 
 	return nil
 }
 
-func readCRC32(idx *Idxfile, r io.Reader) error {
-	c := int(idx.ObjectCount)
-	for i := 0; i < c; i++ {
-		if err := binary.Read(r, &idx.Entries[i].CRC32); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func readOffsets(idx *Idxfile, r io.Reader) error {
-	c := int(idx.ObjectCount)
-
-	for i := 0; i < c; i++ {
-		o, err := binary.ReadUint32(r)
-		if err != nil {
-			return err
+func readObjectNames(idx *MemoryIndex, r io.Reader) error {
+	for k := 0; k < fanout; k++ {
+		var buckets uint32
+		if k == 0 {
+			buckets = idx.Fanout[k]
+		} else {
+			buckets = idx.Fanout[k] - idx.Fanout[k-1]
 		}
 
-		idx.Entries[i].Offset = uint64(o)
-	}
-
-	for i := 0; i < c; i++ {
-		if idx.Entries[i].Offset <= offsetLimit {
+		if buckets == 0 {
 			continue
 		}
 
-		o, err := binary.ReadUint64(r)
-		if err != nil {
+		if k > 0 && idx.Fanout[k] < idx.Fanout[k-1] { // buckets is unsigned; check fanout monotonicity instead
+			return ErrMalformedIdxFile
+		}
+
+		idx.FanoutMapping[k] = len(idx.Names)
+
+		nameLen := int(buckets * objectIDLength)
+		bin := make([]byte, nameLen)
+		if _, err := io.ReadFull(r, bin); err != nil {
 			return err
 		}
 
-		idx.Entries[i].Offset = o
+		idx.Names = append(idx.Names, bin)
+		idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4))
+		idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4))
 	}
 
 	return nil
 }
 
-func readChecksums(idx *Idxfile, r io.Reader) error {
+func readCRC32(idx *MemoryIndex, r io.Reader) error {
+	for k := 0; k < fanout; k++ {
+		if pos := idx.FanoutMapping[k]; pos != noMapping {
+			if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func readOffsets(idx *MemoryIndex, r io.Reader) error {
+	var o64cnt int
+	for k := 0; k < fanout; k++ {
+		if pos := idx.FanoutMapping[k]; pos != noMapping {
+			if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil {
+				return err
+			}
+
+			for p := 0; p < len(idx.Offset32[pos]); p += 4 {
+				if idx.Offset32[pos][p]&(byte(1)<<7) > 0 {
+					o64cnt++
+				}
+			}
+		}
+	}
+
+	if o64cnt > 0 {
+		idx.Offset64 = make([]byte, o64cnt*8)
+		if _, err := io.ReadFull(r, idx.Offset64); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readChecksums(idx *MemoryIndex, r io.Reader) error {
 	if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil {
 		return err
 	}
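Putting the new decoder API together, a sketch that loads an idx file into a MemoryIndex and queries it through the Index interface; the pack file name is hypothetical, and the hash is the one the tests below assert on:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	f, err := os.Open(".git/objects/pack/pack-0123.idx") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	h := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea")
	if offset, err := idx.FindOffset(h); err == nil {
		fmt.Println("object at offset", offset)
	}
}
```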
diff --git a/plumbing/format/idxfile/decoder_test.go b/plumbing/format/idxfile/decoder_test.go
index 20d6859..b43d7c5 100644
--- a/plumbing/format/idxfile/decoder_test.go
+++ b/plumbing/format/idxfile/decoder_test.go
@@ -4,11 +4,12 @@
 	"bytes"
 	"encoding/base64"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"testing"
 
+	"gopkg.in/src-d/go-git.v4/plumbing"
 	. "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
-	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
@@ -26,51 +27,34 @@
 	f := fixtures.Basic().One()
 
 	d := NewDecoder(f.Idx())
-	idx := &Idxfile{}
+	idx := new(MemoryIndex)
 	err := d.Decode(idx)
 	c.Assert(err, IsNil)
 
-	c.Assert(idx.Entries, HasLen, 31)
-	c.Assert(idx.Entries[0].Hash.String(), Equals, "1669dce138d9b841a518c64b10914d88f5e488ea")
-	c.Assert(idx.Entries[0].Offset, Equals, uint64(615))
-	c.Assert(idx.Entries[0].CRC32, Equals, uint32(3645019190))
+	count, _ := idx.Count()
+	c.Assert(count, Equals, int64(31))
+
+	hash := plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea")
+	ok, err := idx.Contains(hash)
+	c.Assert(err, IsNil)
+	c.Assert(ok, Equals, true)
+
+	offset, err := idx.FindOffset(hash)
+	c.Assert(err, IsNil)
+	c.Assert(offset, Equals, int64(615))
+
+	crc32, err := idx.FindCRC32(hash)
+	c.Assert(err, IsNil)
+	c.Assert(crc32, Equals, uint32(3645019190))
 
 	c.Assert(fmt.Sprintf("%x", idx.IdxChecksum), Equals, "fb794f1ec720b9bc8e43257451bd99c4be6fa1c9")
 	c.Assert(fmt.Sprintf("%x", idx.PackfileChecksum), Equals, f.PackfileHash.String())
 }
 
-func (s *IdxfileSuite) TestDecodeCRCs(c *C) {
-	f := fixtures.Basic().ByTag("ofs-delta").One()
-
-	scanner := packfile.NewScanner(f.Packfile())
-	storage := memory.NewStorage()
-
-	pd, err := packfile.NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-	_, err = pd.Decode()
-	c.Assert(err, IsNil)
-
-	i := pd.Index().ToIdxFile()
-	i.Version = VersionSupported
-
-	buf := bytes.NewBuffer(nil)
-	e := NewEncoder(buf)
-	_, err = e.Encode(i)
-	c.Assert(err, IsNil)
-
-	idx := &Idxfile{}
-
-	d := NewDecoder(buf)
-	err = d.Decode(idx)
-	c.Assert(err, IsNil)
-
-	c.Assert(idx.Entries, DeepEquals, i.Entries)
-}
-
 func (s *IdxfileSuite) TestDecode64bitsOffsets(c *C) {
 	f := bytes.NewBufferString(fixtureLarge4GB)
 
-	idx := &Idxfile{}
+	idx := new(MemoryIndex)
 
 	d := NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
 	err := d.Decode(idx)
@@ -88,29 +72,22 @@
 		"35858be9c6f5914cbe6768489c41eb6809a2bceb": 5924278919,
 	}
 
-	for _, e := range idx.Entries {
+	iter, err := idx.Entries()
+	c.Assert(err, IsNil)
+
+	var entries int
+	for {
+		e, err := iter.Next()
+		if err == io.EOF {
+			break
+		}
+		c.Assert(err, IsNil)
+		entries++
+
 		c.Assert(expected[e.Hash.String()], Equals, e.Offset)
 	}
-}
 
-func (s *IdxfileSuite) TestDecode64bitsOffsetsIdempotent(c *C) {
-	f := bytes.NewBufferString(fixtureLarge4GB)
-
-	expected := &Idxfile{}
-
-	d := NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
-	err := d.Decode(expected)
-	c.Assert(err, IsNil)
-
-	buf := bytes.NewBuffer(nil)
-	_, err = NewEncoder(buf).Encode(expected)
-	c.Assert(err, IsNil)
-
-	idx := &Idxfile{}
-	err = NewDecoder(buf).Decode(idx)
-	c.Assert(err, IsNil)
-
-	c.Assert(idx.Entries, DeepEquals, expected.Entries)
+	c.Assert(entries, Equals, len(expected))
 }
 
 const fixtureLarge4GB = `/3RPYwAAAAIAAAAAAAAAAAAAAAAAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEAAAABAAAAAQAAAAEA
@@ -139,3 +116,30 @@
 ANz1Di4AAAABPUnxJAAAAADNxzlGr6vCJpIFz4XaG/fi/f9C9zgQ8ptKSQpfQ1NMJBGTDTxxYGGp
 ch2xUA==
 `
+
+func BenchmarkDecode(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Errorf("unexpected error initializing fixtures: %s", err)
+	}
+
+	f := fixtures.Basic().One()
+	fixture, err := ioutil.ReadAll(f.Idx())
+	if err != nil {
+		b.Errorf("unexpected error reading idx file: %s", err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Errorf("unexpected error cleaning fixtures: %s", err)
+		}
+	}()
+
+	for i := 0; i < b.N; i++ {
+		f := bytes.NewBuffer(fixture)
+		idx := new(MemoryIndex)
+		d := NewDecoder(f)
+		if err := d.Decode(idx); err != nil {
+			b.Errorf("unexpected error decoding: %s", err)
+		}
+	}
+}
diff --git a/plumbing/format/idxfile/encoder.go b/plumbing/format/idxfile/encoder.go
index 40abfb8..e479511 100644
--- a/plumbing/format/idxfile/encoder.go
+++ b/plumbing/format/idxfile/encoder.go
@@ -4,12 +4,11 @@
 	"crypto/sha1"
 	"hash"
 	"io"
-	"sort"
 
 	"gopkg.in/src-d/go-git.v4/utils/binary"
 )
 
-// Encoder writes Idxfile structs to an output stream.
+// Encoder writes MemoryIndex structs to an output stream.
 type Encoder struct {
 	io.Writer
 	hash hash.Hash
@@ -22,11 +21,9 @@
 	return &Encoder{mw, h}
 }
 
-// Encode encodes an Idxfile to the encoder writer.
-func (e *Encoder) Encode(idx *Idxfile) (int, error) {
-	idx.Entries.Sort()
-
-	flow := []func(*Idxfile) (int, error){
+// Encode encodes a MemoryIndex to the encoder writer.
+func (e *Encoder) Encode(idx *MemoryIndex) (int, error) {
+	flow := []func(*MemoryIndex) (int, error){
 		e.encodeHeader,
 		e.encodeFanout,
 		e.encodeHashes,
@@ -48,7 +45,7 @@
 	return sz, nil
 }
 
-func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) {
+func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) {
 	c, err := e.Write(idxHeader)
 	if err != nil {
 		return c, err
@@ -57,75 +54,81 @@
 	return c + 4, binary.WriteUint32(e, idx.Version)
 }
 
-func (e *Encoder) encodeFanout(idx *Idxfile) (int, error) {
-	fanout := idx.calculateFanout()
-	for _, c := range fanout {
+func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) {
+	for _, c := range idx.Fanout {
 		if err := binary.WriteUint32(e, c); err != nil {
 			return 0, err
 		}
 	}
 
-	return 1024, nil
+	return fanout * 4, nil
 }
 
-func (e *Encoder) encodeHashes(idx *Idxfile) (int, error) {
-	sz := 0
-	for _, ent := range idx.Entries {
-		i, err := e.Write(ent.Hash[:])
-		sz += i
+func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) {
+	var size int
+	for k := 0; k < fanout; k++ {
+		pos := idx.FanoutMapping[k]
+		if pos == noMapping {
+			continue
+		}
 
+		n, err := e.Write(idx.Names[pos])
 		if err != nil {
-			return sz, err
+			return size, err
 		}
+		size += n
 	}
-
-	return sz, nil
+	return size, nil
 }
 
-func (e *Encoder) encodeCRC32(idx *Idxfile) (int, error) {
-	sz := 0
-	for _, ent := range idx.Entries {
-		err := binary.Write(e, ent.CRC32)
-		sz += 4
+func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) {
+	var size int
+	for k := 0; k < fanout; k++ {
+		pos := idx.FanoutMapping[k]
+		if pos == noMapping {
+			continue
+		}
 
+		n, err := e.Write(idx.CRC32[pos])
 		if err != nil {
-			return sz, err
+			return size, err
 		}
+
+		size += n
 	}
 
-	return sz, nil
+	return size, nil
 }
 
-func (e *Encoder) encodeOffsets(idx *Idxfile) (int, error) {
-	sz := 0
-
-	var o64bits []uint64
-	for _, ent := range idx.Entries {
-		o := ent.Offset
-		if o > offsetLimit {
-			o64bits = append(o64bits, o)
-			o = offsetLimit + uint64(len(o64bits))
+func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) {
+	var size int
+	for k := 0; k < fanout; k++ {
+		pos := idx.FanoutMapping[k]
+		if pos == noMapping {
+			continue
 		}
 
-		if err := binary.WriteUint32(e, uint32(o)); err != nil {
-			return sz, err
+		n, err := e.Write(idx.Offset32[pos])
+		if err != nil {
+			return size, err
 		}
 
-		sz += 4
+		size += n
 	}
 
-	for _, o := range o64bits {
-		if err := binary.WriteUint64(e, o); err != nil {
-			return sz, err
+	if len(idx.Offset64) > 0 {
+		n, err := e.Write(idx.Offset64)
+		if err != nil {
+			return size, err
 		}
 
-		sz += 8
+		size += n
 	}
 
-	return sz, nil
+	return size, nil
 }
 
-func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) {
+func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) {
 	if _, err := e.Write(idx.PackfileChecksum[:]); err != nil {
 		return 0, err
 	}
@@ -137,11 +140,3 @@
 
 	return 40, nil
 }
-
-// EntryList implements sort.Interface allowing sorting in increasing order.
-type EntryList []*Entry
-
-func (p EntryList) Len() int           { return len(p) }
-func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() }
-func (p EntryList) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-func (p EntryList) Sort()              { sort.Sort(p) }
diff --git a/plumbing/format/idxfile/encoder_test.go b/plumbing/format/idxfile/encoder_test.go
index e5b96b7..e8deeea 100644
--- a/plumbing/format/idxfile/encoder_test.go
+++ b/plumbing/format/idxfile/encoder_test.go
@@ -4,37 +4,18 @@
 	"bytes"
 	"io/ioutil"
 
-	"gopkg.in/src-d/go-git.v4/plumbing"
 	. "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
 )
 
-func (s *IdxfileSuite) TestEncode(c *C) {
-	expected := &Idxfile{}
-	expected.Add(plumbing.NewHash("4bfc730165c370df4a012afbb45ba3f9c332c0d4"), 82, 82)
-	expected.Add(plumbing.NewHash("8fa2238efdae08d83c12ee176fae65ff7c99af46"), 42, 42)
-
-	buf := bytes.NewBuffer(nil)
-	e := NewEncoder(buf)
-	_, err := e.Encode(expected)
-	c.Assert(err, IsNil)
-
-	idx := &Idxfile{}
-	d := NewDecoder(buf)
-	err = d.Decode(idx)
-	c.Assert(err, IsNil)
-
-	c.Assert(idx.Entries, DeepEquals, expected.Entries)
-}
-
 func (s *IdxfileSuite) TestDecodeEncode(c *C) {
 	fixtures.ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
 		expected, err := ioutil.ReadAll(f.Idx())
 		c.Assert(err, IsNil)
 
-		idx := &Idxfile{}
+		idx := new(MemoryIndex)
 		d := NewDecoder(bytes.NewBuffer(expected))
 		err = d.Decode(idx)
 		c.Assert(err, IsNil)
diff --git a/plumbing/format/idxfile/idxfile.go b/plumbing/format/idxfile/idxfile.go
index 6b05eaa..5fed278 100644
--- a/plumbing/format/idxfile/idxfile.go
+++ b/plumbing/format/idxfile/idxfile.go
@@ -1,30 +1,307 @@
 package idxfile
 
-import "gopkg.in/src-d/go-git.v4/plumbing"
+import (
+	"bytes"
+	"io"
+	"sort"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/utils/binary"
+)
 
 const (
 	// VersionSupported is the only idx version supported.
 	VersionSupported = 2
 
-	offsetLimit = 0x7fffffff
+	noMapping = -1
 )
 
 var (
 	idxHeader = []byte{255, 't', 'O', 'c'}
 )
 
-// Idxfile is the in memory representation of an idx file.
-type Idxfile struct {
-	Version          uint32
-	Fanout           [255]uint32
-	ObjectCount      uint32
-	Entries          EntryList
-	PackfileChecksum [20]byte
-	IdxChecksum      [20]byte
+// Index represents an index of a packfile.
+type Index interface {
+	// Contains checks whether the given hash is in the index.
+	Contains(h plumbing.Hash) (bool, error)
+	// FindOffset finds the offset in the packfile for the object with
+	// the given hash.
+	FindOffset(h plumbing.Hash) (int64, error)
+	// FindCRC32 finds the CRC32 of the object with the given hash.
+	FindCRC32(h plumbing.Hash) (uint32, error)
+	// FindHash finds the hash for the object with the given offset.
+	FindHash(o int64) (plumbing.Hash, error)
+	// Count returns the number of entries in the index.
+	Count() (int64, error)
+	// Entries returns an iterator to retrieve all index entries.
+	Entries() (EntryIter, error)
+	// EntriesByOffset returns an iterator to retrieve all index entries ordered
+	// by offset.
+	EntriesByOffset() (EntryIter, error)
 }
 
-func NewIdxfile() *Idxfile {
-	return &Idxfile{}
+// MemoryIndex is the in memory representation of an idx file.
+type MemoryIndex struct {
+	Version uint32
+	Fanout  [256]uint32
+	// FanoutMapping maps the position in the fanout table to the position
+	// in the Names, Offset32 and CRC32 slices. This improves the memory
+	// usage by not needing an array with unnecessary empty slots.
+	FanoutMapping    [256]int
+	Names            [][]byte
+	Offset32         [][]byte
+	CRC32            [][]byte
+	Offset64         []byte
+	PackfileChecksum [20]byte
+	IdxChecksum      [20]byte
+
+	offsetHash map[int64]plumbing.Hash
+}
+
+var _ Index = (*MemoryIndex)(nil)
+
+// NewMemoryIndex returns an instance of a new MemoryIndex.
+func NewMemoryIndex() *MemoryIndex {
+	return &MemoryIndex{}
+}
+
+func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) {
+	k := idx.FanoutMapping[h[0]]
+	if k == noMapping {
+		return 0, false
+	}
+
+	if len(idx.Names) <= k {
+		return 0, false
+	}
+
+	data := idx.Names[k]
+	high := uint64(len(idx.Offset32[k])) >> 2
+	if high == 0 {
+		return 0, false
+	}
+
+	low := uint64(0)
+	for {
+		mid := (low + high) >> 1
+		offset := mid * objectIDLength
+
+		cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength])
+		if cmp < 0 {
+			high = mid
+		} else if cmp == 0 {
+			return int(mid), true
+		} else {
+			low = mid + 1
+		}
+
+		if low >= high {
+			break
+		}
+	}
+
+	return 0, false
+}
+
+// Contains implements the Index interface.
+func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) {
+	_, ok := idx.findHashIndex(h)
+	return ok, nil
+}
+
+// FindOffset implements the Index interface.
+func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) {
+	if len(idx.FanoutMapping) <= int(h[0]) {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	k := idx.FanoutMapping[h[0]]
+	i, ok := idx.findHashIndex(h)
+	if !ok {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	return idx.getOffset(k, i)
+}
+
+const isO64Mask = uint64(1) << 31
+
+func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) (int64, error) {
+	offset := secondLevel << 2
+	buf := bytes.NewBuffer(idx.Offset32[firstLevel][offset : offset+4])
+	ofs, err := binary.ReadUint32(buf)
+	if err != nil {
+		return -1, err
+	}
+
+	if (uint64(ofs) & isO64Mask) != 0 {
+		offset := 8 * (uint64(ofs) & ^isO64Mask)
+		buf := bytes.NewBuffer(idx.Offset64[offset : offset+8])
+		n, err := binary.ReadUint64(buf)
+		if err != nil {
+			return -1, err
+		}
+
+		return int64(n), nil
+	}
+
+	return int64(ofs), nil
+}
+
+// FindCRC32 implements the Index interface.
+func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) {
+	k := idx.FanoutMapping[h[0]]
+	i, ok := idx.findHashIndex(h)
+	if !ok {
+		return 0, plumbing.ErrObjectNotFound
+	}
+
+	return idx.getCRC32(k, i)
+}
+
+func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) (uint32, error) {
+	offset := secondLevel << 2
+	buf := bytes.NewBuffer(idx.CRC32[firstLevel][offset : offset+4])
+	return binary.ReadUint32(buf)
+}
+
+// FindHash implements the Index interface.
+func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) {
+	// Lazily generate the reverse offset/hash map if required.
+	if idx.offsetHash == nil {
+		if err := idx.genOffsetHash(); err != nil {
+			return plumbing.ZeroHash, err
+		}
+	}
+
+	hash, ok := idx.offsetHash[o]
+	if !ok {
+		return plumbing.ZeroHash, plumbing.ErrObjectNotFound
+	}
+
+	return hash, nil
+}
+
+// genOffsetHash generates the offset/hash mapping for reverse search.
+func (idx *MemoryIndex) genOffsetHash() error {
+	count, err := idx.Count()
+	if err != nil {
+		return err
+	}
+
+	idx.offsetHash = make(map[int64]plumbing.Hash, count)
+
+	iter, err := idx.Entries()
+	if err != nil {
+		return err
+	}
+
+	for {
+		entry, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+
+		idx.offsetHash[int64(entry.Offset)] = entry.Hash
+	}
+}
+
+// Count implements the Index interface.
+func (idx *MemoryIndex) Count() (int64, error) {
+	return int64(idx.Fanout[fanout-1]), nil
+}
+
+// Entries implements the Index interface.
+func (idx *MemoryIndex) Entries() (EntryIter, error) {
+	return &idxfileEntryIter{idx, 0, 0, 0}, nil
+}
+
+// EntriesByOffset implements the Index interface.
+func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) {
+	count, err := idx.Count()
+	if err != nil {
+		return nil, err
+	}
+
+	iter := &idxfileEntryOffsetIter{
+		entries: make(entriesByOffset, count),
+	}
+
+	entries, err := idx.Entries()
+	if err != nil {
+		return nil, err
+	}
+
+	for pos := 0; int64(pos) < count; pos++ {
+		entry, err := entries.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		iter.entries[pos] = entry
+	}
+
+	sort.Sort(iter.entries)
+
+	return iter, nil
+}
+
+// EntryIter is an iterator that will return the entries in a packfile index.
+type EntryIter interface {
+	// Next returns the next entry in the packfile index.
+	Next() (*Entry, error)
+	// Close closes the iterator.
+	Close() error
+}
+
+type idxfileEntryIter struct {
+	idx                     *MemoryIndex
+	total                   int
+	firstLevel, secondLevel int
+}
+
+func (i *idxfileEntryIter) Next() (*Entry, error) {
+	for {
+		if i.firstLevel >= fanout {
+			return nil, io.EOF
+		}
+
+		if i.total >= int(i.idx.Fanout[i.firstLevel]) {
+			i.firstLevel++
+			i.secondLevel = 0
+			continue
+		}
+
+		entry := new(Entry)
+		ofs := i.secondLevel * objectIDLength
+		copy(entry.Hash[:], i.idx.Names[i.idx.FanoutMapping[i.firstLevel]][ofs:])
+
+		pos := i.idx.FanoutMapping[entry.Hash[0]]
+
+		offset, err := i.idx.getOffset(pos, i.secondLevel)
+		if err != nil {
+			return nil, err
+		}
+		entry.Offset = uint64(offset)
+
+		entry.CRC32, err = i.idx.getCRC32(pos, i.secondLevel)
+		if err != nil {
+			return nil, err
+		}
+
+		i.secondLevel++
+		i.total++
+
+		return entry, nil
+	}
+}
+
+func (i *idxfileEntryIter) Close() error {
+	i.firstLevel = fanout
+	return nil
 }
 
 // Entry is the in memory representation of an object entry in the idx file.
@@ -34,35 +311,37 @@
 	Offset uint64
 }
 
-// Add adds a new Entry with the given values to the Idxfile.
-func (idx *Idxfile) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
-	idx.Entries = append(idx.Entries, &Entry{
-		Hash:   h,
-		Offset: offset,
-		CRC32:  crc32,
-	})
+type idxfileEntryOffsetIter struct {
+	entries entriesByOffset
+	pos     int
 }
 
-func (idx *Idxfile) isValid() bool {
-	fanout := idx.calculateFanout()
-	for k, c := range idx.Fanout {
-		if fanout[k] != c {
-			return false
-		}
+func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
+	if i.pos >= len(i.entries) {
+		return nil, io.EOF
 	}
 
-	return true
+	entry := i.entries[i.pos]
+	i.pos++
+
+	return entry, nil
 }
 
-func (idx *Idxfile) calculateFanout() [256]uint32 {
-	fanout := [256]uint32{}
-	for _, e := range idx.Entries {
-		fanout[e.Hash[0]]++
-	}
+func (i *idxfileEntryOffsetIter) Close() error {
+	i.pos = len(i.entries) + 1
+	return nil
+}
 
-	for i := 1; i < 256; i++ {
-		fanout[i] += fanout[i-1]
-	}
+type entriesByOffset []*Entry
 
-	return fanout
+func (o entriesByOffset) Len() int {
+	return len(o)
+}
+
+func (o entriesByOffset) Less(i int, j int) bool {
+	return o[i].Offset < o[j].Offset
+}
+
+func (o entriesByOffset) Swap(i int, j int) {
+	o[i], o[j] = o[j], o[i]
 }
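Both iterators introduced above drain the same entries, differing only in order; a sketch of walking the index in hash order (EntriesByOffset works the same way, sorted by pack offset):

```go
package example // illustrative only

import (
	"fmt"
	"io"

	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

// dumpEntries walks a decoded MemoryIndex in hash order.
func dumpEntries(idx *idxfile.MemoryIndex) error {
	iter, err := idx.Entries()
	if err != nil {
		return err
	}
	defer iter.Close()

	for {
		e, err := iter.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s offset=%d crc=%08x\n", e.Hash, e.Offset, e.CRC32)
	}
}
```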
diff --git a/plumbing/format/idxfile/idxfile_test.go b/plumbing/format/idxfile/idxfile_test.go
new file mode 100644
index 0000000..0e0ca2a
--- /dev/null
+++ b/plumbing/format/idxfile/idxfile_test.go
@@ -0,0 +1,169 @@
+package idxfile_test
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"testing"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+func BenchmarkFindOffset(b *testing.B) {
+	idx, err := fixtureIndex()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		for _, h := range fixtureHashes {
+			_, err := idx.FindOffset(h)
+			if err != nil {
+				b.Fatalf("error getting offset: %s", err)
+			}
+		}
+	}
+}
+
+func BenchmarkFindCRC32(b *testing.B) {
+	idx, err := fixtureIndex()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		for _, h := range fixtureHashes {
+			_, err := idx.FindCRC32(h)
+			if err != nil {
+				b.Fatalf("error getting crc32: %s", err)
+			}
+		}
+	}
+}
+
+func BenchmarkContains(b *testing.B) {
+	idx, err := fixtureIndex()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		for _, h := range fixtureHashes {
+			ok, err := idx.Contains(h)
+			if err != nil {
+				b.Fatalf("error checking if hash is in index: %s", err)
+			}
+
+			if !ok {
+				b.Error("expected hash to be in index")
+			}
+		}
+	}
+}
+
+func BenchmarkEntries(b *testing.B) {
+	idx, err := fixtureIndex()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	for i := 0; i < b.N; i++ {
+		iter, err := idx.Entries()
+		if err != nil {
+			b.Fatalf("unexpected error getting entries: %s", err)
+		}
+
+		var entries int
+		for {
+			_, err := iter.Next()
+			if err != nil {
+				if err == io.EOF {
+					break
+				}
+
+				b.Errorf("unexpected error getting entry: %s", err)
+			}
+
+			entries++
+		}
+
+		if entries != len(fixtureHashes) {
+			b.Errorf("expecting entries to be %d, got %d", len(fixtureHashes), entries)
+		}
+	}
+}
+
+type IndexSuite struct {
+	fixtures.Suite
+}
+
+var _ = Suite(&IndexSuite{})
+
+func (s *IndexSuite) TestFindHash(c *C) {
+	idx, err := fixtureIndex()
+	c.Assert(err, IsNil)
+
+	for i, pos := range fixtureOffsets {
+		hash, err := idx.FindHash(pos)
+		c.Assert(err, IsNil)
+		c.Assert(hash, Equals, fixtureHashes[i])
+	}
+}
+
+func (s *IndexSuite) TestEntriesByOffset(c *C) {
+	idx, err := fixtureIndex()
+	c.Assert(err, IsNil)
+
+	entries, err := idx.EntriesByOffset()
+	c.Assert(err, IsNil)
+
+	for _, pos := range fixtureOffsets {
+		e, err := entries.Next()
+		c.Assert(err, IsNil)
+
+		c.Assert(e.Offset, Equals, uint64(pos))
+	}
+}
+
+var fixtureHashes = []plumbing.Hash{
+	plumbing.NewHash("303953e5aa461c203a324821bc1717f9b4fff895"),
+	plumbing.NewHash("5296768e3d9f661387ccbff18c4dea6c997fd78c"),
+	plumbing.NewHash("03fc8d58d44267274edef4585eaeeb445879d33f"),
+	plumbing.NewHash("8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772"),
+	plumbing.NewHash("e0d1d625010087f79c9e01ad9d8f95e1628dda02"),
+	plumbing.NewHash("90eba326cdc4d1d61c5ad25224ccbf08731dd041"),
+	plumbing.NewHash("bab53055add7bc35882758a922c54a874d6b1272"),
+	plumbing.NewHash("1b8995f51987d8a449ca5ea4356595102dc2fbd4"),
+	plumbing.NewHash("35858be9c6f5914cbe6768489c41eb6809a2bceb"),
+}
+
+var fixtureOffsets = []int64{
+	12,
+	142,
+	1601322837,
+	2646996529,
+	3452385606,
+	3707047470,
+	5323223332,
+	5894072943,
+	5924278919,
+}
+
+func fixtureIndex() (*idxfile.MemoryIndex, error) {
+	f := bytes.NewBufferString(fixtureLarge4GB)
+
+	idx := new(idxfile.MemoryIndex)
+
+	d := idxfile.NewDecoder(base64.NewDecoder(base64.StdEncoding, f))
+	err := d.Decode(idx)
+	if err != nil {
+		return nil, fmt.Errorf("unexpected error decoding index: %s", err)
+	}
+
+	return idx, nil
+}
diff --git a/plumbing/format/idxfile/writer.go b/plumbing/format/idxfile/writer.go
new file mode 100644
index 0000000..aa919e7
--- /dev/null
+++ b/plumbing/format/idxfile/writer.go
@@ -0,0 +1,187 @@
+package idxfile
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/utils/binary"
+)
+
+// objects implements sort.Interface, using the entry hash as the sorting key.
+type objects []Entry
+
+// Writer implements the packfile.Observer interface and is used to generate
+// indexes.
+type Writer struct {
+	m sync.Mutex
+
+	count    uint32
+	checksum plumbing.Hash
+	objects  objects
+	offset64 uint32
+	finished bool
+	index    *MemoryIndex
+	added    map[plumbing.Hash]struct{}
+}
+
+// Index returns a previously created MemoryIndex or creates a new one if
+// needed.
+func (w *Writer) Index() (*MemoryIndex, error) {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	if w.index == nil {
+		return w.createIndex()
+	}
+
+	return w.index, nil
+}
+
+// Add appends new object data.
+func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	if w.added == nil {
+		w.added = make(map[plumbing.Hash]struct{})
+	}
+
+	if _, ok := w.added[h]; !ok {
+		w.added[h] = struct{}{}
+		w.objects = append(w.objects, Entry{h, crc, pos})
+	}
+
+}
+
+func (w *Writer) Finished() bool {
+	return w.finished
+}
+
+// OnHeader implements the packfile.Observer interface.
+func (w *Writer) OnHeader(count uint32) error {
+	w.count = count
+	w.objects = make(objects, 0, count)
+	return nil
+}
+
+// OnInflatedObjectHeader implements the packfile.Observer interface.
+func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
+	return nil
+}
+
+// OnInflatedObjectContent implements the packfile.Observer interface.
+func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
+	w.Add(h, uint64(pos), crc)
+	return nil
+}
+
+// OnFooter implements the packfile.Observer interface.
+func (w *Writer) OnFooter(h plumbing.Hash) error {
+	w.checksum = h
+	w.finished = true
+	_, err := w.createIndex()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// createIndex returns a MemoryIndex filled with the information gathered by
+// the observer callbacks.
+func (w *Writer) createIndex() (*MemoryIndex, error) {
+	if !w.finished {
+		return nil, fmt.Errorf("the index still hasn't finished building")
+	}
+
+	idx := new(MemoryIndex)
+	w.index = idx
+
+	sort.Sort(w.objects)
+
+	// unmap all fanout entries by default
+	for i := range idx.FanoutMapping {
+		idx.FanoutMapping[i] = noMapping
+	}
+
+	buf := new(bytes.Buffer)
+
+	last := -1
+	bucket := -1
+	for i, o := range w.objects {
+		fan := o.Hash[0]
+
+		// fill the gaps between fanout entries
+		for j := last + 1; j < int(fan); j++ {
+			idx.Fanout[j] = uint32(i)
+		}
+
+		// update the number of objects for this position
+		idx.Fanout[fan] = uint32(i + 1)
+
+		// we move from one bucket to another, update counters and allocate
+		// memory
+		if last != int(fan) {
+			bucket++
+			idx.FanoutMapping[fan] = bucket
+			last = int(fan)
+
+			idx.Names = append(idx.Names, make([]byte, 0))
+			idx.Offset32 = append(idx.Offset32, make([]byte, 0))
+			idx.CRC32 = append(idx.CRC32, make([]byte, 0))
+		}
+
+		idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...)
+
+		offset := o.Offset
+		if offset > math.MaxInt32 {
+			offset = w.addOffset64(offset)
+		}
+
+		buf.Truncate(0)
+		binary.WriteUint32(buf, uint32(offset))
+		idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...)
+
+		buf.Truncate(0)
+		binary.WriteUint32(buf, uint32(o.CRC32))
+		idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...)
+	}
+
+	for j := last + 1; j < 256; j++ {
+		idx.Fanout[j] = uint32(len(w.objects))
+	}
+
+	idx.Version = VersionSupported
+	idx.PackfileChecksum = w.checksum
+
+	return idx, nil
+}
+
+func (w *Writer) addOffset64(pos uint64) uint64 {
+	buf := new(bytes.Buffer)
+	binary.WriteUint64(buf, pos)
+	w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
+
+	index := uint64(w.offset64 | (1 << 31))
+	w.offset64++
+
+	return index
+}
+
+func (o objects) Len() int {
+	return len(o)
+}
+
+func (o objects) Less(i int, j int) bool {
+	cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:])
+	return cmp < 0
+}
+
+func (o objects) Swap(i int, j int) {
+	o[i], o[j] = o[j], o[i]
+}
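The fanout bookkeeping in createIndex is easiest to see on a toy input. The walk-through below is illustrative only, not part of the patch:

```go
// Three objects whose hashes start with bytes 0x00, 0x00 and 0x02:
//
//   idx.Fanout[0x00] = 2   // cumulative count of objects with first byte <= 0x00
//   idx.Fanout[0x01] = 2   // gap filled with the running count i
//   idx.Fanout[0x02] = 3   // the 0x02 object is included here
//   ...
//   idx.Fanout[0xff] = 3   // trailing entries filled with len(w.objects)
//
// Only buckets 0x00 and 0x02 hold data, so FanoutMapping compacts them:
//
//   idx.FanoutMapping[0x00] = 0   // first slot of Names/Offset32/CRC32
//   idx.FanoutMapping[0x02] = 1   // second slot
//   // every other entry stays at noMapping
```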
diff --git a/plumbing/format/idxfile/writer_test.go b/plumbing/format/idxfile/writer_test.go
new file mode 100644
index 0000000..912211d
--- /dev/null
+++ b/plumbing/format/idxfile/writer_test.go
@@ -0,0 +1,98 @@
+package idxfile_test
+
+import (
+	"bytes"
+	"encoding/base64"
+	"io/ioutil"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+type WriterSuite struct {
+	fixtures.Suite
+}
+
+var _ = Suite(&WriterSuite{})
+
+func (s *WriterSuite) TestWriter(c *C) {
+	f := fixtures.Basic().One()
+	scanner := packfile.NewScanner(f.Packfile())
+
+	obs := new(idxfile.Writer)
+	parser, err := packfile.NewParser(scanner, obs)
+	c.Assert(err, IsNil)
+
+	_, err = parser.Parse()
+	c.Assert(err, IsNil)
+
+	idx, err := obs.Index()
+	c.Assert(err, IsNil)
+
+	idxFile := f.Idx()
+	expected, err := ioutil.ReadAll(idxFile)
+	c.Assert(err, IsNil)
+	idxFile.Close()
+
+	buf := new(bytes.Buffer)
+	encoder := idxfile.NewEncoder(buf)
+	n, err := encoder.Encode(idx)
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, len(expected))
+
+	c.Assert(buf.Bytes(), DeepEquals, expected)
+}
+
+func (s *WriterSuite) TestWriterLarge(c *C) {
+	writer := new(idxfile.Writer)
+	err := writer.OnHeader(uint32(len(fixture4GbEntries)))
+	c.Assert(err, IsNil)
+
+	for _, o := range fixture4GbEntries {
+		err = writer.OnInflatedObjectContent(plumbing.NewHash(o.hash), o.offset, o.crc, nil)
+		c.Assert(err, IsNil)
+	}
+
+	err = writer.OnFooter(fixture4GbChecksum)
+	c.Assert(err, IsNil)
+
+	idx, err := writer.Index()
+	c.Assert(err, IsNil)
+
+	// load fixture index
+	f := bytes.NewBufferString(fixtureLarge4GB)
+	expected, err := ioutil.ReadAll(base64.NewDecoder(base64.StdEncoding, f))
+	c.Assert(err, IsNil)
+
+	buf := new(bytes.Buffer)
+	encoder := idxfile.NewEncoder(buf)
+	n, err := encoder.Encode(idx)
+	c.Assert(err, IsNil)
+	c.Assert(n, Equals, len(expected))
+
+	c.Assert(buf.Bytes(), DeepEquals, expected)
+}
+
+var (
+	fixture4GbChecksum = plumbing.NewHash("afabc2269205cf85da1bf7e2fdff42f73810f29b")
+
+	fixture4GbEntries = []struct {
+		offset int64
+		hash   string
+		crc    uint32
+	}{
+		{12, "303953e5aa461c203a324821bc1717f9b4fff895", 0xbc347c4c},
+		{142, "5296768e3d9f661387ccbff18c4dea6c997fd78c", 0xcdc22842},
+		{1601322837, "03fc8d58d44267274edef4585eaeeb445879d33f", 0x929dfaaa},
+		{2646996529, "8f3ceb4ea4cb9e4a0f751795eb41c9a4f07be772", 0xa61def8a},
+		{3452385606, "e0d1d625010087f79c9e01ad9d8f95e1628dda02", 0x06bea180},
+		{3707047470, "90eba326cdc4d1d61c5ad25224ccbf08731dd041", 0x7193f3ba},
+		{5323223332, "bab53055add7bc35882758a922c54a874d6b1272", 0xac269b8e},
+		{5894072943, "1b8995f51987d8a449ca5ea4356595102dc2fbd4", 0x2187c056},
+		{5924278919, "35858be9c6f5914cbe6768489c41eb6809a2bceb", 0x9c89d9d2},
+	}
+)
diff --git a/plumbing/format/packfile/common.go b/plumbing/format/packfile/common.go
index beb015d..2b4aceb 100644
--- a/plumbing/format/packfile/common.go
+++ b/plumbing/format/packfile/common.go
@@ -23,24 +23,28 @@
 	maskType        = uint8(112) // 0111 0000
 )
 
-// UpdateObjectStorage updates the given storer.EncodedObjectStorer with the contents of the
+// UpdateObjectStorage updates the storer with the objects in the given
 // packfile.
-func UpdateObjectStorage(s storer.EncodedObjectStorer, packfile io.Reader) error {
-	if sw, ok := s.(storer.PackfileWriter); ok {
-		return writePackfileToObjectStorage(sw, packfile)
+func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error {
+	if pw, ok := s.(storer.PackfileWriter); ok {
+		return WritePackfileToObjectStorage(pw, packfile)
 	}
 
-	stream := NewScanner(packfile)
-	d, err := NewDecoder(stream, s)
+	p, err := NewParserWithStorage(NewScanner(packfile), s)
 	if err != nil {
 		return err
 	}
 
-	_, err = d.Decode()
+	_, err = p.Parse()
 	return err
 }
 
-func writePackfileToObjectStorage(sw storer.PackfileWriter, packfile io.Reader) (err error) {
+// WritePackfileToObjectStorage writes all the packfile objects into the given
+// object storage.
+func WritePackfileToObjectStorage(
+	sw storer.PackfileWriter,
+	packfile io.Reader,
+) (err error) {
 	w, err := sw.PackfileWriter()
 	if err != nil {
 		return err
diff --git a/plumbing/format/packfile/decoder.go b/plumbing/format/packfile/decoder.go
deleted file mode 100644
index f706e5d..0000000
--- a/plumbing/format/packfile/decoder.go
+++ /dev/null
@@ -1,495 +0,0 @@
-package packfile
-
-import (
-	"bytes"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/plumbing/cache"
-	"gopkg.in/src-d/go-git.v4/plumbing/storer"
-)
-
-// Format specifies if the packfile uses ref-deltas or ofs-deltas.
-type Format int
-
-// Possible values of the Format type.
-const (
-	UnknownFormat Format = iota
-	OFSDeltaFormat
-	REFDeltaFormat
-)
-
-var (
-	// ErrMaxObjectsLimitReached is returned by Decode when the number
-	// of objects in the packfile is higher than
-	// Decoder.MaxObjectsLimit.
-	ErrMaxObjectsLimitReached = NewError("max. objects limit reached")
-	// ErrInvalidObject is returned by Decode when an invalid object is
-	// found in the packfile.
-	ErrInvalidObject = NewError("invalid git object")
-	// ErrPackEntryNotFound is returned by Decode when a reference in
-	// the packfile references and unknown object.
-	ErrPackEntryNotFound = NewError("can't find a pack entry")
-	// ErrZLib is returned by Decode when there was an error unzipping
-	// the packfile contents.
-	ErrZLib = NewError("zlib reading error")
-	// ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object
-	// to recall cannot be returned.
-	ErrCannotRecall = NewError("cannot recall object")
-	// ErrResolveDeltasNotSupported is returned if a NewDecoder is used with a
-	// non-seekable scanner and without a plumbing.ObjectStorage
-	ErrResolveDeltasNotSupported = NewError("resolve delta is not supported")
-	// ErrNonSeekable is returned if a ReadObjectAt method is called without a
-	// seekable scanner
-	ErrNonSeekable = NewError("non-seekable scanner")
-	// ErrRollback error making Rollback over a transaction after an error
-	ErrRollback = NewError("rollback error, during set error")
-	// ErrAlreadyDecoded is returned if NewDecoder is called for a second time
-	ErrAlreadyDecoded = NewError("packfile was already decoded")
-)
-
-// Decoder reads and decodes packfiles from an input Scanner, if an ObjectStorer
-// was provided the decoded objects are store there. If not the decode object
-// is destroyed. The Offsets and CRCs are calculated whether an
-// ObjectStorer was provided or not.
-type Decoder struct {
-	deltaBaseCache cache.Object
-
-	s  *Scanner
-	o  storer.EncodedObjectStorer
-	tx storer.Transaction
-
-	isDecoded bool
-
-	// hasBuiltIndex indicates if the index is fully built or not. If it is not,
-	// will be built incrementally while decoding.
-	hasBuiltIndex bool
-	idx           *Index
-
-	offsetToType map[int64]plumbing.ObjectType
-	decoderType  plumbing.ObjectType
-}
-
-// NewDecoder returns a new Decoder that decodes a Packfile using the given
-// Scanner and stores the objects in the provided EncodedObjectStorer. ObjectStorer can be nil, in this
-// If the passed EncodedObjectStorer is nil, objects are not stored, but
-// offsets on the Packfile and CRCs are calculated.
-//
-// If EncodedObjectStorer is nil and the Scanner is not Seekable, ErrNonSeekable is
-// returned.
-//
-// If the ObjectStorer implements storer.Transactioner, a transaction is created
-// during the Decode execution. If anything fails, Rollback is called
-func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) {
-	return NewDecoderForType(s, o, plumbing.AnyObject,
-		cache.NewObjectLRUDefault())
-}
-
-// NewDecoderWithCache is a version of NewDecoder where cache can be specified.
-func NewDecoderWithCache(s *Scanner, o storer.EncodedObjectStorer,
-	cacheObject cache.Object) (*Decoder, error) {
-
-	return NewDecoderForType(s, o, plumbing.AnyObject, cacheObject)
-}
-
-// NewDecoderForType returns a new Decoder but in this case for a specific object type.
-// When an object is read using this Decoder instance and it is not of the same type of
-// the specified one, nil will be returned. This is intended to avoid the content
-// deserialization of all the objects.
-//
-// cacheObject is a cache.Object implementation that is used to speed up the
-// process. If cache is not needed you can pass nil. To create an LRU cache
-// object with the default size you can use the helper cache.ObjectLRUDefault().
-func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer,
-	t plumbing.ObjectType, cacheObject cache.Object) (*Decoder, error) {
-
-	if t == plumbing.OFSDeltaObject ||
-		t == plumbing.REFDeltaObject ||
-		t == plumbing.InvalidObject {
-		return nil, plumbing.ErrInvalidType
-	}
-
-	if !canResolveDeltas(s, o) {
-		return nil, ErrResolveDeltasNotSupported
-	}
-
-	return &Decoder{
-		s:              s,
-		o:              o,
-		deltaBaseCache: cacheObject,
-
-		idx:          NewIndex(0),
-		offsetToType: make(map[int64]plumbing.ObjectType),
-		decoderType:  t,
-	}, nil
-}
-
-func canResolveDeltas(s *Scanner, o storer.EncodedObjectStorer) bool {
-	return s.IsSeekable || o != nil
-}
-
-// Decode reads a packfile and stores it in the value pointed to by s. The
-// offsets and the CRCs are calculated by this method
-func (d *Decoder) Decode() (checksum plumbing.Hash, err error) {
-	defer func() { d.isDecoded = true }()
-
-	if d.isDecoded {
-		return plumbing.ZeroHash, ErrAlreadyDecoded
-	}
-
-	if err := d.doDecode(); err != nil {
-		return plumbing.ZeroHash, err
-	}
-
-	return d.s.Checksum()
-}
-
-func (d *Decoder) doDecode() error {
-	_, count, err := d.s.Header()
-	if err != nil {
-		return err
-	}
-
-	if !d.hasBuiltIndex {
-		d.idx = NewIndex(int(count))
-	}
-	defer func() { d.hasBuiltIndex = true }()
-
-	_, isTxStorer := d.o.(storer.Transactioner)
-	switch {
-	case d.o == nil:
-		return d.decodeObjects(int(count))
-	case isTxStorer:
-		return d.decodeObjectsWithObjectStorerTx(int(count))
-	default:
-		return d.decodeObjectsWithObjectStorer(int(count))
-	}
-}
-
-func (d *Decoder) decodeObjects(count int) error {
-	for i := 0; i < count; i++ {
-		if _, err := d.DecodeObject(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorer(count int) error {
-	for i := 0; i < count; i++ {
-		obj, err := d.DecodeObject()
-		if err != nil {
-			return err
-		}
-
-		if _, err := d.o.SetEncodedObject(obj); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeObjectsWithObjectStorerTx(count int) error {
-	d.tx = d.o.(storer.Transactioner).Begin()
-
-	for i := 0; i < count; i++ {
-		obj, err := d.DecodeObject()
-		if err != nil {
-			return err
-		}
-
-		if _, err := d.tx.SetEncodedObject(obj); err != nil {
-			if rerr := d.tx.Rollback(); rerr != nil {
-				return ErrRollback.AddDetails(
-					"error: %s, during tx.Set error: %s", rerr, err,
-				)
-			}
-
-			return err
-		}
-
-	}
-
-	return d.tx.Commit()
-}
-
-// DecodeObject reads the next object from the scanner and returns it. This
-// method can be used in replacement of the Decode method, to work in a
-// interactive way. If you created a new decoder instance using NewDecoderForType
-// constructor, if the object decoded is not equals to the specified one, nil will
-// be returned
-func (d *Decoder) DecodeObject() (plumbing.EncodedObject, error) {
-	return d.doDecodeObject(d.decoderType)
-}
-
-func (d *Decoder) doDecodeObject(t plumbing.ObjectType) (plumbing.EncodedObject, error) {
-	h, err := d.s.NextObjectHeader()
-	if err != nil {
-		return nil, err
-	}
-
-	if t == plumbing.AnyObject {
-		return d.decodeByHeader(h)
-	}
-
-	return d.decodeIfSpecificType(h)
-}
-
-func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject, error) {
-	var (
-		obj      plumbing.EncodedObject
-		realType plumbing.ObjectType
-		err      error
-	)
-	switch h.Type {
-	case plumbing.OFSDeltaObject:
-		realType, err = d.ofsDeltaType(h.OffsetReference)
-	case plumbing.REFDeltaObject:
-		realType, err = d.refDeltaType(h.Reference)
-		if err == plumbing.ErrObjectNotFound {
-			obj, err = d.decodeByHeader(h)
-			if err != nil {
-				realType = obj.Type()
-			}
-		}
-	default:
-		realType = h.Type
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	d.offsetToType[h.Offset] = realType
-
-	if d.decoderType == realType {
-		if obj != nil {
-			return obj, nil
-		}
-
-		return d.decodeByHeader(h)
-	}
-
-	return nil, nil
-}
-
-func (d *Decoder) ofsDeltaType(offset int64) (plumbing.ObjectType, error) {
-	t, ok := d.offsetToType[offset]
-	if !ok {
-		return plumbing.InvalidObject, plumbing.ErrObjectNotFound
-	}
-
-	return t, nil
-}
-
-func (d *Decoder) refDeltaType(ref plumbing.Hash) (plumbing.ObjectType, error) {
-	e, ok := d.idx.LookupHash(ref)
-	if !ok {
-		return plumbing.InvalidObject, plumbing.ErrObjectNotFound
-	}
-
-	return d.ofsDeltaType(int64(e.Offset))
-}
-
-func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error) {
-	obj := d.newObject()
-	obj.SetSize(h.Length)
-	obj.SetType(h.Type)
-
-	var crc uint32
-	var err error
-	switch h.Type {
-	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
-		crc, err = d.fillRegularObjectContent(obj)
-	case plumbing.REFDeltaObject:
-		crc, err = d.fillREFDeltaObjectContent(obj, h.Reference)
-	case plumbing.OFSDeltaObject:
-		crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference)
-	default:
-		err = ErrInvalidObject.AddDetails("type %q", h.Type)
-	}
-
-	if err != nil {
-		return obj, err
-	}
-
-	if !d.hasBuiltIndex {
-		d.idx.Add(obj.Hash(), uint64(h.Offset), crc)
-	}
-
-	return obj, nil
-}
-
-func (d *Decoder) newObject() plumbing.EncodedObject {
-	if d.o == nil {
-		return &plumbing.MemoryObject{}
-	}
-
-	return d.o.NewEncodedObject()
-}
-
-// DecodeObjectAt reads an object at the given location. Every EncodedObject
-// returned is added into a internal index. This is intended to be able to regenerate
-// objects from deltas (offset deltas or reference deltas) without an package index
-// (.idx file). If Decode wasn't called previously objects offset should provided
-// using the SetOffsets method. It decodes the object regardless of the Decoder
-// type.
-func (d *Decoder) DecodeObjectAt(offset int64) (plumbing.EncodedObject, error) {
-	if !d.s.IsSeekable {
-		return nil, ErrNonSeekable
-	}
-
-	beforeJump, err := d.s.SeekFromStart(offset)
-	if err != nil {
-		return nil, err
-	}
-
-	defer func() {
-		_, seekErr := d.s.SeekFromStart(beforeJump)
-		if err == nil {
-			err = seekErr
-		}
-	}()
-
-	return d.doDecodeObject(plumbing.AnyObject)
-}
-
-func (d *Decoder) fillRegularObjectContent(obj plumbing.EncodedObject) (uint32, error) {
-	w, err := obj.Writer()
-	if err != nil {
-		return 0, err
-	}
-
-	_, crc, err := d.s.NextObject(w)
-	return crc, err
-}
-
-func (d *Decoder) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) (uint32, error) {
-	buf := bufPool.Get().(*bytes.Buffer)
-	buf.Reset()
-	_, crc, err := d.s.NextObject(buf)
-	if err != nil {
-		return 0, err
-	}
-
-	base, ok := d.cacheGet(ref)
-	if !ok {
-		base, err = d.recallByHash(ref)
-		if err != nil {
-			return 0, err
-		}
-	}
-
-	obj.SetType(base.Type())
-	err = ApplyDelta(obj, base, buf.Bytes())
-	d.cachePut(obj)
-	bufPool.Put(buf)
-
-	return crc, err
-}
-
-func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) (uint32, error) {
-	buf := bytes.NewBuffer(nil)
-	_, crc, err := d.s.NextObject(buf)
-	if err != nil {
-		return 0, err
-	}
-
-	e, ok := d.idx.LookupOffset(uint64(offset))
-	var base plumbing.EncodedObject
-	if ok {
-		base, ok = d.cacheGet(e.Hash)
-	}
-
-	if !ok {
-		base, err = d.recallByOffset(offset)
-		if err != nil {
-			return 0, err
-		}
-
-		d.cachePut(base)
-	}
-
-	obj.SetType(base.Type())
-	err = ApplyDelta(obj, base, buf.Bytes())
-	d.cachePut(obj)
-
-	return crc, err
-}
-
-func (d *Decoder) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
-	if d.deltaBaseCache == nil {
-		return nil, false
-	}
-
-	return d.deltaBaseCache.Get(h)
-}
-
-func (d *Decoder) cachePut(obj plumbing.EncodedObject) {
-	if d.deltaBaseCache == nil {
-		return
-	}
-
-	d.deltaBaseCache.Put(obj)
-}
-
-func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) {
-	if d.s.IsSeekable {
-		return d.DecodeObjectAt(o)
-	}
-
-	if e, ok := d.idx.LookupOffset(uint64(o)); ok {
-		return d.recallByHashNonSeekable(e.Hash)
-	}
-
-	return nil, plumbing.ErrObjectNotFound
-}
-
-func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) {
-	if d.s.IsSeekable {
-		if e, ok := d.idx.LookupHash(h); ok {
-			return d.DecodeObjectAt(int64(e.Offset))
-		}
-	}
-
-	return d.recallByHashNonSeekable(h)
-}
-
-// recallByHashNonSeekable reads the object from the transaction if one is
-// active; otherwise it is read directly from the ObjectStorer.
-func (d *Decoder) recallByHashNonSeekable(h plumbing.Hash) (obj plumbing.EncodedObject, err error) {
-	if d.tx != nil {
-		obj, err = d.tx.EncodedObject(plumbing.AnyObject, h)
-	} else {
-		obj, err = d.o.EncodedObject(plumbing.AnyObject, h)
-	}
-
-	if err != plumbing.ErrObjectNotFound {
-		return obj, err
-	}
-
-	return nil, plumbing.ErrObjectNotFound
-}
-
-// SetIndex sets an index for the packfile. It is recommended to set this.
-// The index might be read from a file or reused from a previous Decoder usage
-// (see Index function).
-func (d *Decoder) SetIndex(idx *Index) {
-	d.hasBuiltIndex = true
-	d.idx = idx
-}
-
-// Index returns the index for the packfile. If the index was set with
-// SetIndex, Index will return it. Otherwise, it will return an index that is
-// built while decoding. Unless SetIndex was called with a full index or
-// Decode was called for the whole packfile, the returned index will be
-// incomplete.
-func (d *Decoder) Index() *Index {
-	return d.idx
-}
-
-// Close closes the Scanner. Usually this means that the whole reader has
-// been read and discarded.
-func (d *Decoder) Close() error {
-	return d.s.Close()
-}
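
Note: the Decoder removed above is superseded by the Parser/Packfile pair that the updated tests below exercise. A minimal sketch of the replacement flow, assuming f is a seekable billy.File positioned at the start of a packfile and fs its filesystem (openPackfile is a hypothetical helper, not part of this change; it assumes imports of io, billy, idxfile and packfile):

    // openPackfile indexes a packfile and opens it for random access.
    func openPackfile(fs billy.Filesystem, f billy.File) (*packfile.Packfile, error) {
        // idxfile.Writer is an Observer that builds the index while parsing.
        w := new(idxfile.Writer)
        parser, err := packfile.NewParser(packfile.NewScanner(f), w)
        if err != nil {
            return nil, err
        }

        if _, err := parser.Parse(); err != nil {
            return nil, err
        }

        index, err := w.Index()
        if err != nil {
            return nil, err
        }

        // Rewind so the Packfile scanner starts from the beginning again.
        if _, err := f.Seek(0, io.SeekStart); err != nil {
            return nil, err
        }

        return packfile.NewPackfile(index, fs, f), nil
    }
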
diff --git a/plumbing/format/packfile/decoder_test.go b/plumbing/format/packfile/decoder_test.go
deleted file mode 100644
index b5bc7b7..0000000
--- a/plumbing/format/packfile/decoder_test.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package packfile_test
-
-import (
-	"io"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/plumbing/cache"
-	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
-	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
-	"gopkg.in/src-d/go-git.v4/plumbing/storer"
-	"gopkg.in/src-d/go-git.v4/storage/filesystem"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
-
-	. "gopkg.in/check.v1"
-	"gopkg.in/src-d/go-billy.v4/memfs"
-	"gopkg.in/src-d/go-git-fixtures.v3"
-)
-
-type ReaderSuite struct {
-	fixtures.Suite
-}
-
-var _ = Suite(&ReaderSuite{})
-
-func (s *ReaderSuite) TestNewDecodeNonSeekable(c *C) {
-	scanner := packfile.NewScanner(nil)
-	d, err := packfile.NewDecoder(scanner, nil)
-
-	c.Assert(d, IsNil)
-	c.Assert(err, NotNil)
-}
-
-func (s *ReaderSuite) TestDecode(c *C) {
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		scanner := packfile.NewScanner(f.Packfile())
-		storage := memory.NewStorage()
-
-		d, err := packfile.NewDecoder(scanner, storage)
-		c.Assert(err, IsNil)
-		defer d.Close()
-
-		ch, err := d.Decode()
-		c.Assert(err, IsNil)
-		c.Assert(ch, Equals, f.PackfileHash)
-
-		assertObjects(c, storage, expectedHashes)
-	})
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDelta(c *C) {
-	f := fixtures.Basic().ByTag("ref-delta").One()
-
-	storage := memory.NewStorage()
-	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoderForType(scanner, storage, plumbing.CommitObject,
-		cache.NewObjectLRUDefault())
-	c.Assert(err, IsNil)
-
-	// Index required to decode by ref-delta.
-	d.SetIndex(getIndexFromIdxFile(f.Idx()))
-
-	defer d.Close()
-
-	_, count, err := scanner.Header()
-	c.Assert(err, IsNil)
-
-	var i uint32
-	for i = 0; i < count; i++ {
-		obj, err := d.DecodeObject()
-		c.Assert(err, IsNil)
-
-		if obj != nil {
-			c.Assert(obj.Type(), Equals, plumbing.CommitObject)
-		}
-	}
-}
-
-func (s *ReaderSuite) TestDecodeByTypeRefDeltaError(c *C) {
-	fixtures.Basic().ByTag("ref-delta").Test(c, func(f *fixtures.Fixture) {
-		storage := memory.NewStorage()
-		scanner := packfile.NewScanner(f.Packfile())
-		d, err := packfile.NewDecoderForType(scanner, storage,
-			plumbing.CommitObject, cache.NewObjectLRUDefault())
-		c.Assert(err, IsNil)
-
-		defer d.Close()
-
-		_, count, err := scanner.Header()
-		c.Assert(err, IsNil)
-
-		isError := false
-		var i uint32
-		for i = 0; i < count; i++ {
-			_, err := d.DecodeObject()
-			if err != nil {
-				isError = true
-				break
-			}
-		}
-		c.Assert(isError, Equals, true)
-	})
-
-}
-
-func (s *ReaderSuite) TestDecodeByType(c *C) {
-	ts := []plumbing.ObjectType{
-		plumbing.CommitObject,
-		plumbing.TagObject,
-		plumbing.TreeObject,
-		plumbing.BlobObject,
-	}
-
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		for _, t := range ts {
-			storage := memory.NewStorage()
-			scanner := packfile.NewScanner(f.Packfile())
-			d, err := packfile.NewDecoderForType(scanner, storage, t,
-				cache.NewObjectLRUDefault())
-			c.Assert(err, IsNil)
-
-			// when the packfile is ref-delta based, the offsets are required
-			if f.Is("ref-delta") {
-				d.SetIndex(getIndexFromIdxFile(f.Idx()))
-			}
-
-			defer d.Close()
-
-			_, count, err := scanner.Header()
-			c.Assert(err, IsNil)
-
-			var i uint32
-			for i = 0; i < count; i++ {
-				obj, err := d.DecodeObject()
-				c.Assert(err, IsNil)
-
-				if obj != nil {
-					c.Assert(obj.Type(), Equals, t)
-				}
-			}
-		}
-	})
-}
-func (s *ReaderSuite) TestDecodeByTypeConstructor(c *C) {
-	f := fixtures.Basic().ByTag("packfile").One()
-	storage := memory.NewStorage()
-	scanner := packfile.NewScanner(f.Packfile())
-
-	_, err := packfile.NewDecoderForType(scanner, storage,
-		plumbing.OFSDeltaObject, cache.NewObjectLRUDefault())
-	c.Assert(err, Equals, plumbing.ErrInvalidType)
-
-	_, err = packfile.NewDecoderForType(scanner, storage,
-		plumbing.REFDeltaObject, cache.NewObjectLRUDefault())
-
-	c.Assert(err, Equals, plumbing.ErrInvalidType)
-
-	_, err = packfile.NewDecoderForType(scanner, storage, plumbing.InvalidObject,
-		cache.NewObjectLRUDefault())
-	c.Assert(err, Equals, plumbing.ErrInvalidType)
-}
-
-func (s *ReaderSuite) TestDecodeMultipleTimes(c *C) {
-	f := fixtures.Basic().ByTag("packfile").One()
-	scanner := packfile.NewScanner(f.Packfile())
-	storage := memory.NewStorage()
-
-	d, err := packfile.NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-	defer d.Close()
-
-	ch, err := d.Decode()
-	c.Assert(err, IsNil)
-	c.Assert(ch, Equals, f.PackfileHash)
-
-	ch, err = d.Decode()
-	c.Assert(err, Equals, packfile.ErrAlreadyDecoded)
-	c.Assert(ch, Equals, plumbing.ZeroHash)
-}
-
-func (s *ReaderSuite) TestDecodeInMemory(c *C) {
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		scanner := packfile.NewScanner(f.Packfile())
-		d, err := packfile.NewDecoder(scanner, nil)
-		c.Assert(err, IsNil)
-
-		ch, err := d.Decode()
-		c.Assert(err, IsNil)
-		c.Assert(ch, Equals, f.PackfileHash)
-	})
-}
-
-type nonSeekableReader struct {
-	r io.Reader
-}
-
-func (nsr nonSeekableReader) Read(b []byte) (int, error) {
-	return nsr.r.Read(b)
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithTxStorer(c *C) {
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		reader := nonSeekableReader{
-			r: f.Packfile(),
-		}
-
-		scanner := packfile.NewScanner(reader)
-
-		var storage storer.EncodedObjectStorer = memory.NewStorage()
-		_, isTxStorer := storage.(storer.Transactioner)
-		c.Assert(isTxStorer, Equals, true)
-
-		d, err := packfile.NewDecoder(scanner, storage)
-		c.Assert(err, IsNil)
-		defer d.Close()
-
-		ch, err := d.Decode()
-		c.Assert(err, IsNil)
-		c.Assert(ch, Equals, f.PackfileHash)
-
-		assertObjects(c, storage, expectedHashes)
-	})
-}
-
-func (s *ReaderSuite) TestDecodeNoSeekableWithoutTxStorer(c *C) {
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		reader := nonSeekableReader{
-			r: f.Packfile(),
-		}
-
-		scanner := packfile.NewScanner(reader)
-
-		var storage storer.EncodedObjectStorer
-		storage, _ = filesystem.NewStorage(memfs.New())
-		_, isTxStorer := storage.(storer.Transactioner)
-		c.Assert(isTxStorer, Equals, false)
-
-		d, err := packfile.NewDecoder(scanner, storage)
-		c.Assert(err, IsNil)
-		defer d.Close()
-
-		ch, err := d.Decode()
-		c.Assert(err, IsNil)
-		c.Assert(ch, Equals, f.PackfileHash)
-
-		assertObjects(c, storage, expectedHashes)
-	})
-}
-
-var expectedHashes = []string{
-	"918c48b83bd081e863dbe1b80f8998f058cd8294",
-	"af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
-	"1669dce138d9b841a518c64b10914d88f5e488ea",
-	"a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
-	"b8e471f58bcbca63b07bda20e428190409c2db47",
-	"35e85108805c84807bc66a02d91535e1e24b38b9",
-	"b029517f6300c2da0f4b651b8642506cd6aaf45d",
-	"32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
-	"d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
-	"c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
-	"d5c0f4ab811897cadf03aec358ae60d21f91c50d",
-	"49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
-	"cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
-	"9dea2395f5403188298c1dabe8bdafe562c491e3",
-	"586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
-	"9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
-	"5a877e6a906a2743ad6e45d99c1793642aaf8eda",
-	"c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
-	"a8d315b2b1c615d43042c3a62402b8a54288cf5c",
-	"a39771a7651f97faf5c72e08224d857fc35133db",
-	"880cd14280f4b9b6ed3986d6671f907d7cc2a198",
-	"fb72698cab7617ac416264415f13224dfd7a165e",
-	"4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
-	"eba74343e2f15d62adedfd8c883ee0262b5c8021",
-	"c2d30fa8ef288618f65f6eed6e168e0d514886f4",
-	"8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
-	"aa9b383c260e1d05fbbf6b30a02914555e20c725",
-	"6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
-	"dbd3641b371024f44d0e469a9c8f5457b0660de1",
-	"e8d3ffab552895c19b9fcf7aa264d277cde33881",
-	"7e59600739c96546163833214c36459e324bad0a",
-}
-
-func (s *ReaderSuite) TestDecodeCRCs(c *C) {
-	f := fixtures.Basic().ByTag("ofs-delta").One()
-
-	scanner := packfile.NewScanner(f.Packfile())
-	storage := memory.NewStorage()
-
-	d, err := packfile.NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-	_, err = d.Decode()
-	c.Assert(err, IsNil)
-
-	var sum uint64
-	idx := d.Index().ToIdxFile()
-	for _, e := range idx.Entries {
-		sum += uint64(e.CRC32)
-	}
-
-	c.Assert(int(sum), Equals, 78022211966)
-}
-
-func (s *ReaderSuite) TestDecodeObjectAt(c *C) {
-	f := fixtures.Basic().One()
-	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoder(scanner, nil)
-	c.Assert(err, IsNil)
-
-	// when the packfile is ref-delta based, the offsets are required
-	if f.Is("ref-delta") {
-		d.SetIndex(getIndexFromIdxFile(f.Idx()))
-	}
-
-	// the object at offset 186 is a delta, so it should be recalled
-	// without having been read before.
-	obj, err := d.DecodeObjectAt(186)
-	c.Assert(err, IsNil)
-	c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestDecodeObjectAtForType(c *C) {
-	f := fixtures.Basic().One()
-	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoderForType(scanner, nil, plumbing.TreeObject,
-		cache.NewObjectLRUDefault())
-	c.Assert(err, IsNil)
-
-	// when the packfile is ref-delta based, the offsets are required
-	if f.Is("ref-delta") {
-		d.SetIndex(getIndexFromIdxFile(f.Idx()))
-	}
-
-	// the object at offset 186 is a delta, so it should be recalled
-	// without having been read before.
-	obj, err := d.DecodeObjectAt(186)
-	c.Assert(err, IsNil)
-	c.Assert(obj.Type(), Equals, plumbing.CommitObject)
-	c.Assert(obj.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-}
-
-func (s *ReaderSuite) TestIndex(c *C) {
-	f := fixtures.Basic().One()
-	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoder(scanner, nil)
-	c.Assert(err, IsNil)
-
-	c.Assert(d.Index().ToIdxFile().Entries, HasLen, 0)
-
-	_, err = d.Decode()
-	c.Assert(err, IsNil)
-
-	c.Assert(len(d.Index().ToIdxFile().Entries), Equals, 31)
-}
-
-func (s *ReaderSuite) TestSetIndex(c *C) {
-	f := fixtures.Basic().One()
-	scanner := packfile.NewScanner(f.Packfile())
-	d, err := packfile.NewDecoder(scanner, nil)
-	c.Assert(err, IsNil)
-
-	idx := packfile.NewIndex(1)
-	h := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
-	idx.Add(h, uint64(42), 0)
-	d.SetIndex(idx)
-
-	idxf := d.Index().ToIdxFile()
-	c.Assert(idxf.Entries, HasLen, 1)
-	c.Assert(idxf.Entries[0].Offset, Equals, uint64(42))
-}
-
-func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
-
-	i, err := s.IterEncodedObjects(plumbing.AnyObject)
-	c.Assert(err, IsNil)
-
-	var count int
-	err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
-	c.Assert(err, IsNil)
-	c.Assert(count, Equals, len(expects))
-
-	for _, exp := range expects {
-		obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
-		c.Assert(err, IsNil)
-		c.Assert(obt.Hash().String(), Equals, exp)
-	}
-}
-
-func getIndexFromIdxFile(r io.Reader) *packfile.Index {
-	idxf := idxfile.NewIdxfile()
-	d := idxfile.NewDecoder(r)
-	if err := d.Decode(idxf); err != nil {
-		panic(err)
-	}
-
-	return packfile.NewIndexFromIdxFile(idxf)
-}
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
index 8cc7180..fc1419e 100644
--- a/plumbing/format/packfile/encoder_advanced_test.go
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -2,14 +2,16 @@
 
 import (
 	"bytes"
+	"io"
 	"math/rand"
 	"testing"
 
+	"gopkg.in/src-d/go-billy.v4/memfs"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
 	. "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-git-fixtures.v3"
@@ -34,7 +36,6 @@
 		c.Assert(err, IsNil)
 		s.testEncodeDecode(c, storage, 10)
 	})
-
 }
 
 func (s *EncoderAdvancedSuite) TestEncodeDecodeNoDeltaCompression(c *C) {
@@ -52,8 +53,11 @@
 	})
 }
 
-func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer, packWindow uint) {
-
+func (s *EncoderAdvancedSuite) testEncodeDecode(
+	c *C,
+	storage storer.Storer,
+	packWindow uint,
+) {
 	objIter, err := storage.IterEncodedObjects(plumbing.AnyObject)
 	c.Assert(err, IsNil)
 
@@ -80,16 +84,35 @@
 	encodeHash, err := enc.Encode(hashes, packWindow)
 	c.Assert(err, IsNil)
 
-	scanner := NewScanner(buf)
-	storage = memory.NewStorage()
-	d, err := NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-	decodeHash, err := d.Decode()
+	fs := memfs.New()
+	f, err := fs.Create("packfile")
 	c.Assert(err, IsNil)
 
+	_, err = f.Write(buf.Bytes())
+	c.Assert(err, IsNil)
+
+	_, err = f.Seek(0, io.SeekStart)
+	c.Assert(err, IsNil)
+
+	w := new(idxfile.Writer)
+	parser, err := NewParser(NewScanner(f), w)
+	c.Assert(err, IsNil)
+
+	_, err = parser.Parse()
+	c.Assert(err, IsNil)
+	index, err := w.Index()
+	c.Assert(err, IsNil)
+
+	_, err = f.Seek(0, io.SeekStart)
+	c.Assert(err, IsNil)
+
+	p := NewPackfile(index, fs, f)
+
+	decodeHash, err := p.ID()
+	c.Assert(err, IsNil)
 	c.Assert(encodeHash, Equals, decodeHash)
 
-	objIter, err = storage.IterEncodedObjects(plumbing.AnyObject)
+	objIter, err = p.GetAll()
 	c.Assert(err, IsNil)
 	obtainedObjects := map[plumbing.Hash]bool{}
 	err = objIter.ForEach(func(o plumbing.EncodedObject) error {
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index 84d03fb..80b916d 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -2,8 +2,12 @@
 
 import (
 	"bytes"
+	"io"
+	stdioutil "io/ioutil"
 
+	"gopkg.in/src-d/go-billy.v4/memfs"
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
 	"gopkg.in/src-d/go-git.v4/storage/memory"
 
 	. "gopkg.in/check.v1"
@@ -130,24 +134,20 @@
 	})
 	c.Assert(err, IsNil)
 
-	scanner := NewScanner(s.buf)
-
-	storage := memory.NewStorage()
-	d, err := NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-
-	decHash, err := d.Decode()
+	p, cleanup := packfileFromReader(c, s.buf)
+	defer cleanup()
+	decHash, err := p.ID()
 	c.Assert(err, IsNil)
 
 	c.Assert(encHash, Equals, decHash)
 
-	decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+	decSrc, err := p.Get(srcObject.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decSrc, DeepEquals, srcObject)
+	objectsEqual(c, decSrc, srcObject)
 
-	decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+	decTarget, err := p.Get(targetObject.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decTarget, DeepEquals, targetObject)
+	objectsEqual(c, decTarget, targetObject)
 }
 
 func (s *EncoderSuite) deltaOverDeltaTest(c *C) {
@@ -173,27 +173,24 @@
 	})
 	c.Assert(err, IsNil)
 
-	scanner := NewScanner(s.buf)
-	storage := memory.NewStorage()
-	d, err := NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-
-	decHash, err := d.Decode()
+	p, cleanup := packfileFromReader(c, s.buf)
+	defer cleanup()
+	decHash, err := p.ID()
 	c.Assert(err, IsNil)
 
 	c.Assert(encHash, Equals, decHash)
 
-	decSrc, err := storage.EncodedObject(srcObject.Type(), srcObject.Hash())
+	decSrc, err := p.Get(srcObject.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decSrc, DeepEquals, srcObject)
+	objectsEqual(c, decSrc, srcObject)
 
-	decTarget, err := storage.EncodedObject(targetObject.Type(), targetObject.Hash())
+	decTarget, err := p.Get(targetObject.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decTarget, DeepEquals, targetObject)
+	objectsEqual(c, decTarget, targetObject)
 
-	decOtherTarget, err := storage.EncodedObject(otherTargetObject.Type(), otherTargetObject.Hash())
+	decOtherTarget, err := p.Get(otherTargetObject.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decOtherTarget, DeepEquals, otherTargetObject)
+	objectsEqual(c, decOtherTarget, otherTargetObject)
 }
 
 func (s *EncoderSuite) deltaOverDeltaCyclicTest(c *C) {
@@ -248,29 +245,74 @@
 	})
 	c.Assert(err, IsNil)
 
-	scanner := NewScanner(s.buf)
-	storage := memory.NewStorage()
-	d, err := NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-
-	decHash, err := d.Decode()
+	p, cleanup := packfileFromReader(c, s.buf)
+	defer cleanup()
+	decHash, err := p.ID()
 	c.Assert(err, IsNil)
 
 	c.Assert(encHash, Equals, decHash)
 
-	decSrc, err := storage.EncodedObject(o1.Type(), o1.Hash())
+	decSrc, err := p.Get(o1.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decSrc, DeepEquals, o1)
+	objectsEqual(c, decSrc, o1)
 
-	decTarget, err := storage.EncodedObject(o2.Type(), o2.Hash())
+	decTarget, err := p.Get(o2.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decTarget, DeepEquals, o2)
+	objectsEqual(c, decTarget, o2)
 
-	decOtherTarget, err := storage.EncodedObject(o3.Type(), o3.Hash())
+	decOtherTarget, err := p.Get(o3.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decOtherTarget, DeepEquals, o3)
+	objectsEqual(c, decOtherTarget, o3)
 
-	decAnotherTarget, err := storage.EncodedObject(o4.Type(), o4.Hash())
+	decAnotherTarget, err := p.Get(o4.Hash())
 	c.Assert(err, IsNil)
-	c.Assert(decAnotherTarget, DeepEquals, o4)
+	objectsEqual(c, decAnotherTarget, o4)
+}
+
+func objectsEqual(c *C, o1, o2 plumbing.EncodedObject) {
+	c.Assert(o1.Type(), Equals, o2.Type())
+	c.Assert(o1.Hash(), Equals, o2.Hash())
+	c.Assert(o1.Size(), Equals, o2.Size())
+
+	r1, err := o1.Reader()
+	c.Assert(err, IsNil)
+
+	b1, err := stdioutil.ReadAll(r1)
+	c.Assert(err, IsNil)
+
+	r2, err := o2.Reader()
+	c.Assert(err, IsNil)
+
+	b2, err := stdioutil.ReadAll(r2)
+	c.Assert(err, IsNil)
+
+	c.Assert(bytes.Compare(b1, b2), Equals, 0)
+}
+
+func packfileFromReader(c *C, buf *bytes.Buffer) (*Packfile, func()) {
+	fs := memfs.New()
+	file, err := fs.Create("packfile")
+	c.Assert(err, IsNil)
+
+	_, err = file.Write(buf.Bytes())
+	c.Assert(err, IsNil)
+
+	_, err = file.Seek(0, io.SeekStart)
+	c.Assert(err, IsNil)
+
+	scanner := NewScanner(file)
+
+	w := new(idxfile.Writer)
+	p, err := NewParser(scanner, w)
+	c.Assert(err, IsNil)
+
+	_, err = p.Parse()
+	c.Assert(err, IsNil)
+
+	index, err := w.Index()
+	c.Assert(err, IsNil)
+
+	return NewPackfile(index, fs, file), func() {
+		c.Assert(file.Close(), IsNil)
+	}
 }
diff --git a/plumbing/format/packfile/fsobject.go b/plumbing/format/packfile/fsobject.go
new file mode 100644
index 0000000..330cb73
--- /dev/null
+++ b/plumbing/format/packfile/fsobject.go
@@ -0,0 +1,116 @@
+package packfile
+
+import (
+	"io"
+
+	billy "gopkg.in/src-d/go-billy.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+)
+
+// FSObject is an object from the packfile on the filesystem.
+type FSObject struct {
+	hash   plumbing.Hash
+	h      *ObjectHeader
+	offset int64
+	size   int64
+	typ    plumbing.ObjectType
+	index  idxfile.Index
+	fs     billy.Filesystem
+	path   string
+	cache  cache.Object
+}
+
+// NewFSObject creates a new filesystem object.
+func NewFSObject(
+	hash plumbing.Hash,
+	finalType plumbing.ObjectType,
+	offset int64,
+	contentSize int64,
+	index idxfile.Index,
+	fs billy.Filesystem,
+	path string,
+	cache cache.Object,
+) *FSObject {
+	return &FSObject{
+		hash:   hash,
+		offset: offset,
+		size:   contentSize,
+		typ:    finalType,
+		index:  index,
+		fs:     fs,
+		path:   path,
+		cache:  cache,
+	}
+}
+
+// Reader implements the plumbing.EncodedObject interface.
+func (o *FSObject) Reader() (io.ReadCloser, error) {
+	obj, ok := o.cache.Get(o.hash)
+	if ok {
+		reader, err := obj.Reader()
+		if err != nil {
+			return nil, err
+		}
+
+		return reader, nil
+	}
+
+	f, err := o.fs.Open(o.path)
+	if err != nil {
+		return nil, err
+	}
+
+	p := NewPackfileWithCache(o.index, nil, f, o.cache)
+	r, err := p.getObjectContent(o.offset)
+	if err != nil {
+		_ = f.Close()
+		return nil, err
+	}
+
+	if err := f.Close(); err != nil {
+		return nil, err
+	}
+
+	return r, nil
+}
+
+// SetSize implements the plumbing.EncodedObject interface. This method
+// is a noop.
+func (o *FSObject) SetSize(int64) {}
+
+// SetType implements the plumbing.EncodedObject interface. This method is
+// a noop.
+func (o *FSObject) SetType(plumbing.ObjectType) {}
+
+// Hash implements the plumbing.EncodedObject interface.
+func (o *FSObject) Hash() plumbing.Hash { return o.hash }
+
+// Size implements the plumbing.EncodedObject interface.
+func (o *FSObject) Size() int64 { return o.size }
+
+// Type implements the plumbing.EncodedObject interface.
+func (o *FSObject) Type() plumbing.ObjectType {
+	return o.typ
+}
+
+// Writer implements the plumbing.EncodedObject interface. This method always
+// returns a nil writer.
+func (o *FSObject) Writer() (io.WriteCloser, error) {
+	return nil, nil
+}
+
+type objectReader struct {
+	io.ReadCloser
+	f billy.File
+}
+
+func (r *objectReader) Close() error {
+	if err := r.ReadCloser.Close(); err != nil {
+		_ = r.f.Close()
+		return err
+	}
+
+	return r.f.Close()
+}
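
FSObject defers reading content: each Reader call either serves the object from the shared cache or reopens the packfile at the stored offset. A short usage sketch, assuming obj is an *FSObject returned by a filesystem-backed Packfile and the caller imports io/ioutil:

    r, err := obj.Reader() // reopens the packfile unless the object is cached
    if err != nil {
        return err
    }
    defer r.Close()

    content, err := ioutil.ReadAll(r)
    if err != nil {
        return err
    }
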
diff --git a/plumbing/format/packfile/index.go b/plumbing/format/packfile/index.go
deleted file mode 100644
index 021b2d1..0000000
--- a/plumbing/format/packfile/index.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package packfile
-
-import (
-	"sort"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
-)
-
-// Index is an in-memory representation of a packfile index.
-// This uses idxfile.Idxfile under the hood to obtain indexes from .idx files
-// or to store them.
-type Index struct {
-	byHash   map[plumbing.Hash]*idxfile.Entry
-	byOffset []*idxfile.Entry // sorted by their offset
-}
-
-// NewIndex creates a new empty index with the given size. Size is a hint and
-// can be 0. It is recommended to set it to the number of objects to be indexed
-// if it is known beforehand (e.g. reading from a packfile).
-func NewIndex(size int) *Index {
-	return &Index{
-		byHash:   make(map[plumbing.Hash]*idxfile.Entry, size),
-		byOffset: make([]*idxfile.Entry, 0, size),
-	}
-}
-
-// NewIndexFromIdxFile creates a new Index from an idxfile.IdxFile.
-func NewIndexFromIdxFile(idxf *idxfile.Idxfile) *Index {
-	idx := &Index{
-		byHash:   make(map[plumbing.Hash]*idxfile.Entry, idxf.ObjectCount),
-		byOffset: make([]*idxfile.Entry, 0, idxf.ObjectCount),
-	}
-	sorted := true
-	for i, e := range idxf.Entries {
-		idx.addUnsorted(e)
-		if i > 0 && idx.byOffset[i-1].Offset >= e.Offset {
-			sorted = false
-		}
-	}
-
-	// If the idxfile was loaded from a regular packfile index
-	// then it will already be in offset order, in which case we
-	// can avoid doing a relatively expensive idempotent sort.
-	if !sorted {
-		sort.Sort(orderByOffset(idx.byOffset))
-	}
-
-	return idx
-}
-
-// orderByOffset is a sort.Interface adapter that arranges
-// a slice of entries by their offset.
-type orderByOffset []*idxfile.Entry
-
-func (o orderByOffset) Len() int           { return len(o) }
-func (o orderByOffset) Less(i, j int) bool { return o[i].Offset < o[j].Offset }
-func (o orderByOffset) Swap(i, j int)      { o[i], o[j] = o[j], o[i] }
-
-// Add adds a new Entry with the given values to the index.
-func (idx *Index) Add(h plumbing.Hash, offset uint64, crc32 uint32) {
-	e := &idxfile.Entry{
-		Hash:   h,
-		Offset: offset,
-		CRC32:  crc32,
-	}
-	idx.byHash[e.Hash] = e
-
-	// Find the right position in byOffset.
-	// Look for the first position whose offset is *greater* than e.Offset.
-	i := sort.Search(len(idx.byOffset), func(i int) bool {
-		return idx.byOffset[i].Offset > offset
-	})
-	if i == len(idx.byOffset) {
-		// Simple case: add it to the end.
-		idx.byOffset = append(idx.byOffset, e)
-		return
-	}
-	// Harder case: shift existing entries down by one to make room.
-	// Append a nil entry first so we can use existing capacity in case
-	// the index was carefully preallocated.
-	idx.byOffset = append(idx.byOffset, nil)
-	copy(idx.byOffset[i+1:], idx.byOffset[i:len(idx.byOffset)-1])
-	idx.byOffset[i] = e
-}
-
-func (idx *Index) addUnsorted(e *idxfile.Entry) {
-	idx.byHash[e.Hash] = e
-	idx.byOffset = append(idx.byOffset, e)
-}
-
-// LookupHash looks an entry up by its hash. An idxfile.Entry is returned and
-// a bool, which is true if it was found or false if it wasn't.
-func (idx *Index) LookupHash(h plumbing.Hash) (*idxfile.Entry, bool) {
-	e, ok := idx.byHash[h]
-	return e, ok
-}
-
-// LookupHash looks an entry up by its offset in the packfile. An idxfile.Entry
-// is returned and a bool, which is true if it was found or false if it wasn't.
-func (idx *Index) LookupOffset(offset uint64) (*idxfile.Entry, bool) {
-	i := sort.Search(len(idx.byOffset), func(i int) bool {
-		return idx.byOffset[i].Offset >= offset
-	})
-	if i >= len(idx.byOffset) || idx.byOffset[i].Offset != offset {
-		return nil, false // not present
-	}
-	return idx.byOffset[i], true
-}
-
-// Size returns the number of entries in the index.
-func (idx *Index) Size() int {
-	return len(idx.byHash)
-}
-
-// ToIdxFile converts the index to an idxfile.Idxfile, which can then be used
-// to serialize.
-func (idx *Index) ToIdxFile() *idxfile.Idxfile {
-	idxf := idxfile.NewIdxfile()
-	for _, e := range idx.byHash {
-		idxf.Entries = append(idxf.Entries, e)
-	}
-
-	return idxf
-}
diff --git a/plumbing/format/packfile/index_test.go b/plumbing/format/packfile/index_test.go
deleted file mode 100644
index 8de886d..0000000
--- a/plumbing/format/packfile/index_test.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package packfile
-
-import (
-	"strconv"
-	"strings"
-	"testing"
-
-	"gopkg.in/src-d/go-git.v4/plumbing"
-
-	. "gopkg.in/check.v1"
-)
-
-type IndexSuite struct{}
-
-var _ = Suite(&IndexSuite{})
-
-func (s *IndexSuite) TestLookupOffset(c *C) {
-	idx := NewIndex(0)
-
-	for o1 := 0; o1 < 10000; o1 += 100 {
-		for o2 := 0; o2 < 10000; o2 += 100 {
-			if o2 >= o1 {
-				e, ok := idx.LookupOffset(uint64(o2))
-				c.Assert(ok, Equals, false)
-				c.Assert(e, IsNil)
-			} else {
-				e, ok := idx.LookupOffset(uint64(o2))
-				c.Assert(ok, Equals, true)
-				c.Assert(e, NotNil)
-				c.Assert(e.Hash, Equals, toHash(o2))
-				c.Assert(e.Offset, Equals, uint64(o2))
-			}
-		}
-
-		h1 := toHash(o1)
-		idx.Add(h1, uint64(o1), 0)
-
-		for o2 := 0; o2 < 10000; o2 += 100 {
-			if o2 > o1 {
-				e, ok := idx.LookupOffset(uint64(o2))
-				c.Assert(ok, Equals, false)
-				c.Assert(e, IsNil)
-			} else {
-				e, ok := idx.LookupOffset(uint64(o2))
-				c.Assert(ok, Equals, true)
-				c.Assert(e, NotNil)
-				c.Assert(e.Hash, Equals, toHash(o2))
-				c.Assert(e.Offset, Equals, uint64(o2))
-			}
-		}
-	}
-}
-
-func (s *IndexSuite) TestLookupHash(c *C) {
-	idx := NewIndex(0)
-
-	for o1 := 0; o1 < 10000; o1 += 100 {
-		for o2 := 0; o2 < 10000; o2 += 100 {
-			if o2 >= o1 {
-				e, ok := idx.LookupHash(toHash(o2))
-				c.Assert(ok, Equals, false)
-				c.Assert(e, IsNil)
-			} else {
-				e, ok := idx.LookupHash(toHash(o2))
-				c.Assert(ok, Equals, true)
-				c.Assert(e, NotNil)
-				c.Assert(e.Hash, Equals, toHash(o2))
-				c.Assert(e.Offset, Equals, uint64(o2))
-			}
-		}
-
-		h1 := toHash(o1)
-		idx.Add(h1, uint64(o1), 0)
-
-		for o2 := 0; o2 < 10000; o2 += 100 {
-			if o2 > o1 {
-				e, ok := idx.LookupHash(toHash(o2))
-				c.Assert(ok, Equals, false)
-				c.Assert(e, IsNil)
-			} else {
-				e, ok := idx.LookupHash(toHash(o2))
-				c.Assert(ok, Equals, true)
-				c.Assert(e, NotNil)
-				c.Assert(e.Hash, Equals, toHash(o2))
-				c.Assert(e.Offset, Equals, uint64(o2))
-			}
-		}
-	}
-}
-
-func (s *IndexSuite) TestSize(c *C) {
-	idx := NewIndex(0)
-
-	for o1 := 0; o1 < 1000; o1++ {
-		c.Assert(idx.Size(), Equals, o1)
-		h1 := toHash(o1)
-		idx.Add(h1, uint64(o1), 0)
-	}
-}
-
-func (s *IndexSuite) TestIdxFileEmpty(c *C) {
-	idx := NewIndex(0)
-	idxf := idx.ToIdxFile()
-	idx2 := NewIndexFromIdxFile(idxf)
-	c.Assert(idx, DeepEquals, idx2)
-}
-
-func (s *IndexSuite) TestIdxFile(c *C) {
-	idx := NewIndex(0)
-	for o1 := 0; o1 < 1000; o1++ {
-		h1 := toHash(o1)
-		idx.Add(h1, uint64(o1), 0)
-	}
-
-	idx2 := NewIndexFromIdxFile(idx.ToIdxFile())
-	c.Assert(idx, DeepEquals, idx2)
-}
-
-func toHash(i int) plumbing.Hash {
-	is := strconv.Itoa(i)
-	padding := strings.Repeat("a", 40-len(is))
-	return plumbing.NewHash(padding + is)
-}
-
-func BenchmarkIndexConstruction(b *testing.B) {
-	b.ReportAllocs()
-
-	idx := NewIndex(0)
-	for o := 0; o < 1e6*b.N; o += 100 {
-		h1 := toHash(o)
-		idx.Add(h1, uint64(o), 0)
-	}
-}
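
The deleted in-memory Index is replaced by the idxfile.Index interface, which the new Packfile below embeds, and its idxfile.MemoryIndex implementation. A sketch of the lookup formerly served by Index.LookupHash, assuming r is an io.Reader over an .idx file (loadOffset is a hypothetical helper):

    // loadOffset decodes an index and maps an object hash to its offset.
    func loadOffset(r io.Reader, h plumbing.Hash) (int64, error) {
        idx := idxfile.NewMemoryIndex()
        if err := idxfile.NewDecoder(r).Decode(idx); err != nil {
            return 0, err
        }

        // FindOffset and FindHash are the lookups the new Packfile relies on.
        return idx.FindOffset(h)
    }
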
diff --git a/plumbing/format/packfile/packfile.go b/plumbing/format/packfile/packfile.go
new file mode 100644
index 0000000..852a834
--- /dev/null
+++ b/plumbing/format/packfile/packfile.go
@@ -0,0 +1,520 @@
+package packfile
+
+import (
+	"bytes"
+	"io"
+	"os"
+
+	billy "gopkg.in/src-d/go-billy.v4"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+var (
+	// ErrInvalidObject is returned by Decode when an invalid object is
+	// found in the packfile.
+	ErrInvalidObject = NewError("invalid git object")
+	// ErrZLib is returned by Decode when there was an error unzipping
+	// the packfile contents.
+	ErrZLib = NewError("zlib reading error")
+)
+
+// Packfile allows retrieving information from inside a packfile.
+type Packfile struct {
+	idxfile.Index
+	fs             billy.Filesystem
+	file           billy.File
+	s              *Scanner
+	deltaBaseCache cache.Object
+	offsetToType   map[int64]plumbing.ObjectType
+}
+
+// NewPackfileWithCache creates a new Packfile with the given object cache.
+// If the filesystem is provided, the packfile will return FSObjects, otherwise
+// it will return MemoryObjects.
+func NewPackfileWithCache(
+	index idxfile.Index,
+	fs billy.Filesystem,
+	file billy.File,
+	cache cache.Object,
+) *Packfile {
+	s := NewScanner(file)
+	return &Packfile{
+		index,
+		fs,
+		file,
+		s,
+		cache,
+		make(map[int64]plumbing.ObjectType),
+	}
+}
+
+// NewPackfile returns a packfile representation for the given packfile file
+// and packfile idx.
+// If the filesystem is provided, the packfile will return FSObjects, otherwise
+// it will return MemoryObjects.
+func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
+	return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
+}
+
+// Get retrieves the encoded object in the packfile with the given hash.
+func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) {
+	offset, err := p.FindOffset(h)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.GetByOffset(offset)
+}
+
+// GetByOffset retrieves the encoded object from the packfile with the given
+// offset.
+func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) {
+	hash, err := p.FindHash(o)
+	if err == nil {
+		if obj, ok := p.deltaBaseCache.Get(hash); ok {
+			return obj, nil
+		}
+	}
+
+	if _, err := p.s.SeekFromStart(o); err != nil {
+		if err == io.EOF || isInvalid(err) {
+			return nil, plumbing.ErrObjectNotFound
+		}
+
+		return nil, err
+	}
+
+	return p.nextObject()
+}
+
+func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) {
+	h, err := p.s.NextObjectHeader()
+	p.s.pendingObject = nil
+	return h, err
+}
+
+func (p *Packfile) getObjectData(
+	h *ObjectHeader,
+) (typ plumbing.ObjectType, size int64, err error) {
+	switch h.Type {
+	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+		typ = h.Type
+		size = h.Length
+	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		defer bufPool.Put(buf)
+
+		_, _, err = p.s.NextObject(buf)
+		if err != nil {
+			return
+		}
+
+		delta := buf.Bytes()
+		_, delta = decodeLEB128(delta) // skip src size
+		sz, _ := decodeLEB128(delta)
+		size = int64(sz)
+
+		var offset int64
+		if h.Type == plumbing.REFDeltaObject {
+			offset, err = p.FindOffset(h.Reference)
+			if err != nil {
+				return
+			}
+		} else {
+			offset = h.OffsetReference
+		}
+
+		if baseType, ok := p.offsetToType[offset]; ok {
+			typ = baseType
+		} else {
+			if _, err = p.s.SeekFromStart(offset); err != nil {
+				return
+			}
+
+			h, err = p.nextObjectHeader()
+			if err != nil {
+				return
+			}
+
+			typ, _, err = p.getObjectData(h)
+			if err != nil {
+				return
+			}
+		}
+	default:
+		err = ErrInvalidObject.AddDetails("type %q", h.Type)
+	}
+
+	return
+}
+
+func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
+	switch h.Type {
+	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+		return h.Length, nil
+	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+		buf := bufPool.Get().(*bytes.Buffer)
+		buf.Reset()
+		defer bufPool.Put(buf)
+
+		if _, _, err := p.s.NextObject(buf); err != nil {
+			return 0, err
+		}
+
+		delta := buf.Bytes()
+		_, delta = decodeLEB128(delta) // skip src size
+		sz, _ := decodeLEB128(delta)
+		return int64(sz), nil
+	default:
+		return 0, ErrInvalidObject.AddDetails("type %q", h.Type)
+	}
+}
+
+func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) {
+	switch h.Type {
+	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+		return h.Type, nil
+	case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
+		var offset int64
+		if h.Type == plumbing.REFDeltaObject {
+			offset, err = p.FindOffset(h.Reference)
+			if err != nil {
+				return
+			}
+		} else {
+			offset = h.OffsetReference
+		}
+
+		if baseType, ok := p.offsetToType[offset]; ok {
+			typ = baseType
+		} else {
+			if _, err = p.s.SeekFromStart(offset); err != nil {
+				return
+			}
+
+			h, err = p.nextObjectHeader()
+			if err != nil {
+				return
+			}
+
+			typ, err = p.getObjectType(h)
+			if err != nil {
+				return
+			}
+		}
+	default:
+		err = ErrInvalidObject.AddDetails("type %q", h.Type)
+	}
+
+	return
+}
+
+func (p *Packfile) nextObject() (plumbing.EncodedObject, error) {
+	h, err := p.nextObjectHeader()
+	if err != nil {
+		if err == io.EOF || isInvalid(err) {
+			return nil, plumbing.ErrObjectNotFound
+		}
+		return nil, err
+	}
+
+	// If we have no filesystem, we will return a MemoryObject instead
+	// of an FSObject.
+	if p.fs == nil {
+		return p.getNextObject(h)
+	}
+
+	hash, err := p.FindHash(h.Offset)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := p.getObjectSize(h)
+	if err != nil {
+		return nil, err
+	}
+
+	typ, err := p.getObjectType(h)
+	if err != nil {
+		return nil, err
+	}
+
+	p.offsetToType[h.Offset] = typ
+
+	return NewFSObject(
+		hash,
+		typ,
+		h.Offset,
+		size,
+		p.Index,
+		p.fs,
+		p.file.Name(),
+		p.deltaBaseCache,
+	), nil
+}
+
+func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
+	ref, err := p.FindHash(offset)
+	if err == nil {
+		obj, ok := p.cacheGet(ref)
+		if ok {
+			reader, err := obj.Reader()
+			if err != nil {
+				return nil, err
+			}
+
+			return reader, nil
+		}
+	}
+
+	if _, err := p.s.SeekFromStart(offset); err != nil {
+		return nil, err
+	}
+
+	h, err := p.nextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	obj, err := p.getNextObject(h)
+	if err != nil {
+		return nil, err
+	}
+
+	return obj.Reader()
+}
+
+func (p *Packfile) getNextObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
+	var obj = new(plumbing.MemoryObject)
+	obj.SetSize(h.Length)
+	obj.SetType(h.Type)
+
+	var err error
+	switch h.Type {
+	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
+		err = p.fillRegularObjectContent(obj)
+	case plumbing.REFDeltaObject:
+		err = p.fillREFDeltaObjectContent(obj, h.Reference)
+	case plumbing.OFSDeltaObject:
+		err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference)
+	default:
+		err = ErrInvalidObject.AddDetails("type %q", h.Type)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return obj, nil
+}
+
+func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) error {
+	w, err := obj.Writer()
+	if err != nil {
+		return err
+	}
+
+	_, _, err = p.s.NextObject(w)
+	p.cachePut(obj)
+
+	return err
+}
+
+func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
+	buf := bufPool.Get().(*bytes.Buffer)
+	buf.Reset()
+	_, _, err := p.s.NextObject(buf)
+	if err != nil {
+		return err
+	}
+
+	base, ok := p.cacheGet(ref)
+	if !ok {
+		base, err = p.Get(ref)
+		if err != nil {
+			return err
+		}
+	}
+
+	obj.SetType(base.Type())
+	err = ApplyDelta(obj, base, buf.Bytes())
+	p.cachePut(obj)
+	bufPool.Put(buf)
+
+	return err
+}
+
+func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
+	buf := bytes.NewBuffer(nil)
+	_, _, err := p.s.NextObject(buf)
+	if err != nil {
+		return err
+	}
+
+	var base plumbing.EncodedObject
+	var ok bool
+	hash, err := p.FindHash(offset)
+	if err == nil {
+		base, ok = p.cacheGet(hash)
+	}
+
+	if !ok {
+		base, err = p.GetByOffset(offset)
+		if err != nil {
+			return err
+		}
+
+		p.cachePut(base)
+	}
+
+	obj.SetType(base.Type())
+	err = ApplyDelta(obj, base, buf.Bytes())
+	p.cachePut(obj)
+
+	return err
+}
+
+func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) {
+	if p.deltaBaseCache == nil {
+		return nil, false
+	}
+
+	return p.deltaBaseCache.Get(h)
+}
+
+func (p *Packfile) cachePut(obj plumbing.EncodedObject) {
+	if p.deltaBaseCache == nil {
+		return
+	}
+
+	p.deltaBaseCache.Put(obj)
+}
+
+// GetAll returns an iterator with all encoded objects in the packfile.
+// The returned iterator is not thread-safe; it should be used in the same
+// thread as the Packfile instance.
+func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) {
+	return p.GetByType(plumbing.AnyObject)
+}
+
+// GetByType returns all the objects of the given type.
+func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) {
+	switch typ {
+	case plumbing.AnyObject,
+		plumbing.BlobObject,
+		plumbing.TreeObject,
+		plumbing.CommitObject,
+		plumbing.TagObject:
+		entries, err := p.EntriesByOffset()
+		if err != nil {
+			return nil, err
+		}
+
+		return &objectIter{
+			// The easiest way to provide an object decoder is to pass this
+			// Packfile instance itself: the iterator then shares its delta
+			// base cache and offset-to-type map, reusing as much cached
+			// data as possible.
+			p:    p,
+			iter: entries,
+			typ:  typ,
+		}, nil
+	default:
+		return nil, plumbing.ErrInvalidType
+	}
+}
+
+// ID returns the ID of the packfile, which is the checksum stored in its
+// final 20 bytes.
+func (p *Packfile) ID() (plumbing.Hash, error) {
+	prev, err := p.file.Seek(-20, io.SeekEnd)
+	if err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	var hash plumbing.Hash
+	if _, err := io.ReadFull(p.file, hash[:]); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	if _, err := p.file.Seek(prev, io.SeekStart); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return hash, nil
+}
+
+// Close closes the packfile and its resources.
+func (p *Packfile) Close() error {
+	closer, ok := p.file.(io.Closer)
+	if !ok {
+		return nil
+	}
+
+	return closer.Close()
+}
+
+type objectIter struct {
+	p    *Packfile
+	typ  plumbing.ObjectType
+	iter idxfile.EntryIter
+}
+
+func (i *objectIter) Next() (plumbing.EncodedObject, error) {
+	for {
+		e, err := i.iter.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		obj, err := i.p.GetByOffset(int64(e.Offset))
+		if err != nil {
+			return nil, err
+		}
+
+		if i.typ == plumbing.AnyObject || obj.Type() == i.typ {
+			return obj, nil
+		}
+	}
+}
+
+func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error {
+	for {
+		o, err := i.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+
+		if err := f(o); err != nil {
+			return err
+		}
+	}
+}
+
+func (i *objectIter) Close() {
+	i.iter.Close()
+}
+
+// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid
+// error inside. It also checks for the Windows error string, which differs
+// from os.ErrInvalid.
+func isInvalid(err error) bool {
+	pe, ok := err.(*os.PathError)
+	if !ok {
+		return false
+	}
+
+	errstr := pe.Err.Error()
+	return errstr == errInvalidUnix || errstr == errInvalidWindows
+}
+
+// errInvalidWindows is the Windows equivalent to os.ErrInvalid
+const errInvalidWindows = "The parameter is incorrect."
+
+var errInvalidUnix = os.ErrInvalid.Error()
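
Iteration over a Packfile follows the storer.EncodedObjectIter contract. A short sketch walking all commits, assuming p is an open *Packfile (the fmt print is illustrative only):

    iter, err := p.GetByType(plumbing.CommitObject)
    if err != nil {
        return err
    }
    defer iter.Close()

    // ForEach translates the internal io.EOF into a nil return.
    err = iter.ForEach(func(obj plumbing.EncodedObject) error {
        fmt.Println(obj.Hash(), obj.Size())
        return nil
    })
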
diff --git a/plumbing/format/packfile/packfile_test.go b/plumbing/format/packfile/packfile_test.go
new file mode 100644
index 0000000..05dc8a7
--- /dev/null
+++ b/plumbing/format/packfile/packfile_test.go
@@ -0,0 +1,279 @@
+package packfile_test
+
+import (
+	"io"
+	"math"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-billy.v4/osfs"
+	fixtures "gopkg.in/src-d/go-git-fixtures.v3"
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+type PackfileSuite struct {
+	fixtures.Suite
+	p   *packfile.Packfile
+	idx *idxfile.MemoryIndex
+	f   *fixtures.Fixture
+}
+
+var _ = Suite(&PackfileSuite{})
+
+func (s *PackfileSuite) TestGet(c *C) {
+	for h := range expectedEntries {
+		obj, err := s.p.Get(h)
+		c.Assert(err, IsNil)
+		c.Assert(obj, Not(IsNil))
+		c.Assert(obj.Hash(), Equals, h)
+	}
+
+	_, err := s.p.Get(plumbing.ZeroHash)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *PackfileSuite) TestGetByOffset(c *C) {
+	for h, o := range expectedEntries {
+		obj, err := s.p.GetByOffset(o)
+		c.Assert(err, IsNil)
+		c.Assert(obj, Not(IsNil))
+		c.Assert(obj.Hash(), Equals, h)
+	}
+
+	_, err := s.p.GetByOffset(math.MaxInt64)
+	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
+}
+
+func (s *PackfileSuite) TestID(c *C) {
+	id, err := s.p.ID()
+	c.Assert(err, IsNil)
+	c.Assert(id, Equals, s.f.PackfileHash)
+}
+
+func (s *PackfileSuite) TestGetAll(c *C) {
+	iter, err := s.p.GetAll()
+	c.Assert(err, IsNil)
+
+	var objects int
+	for {
+		o, err := iter.Next()
+		if err == io.EOF {
+			break
+		}
+		c.Assert(err, IsNil)
+
+		objects++
+		_, ok := expectedEntries[o.Hash()]
+		c.Assert(ok, Equals, true)
+	}
+
+	c.Assert(objects, Equals, len(expectedEntries))
+}
+
+var expectedEntries = map[plumbing.Hash]int64{
+	plumbing.NewHash("1669dce138d9b841a518c64b10914d88f5e488ea"): 615,
+	plumbing.NewHash("32858aad3c383ed1ff0a0f9bdf231d54a00c9e88"): 1524,
+	plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"): 1063,
+	plumbing.NewHash("49c6bb89b17060d7b4deacb7b338fcc6ea2352a9"): 78882,
+	plumbing.NewHash("4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd"): 84688,
+	plumbing.NewHash("586af567d0bb5e771e49bdd9434f5e0fb76d25fa"): 84559,
+	plumbing.NewHash("5a877e6a906a2743ad6e45d99c1793642aaf8eda"): 84479,
+	plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"): 186,
+	plumbing.NewHash("7e59600739c96546163833214c36459e324bad0a"): 84653,
+	plumbing.NewHash("880cd14280f4b9b6ed3986d6671f907d7cc2a198"): 78050,
+	plumbing.NewHash("8dcef98b1d52143e1e2dbc458ffe38f925786bf2"): 84741,
+	plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"): 286,
+	plumbing.NewHash("9a48f23120e880dfbe41f7c9b7b708e9ee62a492"): 80998,
+	plumbing.NewHash("9dea2395f5403188298c1dabe8bdafe562c491e3"): 84032,
+	plumbing.NewHash("a39771a7651f97faf5c72e08224d857fc35133db"): 84430,
+	plumbing.NewHash("a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69"): 838,
+	plumbing.NewHash("a8d315b2b1c615d43042c3a62402b8a54288cf5c"): 84375,
+	plumbing.NewHash("aa9b383c260e1d05fbbf6b30a02914555e20c725"): 84760,
+	plumbing.NewHash("af2d6a6954d532f8ffb47615169c8fdf9d383a1a"): 449,
+	plumbing.NewHash("b029517f6300c2da0f4b651b8642506cd6aaf45d"): 1392,
+	plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"): 1230,
+	plumbing.NewHash("c192bd6a24ea1ab01d78686e417c8bdc7c3d197f"): 1713,
+	plumbing.NewHash("c2d30fa8ef288618f65f6eed6e168e0d514886f4"): 84725,
+	plumbing.NewHash("c8f1d8c61f9da76f4cb49fd86322b6e685dba956"): 80725,
+	plumbing.NewHash("cf4aa3b38974fb7d81f367c0830f7d78d65ab86b"): 84608,
+	plumbing.NewHash("d3ff53e0564a9f87d8e84b6e28e5060e517008aa"): 1685,
+	plumbing.NewHash("d5c0f4ab811897cadf03aec358ae60d21f91c50d"): 2351,
+	plumbing.NewHash("dbd3641b371024f44d0e469a9c8f5457b0660de1"): 84115,
+	plumbing.NewHash("e8d3ffab552895c19b9fcf7aa264d277cde33881"): 12,
+	plumbing.NewHash("eba74343e2f15d62adedfd8c883ee0262b5c8021"): 84708,
+	plumbing.NewHash("fb72698cab7617ac416264415f13224dfd7a165e"): 84671,
+}
+
+func (s *PackfileSuite) SetUpTest(c *C) {
+	s.f = fixtures.Basic().One()
+
+	fs := osfs.New("")
+	f, err := fs.Open(s.f.Packfile().Name())
+	c.Assert(err, IsNil)
+
+	s.idx = idxfile.NewMemoryIndex()
+	c.Assert(idxfile.NewDecoder(s.f.Idx()).Decode(s.idx), IsNil)
+
+	s.p = packfile.NewPackfile(s.idx, fs, f)
+}
+
+func (s *PackfileSuite) TearDownTest(c *C) {
+	c.Assert(s.p.Close(), IsNil)
+}
+
+func (s *PackfileSuite) TestDecode(c *C) {
+	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+		index := getIndexFromIdxFile(f.Idx())
+		fs := osfs.New("")
+		pf, err := fs.Open(f.Packfile().Name())
+		c.Assert(err, IsNil)
+
+		p := packfile.NewPackfile(index, fs, pf)
+		defer p.Close()
+
+		for _, h := range expectedHashes {
+			obj, err := p.Get(plumbing.NewHash(h))
+			c.Assert(err, IsNil)
+			c.Assert(obj.Hash().String(), Equals, h)
+		}
+	})
+}
+
+func (s *PackfileSuite) TestDecodeByTypeRefDelta(c *C) {
+	f := fixtures.Basic().ByTag("ref-delta").One()
+
+	index := getIndexFromIdxFile(f.Idx())
+	fs := osfs.New("")
+	pf, err := fs.Open(f.Packfile().Name())
+	c.Assert(err, IsNil)
+
+	packfile := packfile.NewPackfile(index, fs, pf)
+	defer packfile.Close()
+
+	iter, err := packfile.GetByType(plumbing.CommitObject)
+	c.Assert(err, IsNil)
+
+	var count int
+	for {
+		obj, err := iter.Next()
+		if err == io.EOF {
+			break
+		}
+		count++
+		c.Assert(err, IsNil)
+		c.Assert(obj.Type(), Equals, plumbing.CommitObject)
+	}
+
+	c.Assert(count > 0, Equals, true)
+}
+
+func (s *PackfileSuite) TestDecodeByType(c *C) {
+	ts := []plumbing.ObjectType{
+		plumbing.CommitObject,
+		plumbing.TagObject,
+		plumbing.TreeObject,
+		plumbing.BlobObject,
+	}
+
+	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
+		for _, t := range ts {
+			index := getIndexFromIdxFile(f.Idx())
+			fs := osfs.New("")
+			pf, err := fs.Open(f.Packfile().Name())
+			c.Assert(err, IsNil)
+
+			packfile := packfile.NewPackfile(index, fs, pf)
+			defer packfile.Close()
+
+			iter, err := packfile.GetByType(t)
+			c.Assert(err, IsNil)
+
+			c.Assert(iter.ForEach(func(obj plumbing.EncodedObject) error {
+				c.Assert(obj.Type(), Equals, t)
+				return nil
+			}), IsNil)
+		}
+	})
+}
+
+func (s *PackfileSuite) TestDecodeByTypeConstructor(c *C) {
+	f := fixtures.Basic().ByTag("packfile").One()
+	index := getIndexFromIdxFile(f.Idx())
+	fs := osfs.New("")
+	pf, err := fs.Open(f.Packfile().Name())
+	c.Assert(err, IsNil)
+
+	packfile := packfile.NewPackfile(index, fs, pf)
+	defer packfile.Close()
+
+	_, err = packfile.GetByType(plumbing.OFSDeltaObject)
+	c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+	_, err = packfile.GetByType(plumbing.REFDeltaObject)
+	c.Assert(err, Equals, plumbing.ErrInvalidType)
+
+	_, err = packfile.GetByType(plumbing.InvalidObject)
+	c.Assert(err, Equals, plumbing.ErrInvalidType)
+}
+
+var expectedHashes = []string{
+	"918c48b83bd081e863dbe1b80f8998f058cd8294",
+	"af2d6a6954d532f8ffb47615169c8fdf9d383a1a",
+	"1669dce138d9b841a518c64b10914d88f5e488ea",
+	"a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69",
+	"b8e471f58bcbca63b07bda20e428190409c2db47",
+	"35e85108805c84807bc66a02d91535e1e24b38b9",
+	"b029517f6300c2da0f4b651b8642506cd6aaf45d",
+	"32858aad3c383ed1ff0a0f9bdf231d54a00c9e88",
+	"d3ff53e0564a9f87d8e84b6e28e5060e517008aa",
+	"c192bd6a24ea1ab01d78686e417c8bdc7c3d197f",
+	"d5c0f4ab811897cadf03aec358ae60d21f91c50d",
+	"49c6bb89b17060d7b4deacb7b338fcc6ea2352a9",
+	"cf4aa3b38974fb7d81f367c0830f7d78d65ab86b",
+	"9dea2395f5403188298c1dabe8bdafe562c491e3",
+	"586af567d0bb5e771e49bdd9434f5e0fb76d25fa",
+	"9a48f23120e880dfbe41f7c9b7b708e9ee62a492",
+	"5a877e6a906a2743ad6e45d99c1793642aaf8eda",
+	"c8f1d8c61f9da76f4cb49fd86322b6e685dba956",
+	"a8d315b2b1c615d43042c3a62402b8a54288cf5c",
+	"a39771a7651f97faf5c72e08224d857fc35133db",
+	"880cd14280f4b9b6ed3986d6671f907d7cc2a198",
+	"fb72698cab7617ac416264415f13224dfd7a165e",
+	"4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd",
+	"eba74343e2f15d62adedfd8c883ee0262b5c8021",
+	"c2d30fa8ef288618f65f6eed6e168e0d514886f4",
+	"8dcef98b1d52143e1e2dbc458ffe38f925786bf2",
+	"aa9b383c260e1d05fbbf6b30a02914555e20c725",
+	"6ecf0ef2c2dffb796033e5a02219af86ec6584e5",
+	"dbd3641b371024f44d0e469a9c8f5457b0660de1",
+	"e8d3ffab552895c19b9fcf7aa264d277cde33881",
+	"7e59600739c96546163833214c36459e324bad0a",
+}
+
+func assertObjects(c *C, s storer.EncodedObjectStorer, expects []string) {
+	i, err := s.IterEncodedObjects(plumbing.AnyObject)
+	c.Assert(err, IsNil)
+
+	var count int
+	err = i.ForEach(func(plumbing.EncodedObject) error { count++; return nil })
+	c.Assert(err, IsNil)
+	c.Assert(count, Equals, len(expects))
+
+	for _, exp := range expects {
+		obt, err := s.EncodedObject(plumbing.AnyObject, plumbing.NewHash(exp))
+		c.Assert(err, IsNil)
+		c.Assert(obt.Hash().String(), Equals, exp)
+	}
+}
+
+func getIndexFromIdxFile(r io.Reader) idxfile.Index {
+	idxf := idxfile.NewMemoryIndex()
+	d := idxfile.NewDecoder(r)
+	if err := d.Decode(idxf); err != nil {
+		panic(err)
+	}
+
+	return idxf
+}
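
The parser added below reports progress through Observer callbacks; idxfile.Writer is the in-tree implementation. A minimal hypothetical observer matching the interface defined in parser.go, counting inflated objects:

    // countingObserver counts objects and ignores everything else.
    type countingObserver struct {
        objects uint32
    }

    func (o *countingObserver) OnHeader(count uint32) error { return nil }

    func (o *countingObserver) OnInflatedObjectHeader(t plumbing.ObjectType, objSize, pos int64) error {
        o.objects++
        return nil
    }

    func (o *countingObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error {
        return nil
    }

    func (o *countingObserver) OnFooter(h plumbing.Hash) error { return nil }
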
diff --git a/plumbing/format/packfile/parser.go b/plumbing/format/packfile/parser.go
new file mode 100644
index 0000000..28582b5
--- /dev/null
+++ b/plumbing/format/packfile/parser.go
@@ -0,0 +1,489 @@
+package packfile
+
+import (
+	"bytes"
+	"errors"
+	"io"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/cache"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+)
+
+var (
+	// ErrReferenceDeltaNotFound is returned when the reference delta is not
+	// found.
+	ErrReferenceDeltaNotFound = errors.New("reference delta not found")
+
+	// ErrNotSeekableSource is returned when the source for the parser is not
+	// seekable and a storage was not provided, so it can't be parsed.
+	ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided")
+
+	// ErrDeltaNotCached is returned when the delta could not be found in cache.
+	ErrDeltaNotCached = errors.New("delta could not be found in cache")
+)
+
+// Observer interface is implemented by index encoders.
+type Observer interface {
+	// OnHeader is called when a new packfile is opened.
+	OnHeader(count uint32) error
+	// OnInflatedObjectHeader is called for each object header read.
+	OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error
+	// OnInflatedObjectContent is called for each decoded object.
+	OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error
+	// OnFooter is called when decoding is done.
+	OnFooter(h plumbing.Hash) error
+}
+
+// Parser decodes a packfile and calls any observer associated with it. It is
+// used to generate indexes.
+type Parser struct {
+	storage          storer.EncodedObjectStorer
+	scanner          *Scanner
+	count            uint32
+	oi               []*objectInfo
+	oiByHash         map[plumbing.Hash]*objectInfo
+	oiByOffset       map[int64]*objectInfo
+	hashOffset       map[plumbing.Hash]int64
+	pendingRefDeltas map[plumbing.Hash][]*objectInfo
+	checksum         plumbing.Hash
+
+	cache *cache.BufferLRU
+	// delta content by offset, only used if source is not seekable
+	deltas map[int64][]byte
+
+	ob []Observer
+}
+
+// NewParser creates a new Parser. The Scanner source must be seekable.
+// If it's not, NewParserWithStorage should be used instead.
+func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) {
+	return NewParserWithStorage(scanner, nil, ob...)
+}
+
+// NewParserWithStorage creates a new Parser. The scanner source must either
+// be seekable or a storage must be provided.
+func NewParserWithStorage(
+	scanner *Scanner,
+	storage storer.EncodedObjectStorer,
+	ob ...Observer,
+) (*Parser, error) {
+	if !scanner.IsSeekable && storage == nil {
+		return nil, ErrNotSeekableSource
+	}
+
+	var deltas map[int64][]byte
+	if !scanner.IsSeekable {
+		deltas = make(map[int64][]byte)
+	}
+
+	return &Parser{
+		storage:          storage,
+		scanner:          scanner,
+		ob:               ob,
+		count:            0,
+		cache:            cache.NewBufferLRUDefault(),
+		pendingRefDeltas: make(map[plumbing.Hash][]*objectInfo),
+		deltas:           deltas,
+	}, nil
+}
+
+func (p *Parser) forEachObserver(f func(o Observer) error) error {
+	for _, o := range p.ob {
+		if err := f(o); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Parser) onHeader(count uint32) error {
+	return p.forEachObserver(func(o Observer) error {
+		return o.OnHeader(count)
+	})
+}
+
+func (p *Parser) onInflatedObjectHeader(
+	t plumbing.ObjectType,
+	objSize int64,
+	pos int64,
+) error {
+	return p.forEachObserver(func(o Observer) error {
+		return o.OnInflatedObjectHeader(t, objSize, pos)
+	})
+}
+
+func (p *Parser) onInflatedObjectContent(
+	h plumbing.Hash,
+	pos int64,
+	crc uint32,
+	content []byte,
+) error {
+	return p.forEachObserver(func(o Observer) error {
+		return o.OnInflatedObjectContent(h, pos, crc, content)
+	})
+}
+
+func (p *Parser) onFooter(h plumbing.Hash) error {
+	return p.forEachObserver(func(o Observer) error {
+		return o.OnFooter(h)
+	})
+}
+
+// Parse runs the decoding phase of the packfile and returns its checksum.
+func (p *Parser) Parse() (plumbing.Hash, error) {
+	if err := p.init(); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	if err := p.indexObjects(); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	var err error
+	p.checksum, err = p.scanner.Checksum()
+	if err != nil && err != io.EOF {
+		return plumbing.ZeroHash, err
+	}
+
+	if err := p.resolveDeltas(); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	if len(p.pendingRefDeltas) > 0 {
+		return plumbing.ZeroHash, ErrReferenceDeltaNotFound
+	}
+
+	if err := p.onFooter(p.checksum); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return p.checksum, nil
+}
+
+func (p *Parser) init() error {
+	_, c, err := p.scanner.Header()
+	if err != nil {
+		return err
+	}
+
+	if err := p.onHeader(c); err != nil {
+		return err
+	}
+
+	p.count = c
+	p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count)
+	p.oiByOffset = make(map[int64]*objectInfo, p.count)
+	p.oi = make([]*objectInfo, p.count)
+
+	return nil
+}
+
+func (p *Parser) indexObjects() error {
+	buf := new(bytes.Buffer)
+
+	for i := uint32(0); i < p.count; i++ {
+		buf.Reset()
+
+		oh, err := p.scanner.NextObjectHeader()
+		if err != nil {
+			return err
+		}
+
+		delta := false
+		var ota *objectInfo
+		switch t := oh.Type; t {
+		case plumbing.OFSDeltaObject:
+			delta = true
+
+			parent, ok := p.oiByOffset[oh.OffsetReference]
+			if !ok {
+				return plumbing.ErrObjectNotFound
+			}
+
+			ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+			parent.Children = append(parent.Children, ota)
+		case plumbing.REFDeltaObject:
+			delta = true
+
+			parent, ok := p.oiByHash[oh.Reference]
+			if ok {
+				ota = newDeltaObject(oh.Offset, oh.Length, t, parent)
+				parent.Children = append(parent.Children, ota)
+			} else {
+				ota = newBaseObject(oh.Offset, oh.Length, t)
+				p.pendingRefDeltas[oh.Reference] = append(
+					p.pendingRefDeltas[oh.Reference],
+					ota,
+				)
+			}
+		default:
+			ota = newBaseObject(oh.Offset, oh.Length, t)
+		}
+
+		_, crc, err := p.scanner.NextObject(buf)
+		if err != nil {
+			return err
+		}
+
+		ota.Crc32 = crc
+		ota.Length = oh.Length
+
+		data := buf.Bytes()
+		if !delta {
+			sha1, err := getSHA1(ota.Type, data)
+			if err != nil {
+				return err
+			}
+
+			ota.SHA1 = sha1
+			p.oiByHash[ota.SHA1] = ota
+		}
+
+		if p.storage != nil && !delta {
+			obj := new(plumbing.MemoryObject)
+			obj.SetSize(oh.Length)
+			obj.SetType(oh.Type)
+			if _, err := obj.Write(data); err != nil {
+				return err
+			}
+
+			if _, err := p.storage.SetEncodedObject(obj); err != nil {
+				return err
+			}
+		}
+
+		if delta && !p.scanner.IsSeekable {
+			p.deltas[oh.Offset] = make([]byte, len(data))
+			copy(p.deltas[oh.Offset], data)
+		}
+
+		p.oiByOffset[oh.Offset] = ota
+		p.oi[i] = ota
+	}
+
+	return nil
+}
+
+func (p *Parser) resolveDeltas() error {
+	for _, obj := range p.oi {
+		content, err := p.get(obj)
+		if err != nil {
+			return err
+		}
+
+		if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil {
+			return err
+		}
+
+		if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil {
+			return err
+		}
+
+		if !obj.IsDelta() && len(obj.Children) > 0 {
+			for _, child := range obj.Children {
+				if _, err := p.resolveObject(child, content); err != nil {
+					return err
+				}
+			}
+
+			// Remove the delta from the cache.
+			if obj.DiskType.IsDelta() && !p.scanner.IsSeekable {
+				delete(p.deltas, obj.Offset)
+			}
+		}
+	}
+
+	return nil
+}
+
+func (p *Parser) get(o *objectInfo) ([]byte, error) {
+	b, ok := p.cache.Get(o.Offset)
+	// If it's not in the cache and it's not a delta, we can try to find it
+	// in the storage, if there is one.
+	if !ok && p.storage != nil && !o.Type.IsDelta() {
+		var err error
+		e, err := p.storage.EncodedObject(plumbing.AnyObject, o.SHA1)
+		if err != nil {
+			return nil, err
+		}
+
+		r, err := e.Reader()
+		if err != nil {
+			return nil, err
+		}
+
+		b = make([]byte, e.Size())
+	// use io.ReadFull: a plain Read may return fewer bytes than e.Size()
+	if _, err = io.ReadFull(r, b); err != nil {
+			return nil, err
+		}
+	}
+
+	if b != nil {
+		return b, nil
+	}
+
+	var data []byte
+	if o.DiskType.IsDelta() {
+		base, err := p.get(o.Parent)
+		if err != nil {
+			return nil, err
+		}
+
+		data, err = p.resolveObject(o, base)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		var err error
+		data, err = p.readData(o)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if len(o.Children) > 0 {
+		p.cache.Put(o.Offset, data)
+	}
+
+	return data, nil
+}
+
+func (p *Parser) resolveObject(
+	o *objectInfo,
+	base []byte,
+) ([]byte, error) {
+	if !o.DiskType.IsDelta() {
+		return nil, nil
+	}
+
+	data, err := p.readData(o)
+	if err != nil {
+		return nil, err
+	}
+
+	data, err = applyPatchBase(o, data, base)
+	if err != nil {
+		return nil, err
+	}
+
+	if pending, ok := p.pendingRefDeltas[o.SHA1]; ok {
+		for _, po := range pending {
+			po.Parent = o
+			o.Children = append(o.Children, po)
+		}
+		delete(p.pendingRefDeltas, o.SHA1)
+	}
+
+	if p.storage != nil {
+		obj := new(plumbing.MemoryObject)
+		obj.SetSize(o.Size())
+		obj.SetType(o.Type)
+		if _, err := obj.Write(data); err != nil {
+			return nil, err
+		}
+
+		if _, err := p.storage.SetEncodedObject(obj); err != nil {
+			return nil, err
+		}
+	}
+
+	return data, nil
+}
+
+func (p *Parser) readData(o *objectInfo) ([]byte, error) {
+	if !p.scanner.IsSeekable && o.DiskType.IsDelta() {
+		data, ok := p.deltas[o.Offset]
+		if !ok {
+			return nil, ErrDeltaNotCached
+		}
+
+		return data, nil
+	}
+
+	if _, err := p.scanner.SeekFromStart(o.Offset); err != nil {
+		return nil, err
+	}
+
+	if _, err := p.scanner.NextObjectHeader(); err != nil {
+		return nil, err
+	}
+
+	buf := new(bytes.Buffer)
+	if _, _, err := p.scanner.NextObject(buf); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) {
+	patched, err := PatchDelta(base, data)
+	if err != nil {
+		return nil, err
+	}
+
+	if ota.SHA1 == plumbing.ZeroHash {
+		ota.Type = ota.Parent.Type
+		sha1, err := getSHA1(ota.Type, patched)
+		if err != nil {
+			return nil, err
+		}
+
+		ota.SHA1 = sha1
+		ota.Length = int64(len(patched))
+	}
+
+	return patched, nil
+}
+
+func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) {
+	hasher := plumbing.NewHasher(t, int64(len(data)))
+	if _, err := hasher.Write(data); err != nil {
+		return plumbing.ZeroHash, err
+	}
+
+	return hasher.Sum(), nil
+}
+
+type objectInfo struct {
+	Offset   int64
+	Length   int64
+	Type     plumbing.ObjectType
+	DiskType plumbing.ObjectType
+
+	Crc32 uint32
+
+	Parent   *objectInfo
+	Children []*objectInfo
+	SHA1     plumbing.Hash
+}
+
+func newBaseObject(offset, length int64, t plumbing.ObjectType) *objectInfo {
+	return newDeltaObject(offset, length, t, nil)
+}
+
+func newDeltaObject(
+	offset, length int64,
+	t plumbing.ObjectType,
+	parent *objectInfo,
+) *objectInfo {
+	obj := &objectInfo{
+		Offset:   offset,
+		Length:   length,
+		Type:     t,
+		DiskType: t,
+		Crc32:    0,
+		Parent:   parent,
+	}
+
+	return obj
+}
+
+func (o *objectInfo) IsDelta() bool {
+	return o.Type.IsDelta()
+}
+
+func (o *objectInfo) Size() int64 {
+	return o.Length
+}
diff --git a/plumbing/format/packfile/parser_test.go b/plumbing/format/packfile/parser_test.go
new file mode 100644
index 0000000..012a140
--- /dev/null
+++ b/plumbing/format/packfile/parser_test.go
@@ -0,0 +1,195 @@
+package packfile_test
+
+import (
+	"testing"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+
+	. "gopkg.in/check.v1"
+	"gopkg.in/src-d/go-git-fixtures.v3"
+)
+
+type ParserSuite struct {
+	fixtures.Suite
+}
+
+var _ = Suite(&ParserSuite{})
+
+func (s *ParserSuite) TestParserHashes(c *C) {
+	f := fixtures.Basic().One()
+	scanner := packfile.NewScanner(f.Packfile())
+
+	obs := new(testObserver)
+	parser, err := packfile.NewParser(scanner, obs)
+	c.Assert(err, IsNil)
+
+	ch, err := parser.Parse()
+	c.Assert(err, IsNil)
+
+	checksum := "a3fed42da1e8189a077c0e6846c040dcf73fc9dd"
+	c.Assert(ch.String(), Equals, checksum)
+
+	c.Assert(obs.checksum, Equals, checksum)
+	c.Assert(int(obs.count), Equals, 31)
+
+	commit := plumbing.CommitObject
+	blob := plumbing.BlobObject
+	tree := plumbing.TreeObject
+
+	objs := []observerObject{
+		{"e8d3ffab552895c19b9fcf7aa264d277cde33881", commit, 254, 12, 0xaa07ba4b},
+		{"6ecf0ef2c2dffb796033e5a02219af86ec6584e5", commit, 245, 186, 0xf706df58},
+		{"918c48b83bd081e863dbe1b80f8998f058cd8294", commit, 242, 286, 0x12438846},
+		{"af2d6a6954d532f8ffb47615169c8fdf9d383a1a", commit, 242, 449, 0x2905a38c},
+		{"1669dce138d9b841a518c64b10914d88f5e488ea", commit, 333, 615, 0xd9429436},
+		{"a5b8b09e2f8fcb0bb99d3ccb0958157b40890d69", commit, 332, 838, 0xbecfde4e},
+		{"35e85108805c84807bc66a02d91535e1e24b38b9", commit, 244, 1063, 0x780e4b3e},
+		{"b8e471f58bcbca63b07bda20e428190409c2db47", commit, 243, 1230, 0xdc18344f},
+		{"b029517f6300c2da0f4b651b8642506cd6aaf45d", commit, 187, 1392, 0xcf4e4280},
+		{"32858aad3c383ed1ff0a0f9bdf231d54a00c9e88", blob, 189, 1524, 0x1f08118a},
+		{"d3ff53e0564a9f87d8e84b6e28e5060e517008aa", blob, 18, 1685, 0xafded7b8},
+		{"c192bd6a24ea1ab01d78686e417c8bdc7c3d197f", blob, 1072, 1713, 0xcc1428ed},
+		{"d5c0f4ab811897cadf03aec358ae60d21f91c50d", blob, 76110, 2351, 0x1631d22f},
+		{"880cd14280f4b9b6ed3986d6671f907d7cc2a198", blob, 2780, 78050, 0xbfff5850},
+		{"49c6bb89b17060d7b4deacb7b338fcc6ea2352a9", blob, 217848, 78882, 0xd108e1d8},
+		{"c8f1d8c61f9da76f4cb49fd86322b6e685dba956", blob, 706, 80725, 0x8e97ba25},
+		{"9a48f23120e880dfbe41f7c9b7b708e9ee62a492", blob, 11488, 80998, 0x7316ff70},
+		{"9dea2395f5403188298c1dabe8bdafe562c491e3", blob, 78, 84032, 0xdb4fce56},
+		{"dbd3641b371024f44d0e469a9c8f5457b0660de1", tree, 272, 84115, 0x901cce2c},
+		{"a8d315b2b1c615d43042c3a62402b8a54288cf5c", tree, 271, 84375, 0xec4552b0},
+		{"a39771a7651f97faf5c72e08224d857fc35133db", tree, 38, 84430, 0x847905bf},
+		{"5a877e6a906a2743ad6e45d99c1793642aaf8eda", tree, 75, 84479, 0x3689459a},
+		{"586af567d0bb5e771e49bdd9434f5e0fb76d25fa", tree, 38, 84559, 0xe67af94a},
+		{"cf4aa3b38974fb7d81f367c0830f7d78d65ab86b", tree, 34, 84608, 0xc2314a2e},
+		{"7e59600739c96546163833214c36459e324bad0a", blob, 9, 84653, 0xcd987848},
+		{"fb72698cab7617ac416264415f13224dfd7a165e", tree, 238, 84671, 0x8a853a6d},
+		{"4d081c50e250fa32ea8b1313cf8bb7c2ad7627fd", tree, 179, 84688, 0x70c6518},
+		{"eba74343e2f15d62adedfd8c883ee0262b5c8021", tree, 148, 84708, 0x4f4108e2},
+		{"c2d30fa8ef288618f65f6eed6e168e0d514886f4", tree, 110, 84725, 0xd6fe09e9},
+		{"8dcef98b1d52143e1e2dbc458ffe38f925786bf2", tree, 111, 84741, 0xf07a2804},
+		{"aa9b383c260e1d05fbbf6b30a02914555e20c725", tree, 73, 84760, 0x1d75d6be},
+	}
+
+	c.Assert(obs.objects, DeepEquals, objs)
+}
+
+type observerObject struct {
+	hash   string
+	otype  plumbing.ObjectType
+	size   int64
+	offset int64
+	crc    uint32
+}
+
+type testObserver struct {
+	count    uint32
+	checksum string
+	objects  []observerObject
+	pos      map[int64]int
+}
+
+func (t *testObserver) OnHeader(count uint32) error {
+	t.count = count
+	t.pos = make(map[int64]int, count)
+	return nil
+}
+
+func (t *testObserver) OnInflatedObjectHeader(otype plumbing.ObjectType, objSize int64, pos int64) error {
+	o := t.get(pos)
+	o.otype = otype
+	o.size = objSize
+	o.offset = pos
+
+	t.put(pos, o)
+
+	return nil
+}
+
+func (t *testObserver) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
+	o := t.get(pos)
+	o.hash = h.String()
+	o.crc = crc
+
+	t.put(pos, o)
+
+	return nil
+}
+
+func (t *testObserver) OnFooter(h plumbing.Hash) error {
+	t.checksum = h.String()
+	return nil
+}
+
+func (t *testObserver) get(pos int64) observerObject {
+	i, ok := t.pos[pos]
+	if ok {
+		return t.objects[i]
+	}
+
+	return observerObject{}
+}
+
+func (t *testObserver) put(pos int64, o observerObject) {
+	i, ok := t.pos[pos]
+	if ok {
+		t.objects[i] = o
+		return
+	}
+
+	t.pos[pos] = len(t.objects)
+	t.objects = append(t.objects, o)
+}
+
+func BenchmarkParse(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	for _, f := range fixtures.ByTag("packfile") {
+		b.Run(f.URL, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile()))
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				_, err = parser.Parse()
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkParseBasic(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	f := fixtures.Basic().One()
+	for i := 0; i < b.N; i++ {
+		parser, err := packfile.NewParser(packfile.NewScanner(f.Packfile()))
+		if err != nil {
+			b.Fatal(err)
+		}
+
+		_, err = parser.Parse()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
diff --git a/plumbing/format/packfile/patch_delta.go b/plumbing/format/packfile/patch_delta.go
index c604851..a972f1c 100644
--- a/plumbing/format/packfile/patch_delta.go
+++ b/plumbing/format/packfile/patch_delta.go
@@ -63,8 +63,8 @@
 	targetSz, delta := decodeLEB128(delta)
 	remainingTargetSz := targetSz
 
-	var dest []byte
 	var cmd byte
+	dest := make([]byte, 0, targetSz)
 	for {
 		if len(delta) == 0 {
 			return nil, ErrInvalidDelta
diff --git a/plumbing/memory.go b/plumbing/memory.go
index 51cbb54..b8e1e1b 100644
--- a/plumbing/memory.go
+++ b/plumbing/memory.go
@@ -14,10 +14,10 @@
 	sz   int64
 }
 
-// Hash return the object Hash, the hash is calculated on-the-fly the first
-// time is called, the subsequent calls the same Hash is returned even if the
-// type or the content has changed. The Hash is only generated if the size of
-// the content is exactly the Object.Size
+// Hash returns the object Hash, the hash is calculated on-the-fly the first
+// time it's called, in all subsequent calls the same Hash is returned even
+// if the type or the content have changed. The Hash is only generated if the
+// size of the content is exactly the object size.
 func (o *MemoryObject) Hash() Hash {
 	if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
 		o.h = ComputeHash(o.t, o.cont)
diff --git a/plumbing/object/blob_test.go b/plumbing/object/blob_test.go
index 5ed9de0..181436d 100644
--- a/plumbing/object/blob_test.go
+++ b/plumbing/object/blob_test.go
@@ -1,6 +1,7 @@
 package object
 
 import (
+	"bytes"
 	"io"
 	"io/ioutil"
 
@@ -88,8 +89,26 @@
 		}
 
 		c.Assert(err, IsNil)
-		c.Assert(b, DeepEquals, blobs[i])
-		i += 1
+		c.Assert(b.ID(), Equals, blobs[i].ID())
+		c.Assert(b.Size, Equals, blobs[i].Size)
+		c.Assert(b.Type(), Equals, blobs[i].Type())
+
+		r1, err := b.Reader()
+		c.Assert(err, IsNil)
+
+		b1, err := ioutil.ReadAll(r1)
+		c.Assert(err, IsNil)
+		c.Assert(r1.Close(), IsNil)
+
+		r2, err := blobs[i].Reader()
+		c.Assert(err, IsNil)
+
+		b2, err := ioutil.ReadAll(r2)
+		c.Assert(err, IsNil)
+		c.Assert(r2.Close(), IsNil)
+
+		c.Assert(bytes.Compare(b1, b2), Equals, 0)
+		i++
 	}
 
 	iter.Close()
diff --git a/plumbing/object/change.go b/plumbing/object/change.go
index 729ff5a..a1b4c27 100644
--- a/plumbing/object/change.go
+++ b/plumbing/object/change.go
@@ -2,6 +2,7 @@
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"strings"
 
@@ -81,7 +82,15 @@
 // Patch returns a Patch with all the file changes in chunks. This
 // representation can be used to create several diff outputs.
 func (c *Change) Patch() (*Patch, error) {
-	return getPatch("", c)
+	return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs. If the context
+// expires, a non-nil error will be returned. The provided context must be
+// non-nil.
+func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
+	return getPatchContext(ctx, "", c)
 }
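+
+// A cancellation sketch (mirroring TestCancel in change_test.go; change is an
+// assumed *Change value):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	cancel()
+//	p, err := change.PatchContext(ctx)
+//	// p is nil and err matches "operation canceled"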
 
 func (c *Change) name() string {
@@ -136,5 +145,13 @@
 // Patch returns a Patch with all the changes in chunks. This
 // representation can be used to create several diff outputs.
 func (c Changes) Patch() (*Patch, error) {
-	return getPatch("", c...)
+	return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs. If the context
+// expires, a non-nil error will be returned. The provided context must be
+// non-nil.
+func (c Changes) PatchContext(ctx context.Context) (*Patch, error) {
+	return getPatchContext(ctx, "", c...)
 }
diff --git a/plumbing/object/change_test.go b/plumbing/object/change_test.go
index 7036fa3..b0e89c7 100644
--- a/plumbing/object/change_test.go
+++ b/plumbing/object/change_test.go
@@ -1,6 +1,7 @@
 package object
 
 import (
+	"context"
 	"sort"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
@@ -82,6 +83,12 @@
 	c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
 	c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add)
 
+	p, err = change.PatchContext(context.Background())
+	c.Assert(err, IsNil)
+	c.Assert(len(p.FilePatches()), Equals, 1)
+	c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
+	c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Add)
+
 	str := change.String()
 	c.Assert(str, Equals, "<Action: Insert, Path: examples/clone/main.go>")
 }
@@ -134,6 +141,12 @@
 	c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
 	c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete)
 
+	p, err = change.PatchContext(context.Background())
+	c.Assert(err, IsNil)
+	c.Assert(len(p.FilePatches()), Equals, 1)
+	c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 1)
+	c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Delete)
+
 	str := change.String()
 	c.Assert(str, Equals, "<Action: Delete, Path: utils/difftree/difftree.go>")
 }
@@ -206,6 +219,18 @@
 	c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add)
 	c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal)
 
+	p, err = change.PatchContext(context.Background())
+	c.Assert(err, IsNil)
+	c.Assert(len(p.FilePatches()), Equals, 1)
+	c.Assert(len(p.FilePatches()[0].Chunks()), Equals, 7)
+	c.Assert(p.FilePatches()[0].Chunks()[0].Type(), Equals, diff.Equal)
+	c.Assert(p.FilePatches()[0].Chunks()[1].Type(), Equals, diff.Delete)
+	c.Assert(p.FilePatches()[0].Chunks()[2].Type(), Equals, diff.Add)
+	c.Assert(p.FilePatches()[0].Chunks()[3].Type(), Equals, diff.Equal)
+	c.Assert(p.FilePatches()[0].Chunks()[4].Type(), Equals, diff.Delete)
+	c.Assert(p.FilePatches()[0].Chunks()[5].Type(), Equals, diff.Add)
+	c.Assert(p.FilePatches()[0].Chunks()[6].Type(), Equals, diff.Equal)
+
 	str := change.String()
 	c.Assert(str, Equals, "<Action: Modify, Path: utils/difftree/difftree.go>")
 }
@@ -367,3 +392,39 @@
 	sort.Sort(changes)
 	c.Assert(changes.String(), Equals, expected)
 }
+
+func (s *ChangeSuite) TestCancel(c *C) {
+	// Commit a5078b19f08f63e7948abd0a5e2fb7d319d3a565 of the go-git
+	// fixture inserted "examples/clone/main.go".
+	//
+	// On that commit, the "examples/clone" tree is
+	//     6efca3ff41cab651332f9ebc0c96bb26be809615
+	//
+	// and the "examples/colone/main.go" is
+	//     f95dc8f7923add1a8b9f72ecb1e8db1402de601a
+
+	path := "examples/clone/main.go"
+	name := "main.go"
+	mode := filemode.Regular
+	blob := plumbing.NewHash("f95dc8f7923add1a8b9f72ecb1e8db1402de601a")
+	tree := plumbing.NewHash("6efca3ff41cab651332f9ebc0c96bb26be809615")
+
+	change := &Change{
+		From: empty,
+		To: ChangeEntry{
+			Name: path,
+			Tree: s.tree(c, tree),
+			TreeEntry: TreeEntry{
+				Name: name,
+				Mode: mode,
+				Hash: blob,
+			},
+		},
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	p, err := change.PatchContext(ctx)
+	c.Assert(p, IsNil)
+	c.Assert(err, ErrorMatches, "operation canceled")
+}
diff --git a/plumbing/object/commit.go b/plumbing/object/commit.go
index c9a4c0e..e254342 100644
--- a/plumbing/object/commit.go
+++ b/plumbing/object/commit.go
@@ -3,6 +3,7 @@
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -16,8 +17,9 @@
 )
 
 const (
-	beginpgp string = "-----BEGIN PGP SIGNATURE-----"
-	endpgp   string = "-----END PGP SIGNATURE-----"
+	beginpgp  string = "-----BEGIN PGP SIGNATURE-----"
+	endpgp    string = "-----END PGP SIGNATURE-----"
+	headerpgp string = "gpgsig"
 )
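+
+// In the on-disk commit object the signature travels in a "gpgsig" header
+// whose continuation lines are prefixed with a single space, e.g. a sketch
+// with illustrative values:
+//
+//	tree 9a48f23120e880dfbe41f7c9b7b708e9ee62a492
+//	parent 918c48b83bd081e863dbe1b80f8998f058cd8294
+//	author John Doe <john@example.com> 1520000000 +0000
+//	committer John Doe <john@example.com> 1520000000 +0000
+//	gpgsig -----BEGIN PGP SIGNATURE-----
+//	 <base64 signature lines>
+//	 -----END PGP SIGNATURE-----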
 
 // Hash represents the hash of an object
@@ -75,7 +77,8 @@
 }
 
-// Patch returns the Patch between the actual commit and the provided one.
-func (c *Commit) Patch(to *Commit) (*Patch, error) {
+// PatchContext returns the Patch between the actual commit and the provided
+// one. An error will be returned if the context expires. The provided context
+// must be non-nil.
+func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) {
 	fromTree, err := c.Tree()
 	if err != nil {
 		return nil, err
@@ -86,7 +89,12 @@
 		return nil, err
 	}
 
-	return fromTree.Patch(toTree)
+	return fromTree.PatchContext(ctx, toTree)
+}
+
+// Patch returns the Patch between the actual commit and the provided one.
+func (c *Commit) Patch(to *Commit) (*Patch, error) {
+	return c.PatchContext(context.Background(), to)
 }
 
 // Parents return a CommitIter to the parent Commits.
@@ -174,23 +182,13 @@
 		}
 
 		if pgpsig {
-			// Check if it's the end of a PGP signature.
-			if bytes.Contains(line, []byte(endpgp)) {
-				c.PGPSignature += endpgp + "\n"
-				pgpsig = false
-			} else {
-				// Trim the left padding.
+			if len(line) > 0 && line[0] == ' ' {
 				line = bytes.TrimLeft(line, " ")
 				c.PGPSignature += string(line)
+				continue
+			} else {
+				pgpsig = false
 			}
-			continue
-		}
-
-		// Check if it's the beginning of a PGP signature.
-		if bytes.Contains(line, []byte(beginpgp)) {
-			c.PGPSignature += beginpgp + "\n"
-			pgpsig = true
-			continue
 		}
 
 		if !message {
@@ -201,15 +199,24 @@
 			}
 
 			split := bytes.SplitN(line, []byte{' '}, 2)
+
+			var data []byte
+			if len(split) == 2 {
+				data = split[1]
+			}
+
 			switch string(split[0]) {
 			case "tree":
-				c.TreeHash = plumbing.NewHash(string(split[1]))
+				c.TreeHash = plumbing.NewHash(string(data))
 			case "parent":
-				c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(split[1])))
+				c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data)))
 			case "author":
-				c.Author.Decode(split[1])
+				c.Author.Decode(data)
 			case "committer":
-				c.Committer.Decode(split[1])
+				c.Committer.Decode(data)
+			case headerpgp:
+				c.PGPSignature += string(data) + "\n"
+				pgpsig = true
 			}
 		} else {
 			c.Message += string(line)
@@ -262,17 +269,18 @@
 	}
 
 	if b.PGPSignature != "" && includeSig {
-		if _, err = fmt.Fprint(w, "pgpsig"); err != nil {
+		if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil {
 			return err
 		}
 
-		// Split all the signature lines and write with a left padding and
-		// newline at the end.
-		lines := strings.Split(b.PGPSignature, "\n")
-		for _, line := range lines {
-			if _, err = fmt.Fprintf(w, " %s\n", line); err != nil {
-				return err
-			}
+		// Split the signature into lines and re-write it with a one-space
+		// left padding. Use Join so it's clear that no trailing newline is
+		// added here; it will be added when the message is printed.
+		signature := strings.TrimSuffix(b.PGPSignature, "\n")
+		lines := strings.Split(signature, "\n")
+		if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil {
+			return err
 		}
 	}
 
diff --git a/plumbing/object/commit_test.go b/plumbing/object/commit_test.go
index 191b14d..e72b703 100644
--- a/plumbing/object/commit_test.go
+++ b/plumbing/object/commit_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"bytes"
+	"context"
 	"io"
 	"strings"
 	"time"
@@ -132,6 +133,59 @@
 	c.Assert(buf.String(), Equals, patch.String())
 }
 
+func (s *SuiteCommit) TestPatchContext(c *C) {
+	from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"))
+	to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+	patch, err := from.PatchContext(context.Background(), to)
+	c.Assert(err, IsNil)
+
+	buf := bytes.NewBuffer(nil)
+	err = patch.Encode(buf)
+	c.Assert(err, IsNil)
+
+	c.Assert(buf.String(), Equals, `diff --git a/vendor/foo.go b/vendor/foo.go
+new file mode 100644
+index 0000000000000000000000000000000000000000..9dea2395f5403188298c1dabe8bdafe562c491e3
+--- /dev/null
++++ b/vendor/foo.go
+@@ -0,0 +1,7 @@
++package main
++
++import "fmt"
++
++func main() {
++	fmt.Println("Hello, playground")
++}
+`)
+	c.Assert(buf.String(), Equals, patch.String())
+
+	from = s.commit(c, plumbing.NewHash("b8e471f58bcbca63b07bda20e428190409c2db47"))
+	to = s.commit(c, plumbing.NewHash("35e85108805c84807bc66a02d91535e1e24b38b9"))
+
+	patch, err = from.PatchContext(context.Background(), to)
+	c.Assert(err, IsNil)
+
+	buf.Reset()
+	err = patch.Encode(buf)
+	c.Assert(err, IsNil)
+
+	c.Assert(buf.String(), Equals, `diff --git a/CHANGELOG b/CHANGELOG
+deleted file mode 100644
+index d3ff53e0564a9f87d8e84b6e28e5060e517008aa..0000000000000000000000000000000000000000
+--- a/CHANGELOG
++++ /dev/null
+@@ -1 +0,0 @@
+-Initial changelog
+diff --git a/binary.jpg b/binary.jpg
+new file mode 100644
+index 0000000000000000000000000000000000000000..d5c0f4ab811897cadf03aec358ae60d21f91c50d
+Binary files /dev/null and b/binary.jpg differ
+`)
+
+	c.Assert(buf.String(), Equals, patch.String())
+}
+
 func (s *SuiteCommit) TestCommitEncodeDecodeIdempotent(c *C) {
 	ts, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05-07:00")
 	c.Assert(err, IsNil)
@@ -270,6 +324,54 @@
 	err = decoded.Decode(encoded)
 	c.Assert(err, IsNil)
 	c.Assert(decoded.PGPSignature, Equals, pgpsignature)
+
+	// signature with an extra empty line; it used to cause an "index out of
+	// range" panic when parsing
+
+	pgpsignature2 := "\n" + pgpsignature
+
+	commit.PGPSignature = pgpsignature2
+	encoded = &plumbing.MemoryObject{}
+	decoded = &Commit{}
+
+	err = commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+	c.Assert(decoded.PGPSignature, Equals, pgpsignature2)
+
+	// signature in author name
+
+	commit.PGPSignature = ""
+	commit.Author.Name = beginpgp
+	encoded = &plumbing.MemoryObject{}
+	decoded = &Commit{}
+
+	err = commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+	c.Assert(decoded.PGPSignature, Equals, "")
+	c.Assert(decoded.Author.Name, Equals, beginpgp)
+
+	// broken signature
+
+	commit.PGPSignature = beginpgp + "\n" +
+		"some\n" +
+		"trash\n" +
+		endpgp +
+		"text\n"
+	encoded = &plumbing.MemoryObject{}
+	decoded = &Commit{}
+
+	err = commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+	c.Assert(decoded.PGPSignature, Equals, commit.PGPSignature)
 }
 
 func (s *SuiteCommit) TestStat(c *C) {
@@ -363,3 +465,33 @@
 	_, ok := e.Identities["Sunny <me@darkowlzz.space>"]
 	c.Assert(ok, Equals, true)
 }
+
+func (s *SuiteCommit) TestPatchCancel(c *C) {
+	from := s.commit(c, plumbing.NewHash("918c48b83bd081e863dbe1b80f8998f058cd8294"))
+	to := s.commit(c, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
+
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	patch, err := from.PatchContext(ctx, to)
+	c.Assert(patch, IsNil)
+	c.Assert(err, ErrorMatches, "operation canceled")
+}
+
+func (s *SuiteCommit) TestMalformedHeader(c *C) {
+	encoded := &plumbing.MemoryObject{}
+	decoded := &Commit{}
+	commit := *s.Commit
+
+	commit.PGPSignature = "\n"
+	commit.Author.Name = "\n"
+	commit.Author.Email = "\n"
+	commit.Committer.Name = "\n"
+	commit.Committer.Email = "\n"
+
+	err := commit.Encode(encoded)
+	c.Assert(err, IsNil)
+
+	err = decoded.Decode(encoded)
+	c.Assert(err, IsNil)
+}
diff --git a/plumbing/object/difftree.go b/plumbing/object/difftree.go
index ac58c4d..a30a29e 100644
--- a/plumbing/object/difftree.go
+++ b/plumbing/object/difftree.go
@@ -2,6 +2,7 @@
 
 import (
 	"bytes"
+	"context"
 
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie"
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
@@ -10,6 +11,13 @@
 // DiffTree compares the content and mode of the blobs found via two
 // tree objects.
 func DiffTree(a, b *Tree) (Changes, error) {
+	return DiffTreeContext(context.Background(), a, b)
+}
+
+// DiffTreeContext compares the content and mode of the blobs found via two
+// tree objects. The provided context must be non-nil; an error will be
+// returned if it expires.
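+//
+// A minimal sketch (a and b are assumed *Tree values; ctx may be canceled by
+// the caller):
+//
+//	changes, err := DiffTreeContext(ctx, a, b)
+//	if err == ErrCanceled {
+//		// the context expired before the diff finished
+//	}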
+func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) {
 	from := NewTreeRootNode(a)
 	to := NewTreeRootNode(b)
 
@@ -17,8 +25,11 @@
 		return bytes.Equal(a.Hash(), b.Hash())
 	}
 
-	merkletrieChanges, err := merkletrie.DiffTree(from, to, hashEqual)
+	merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual)
 	if err != nil {
+		if err == merkletrie.ErrCanceled {
+			return nil, ErrCanceled
+		}
 		return nil, err
 	}
 
diff --git a/plumbing/object/difftree_test.go b/plumbing/object/difftree_test.go
index 40af8f2..ff9ecbc 100644
--- a/plumbing/object/difftree_test.go
+++ b/plumbing/object/difftree_test.go
@@ -45,25 +45,17 @@
 		return sto
 	}
 
-	sto = memory.NewStorage()
+	storer := memory.NewStorage()
 
 	pf := f.Packfile()
-
 	defer pf.Close()
 
-	n := packfile.NewScanner(pf)
-	d, err := packfile.NewDecoder(n, sto)
-	if err != nil {
+	if err := packfile.UpdateObjectStorage(storer, pf); err != nil {
 		panic(err)
 	}
 
-	_, err = d.Decode()
-	if err != nil {
-		panic(err)
-	}
-
-	s.cache[f.URL] = sto
-	return sto
+	s.cache[f.URL] = storer
+	return storer
 }
 
 var _ = Suite(&DiffTreeSuite{})
diff --git a/plumbing/object/object.go b/plumbing/object/object.go
index 4b59aba..e960e50 100644
--- a/plumbing/object/object.go
+++ b/plumbing/object/object.go
@@ -152,7 +152,11 @@
 }
 
 func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error {
-	_, err := fmt.Fprintf(w, "%d %s", s.When.Unix(), s.When.Format("-0700"))
+	u := s.When.Unix()
+	if u < 0 {
+		u = 0
+	}
+	_, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700"))
 	return err
 }
 
diff --git a/plumbing/object/object_test.go b/plumbing/object/object_test.go
index 4f0fcb3..68aa1a1 100644
--- a/plumbing/object/object_test.go
+++ b/plumbing/object/object_test.go
@@ -197,8 +197,9 @@
 		}
 
 		c.Assert(err, IsNil)
-		c.Assert(o, DeepEquals, objects[i])
-		i += 1
+		c.Assert(o.ID(), Equals, objects[i].ID())
+		c.Assert(o.Type(), Equals, objects[i].Type())
+		i++
 	}
 
 	iter.Close()
diff --git a/plumbing/object/patch.go b/plumbing/object/patch.go
index aa96a96..adeaccb 100644
--- a/plumbing/object/patch.go
+++ b/plumbing/object/patch.go
@@ -2,6 +2,8 @@
 
 import (
 	"bytes"
+	"context"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -15,10 +17,25 @@
 	dmp "github.com/sergi/go-diff/diffmatchpatch"
 )
 
+var (
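+	// ErrCanceled is returned whenever a patch or diff operation is canceled
+	// through the provided context.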
+	ErrCanceled = errors.New("operation canceled")
+)
+
 func getPatch(message string, changes ...*Change) (*Patch, error) {
+	ctx := context.Background()
+	return getPatchContext(ctx, message, changes...)
+}
+
+func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) {
 	var filePatches []fdiff.FilePatch
 	for _, c := range changes {
-		fp, err := filePatch(c)
+		select {
+		case <-ctx.Done():
+			return nil, ErrCanceled
+		default:
+		}
+
+		fp, err := filePatchWithContext(ctx, c)
 		if err != nil {
 			return nil, err
 		}
@@ -29,7 +46,7 @@
 	return &Patch{message, filePatches}, nil
 }
 
-func filePatch(c *Change) (fdiff.FilePatch, error) {
+func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) {
 	from, to, err := c.Files()
 	if err != nil {
 		return nil, err
@@ -52,6 +69,12 @@
 
 	var chunks []fdiff.Chunk
 	for _, d := range diffs {
+		select {
+		case <-ctx.Done():
+			return nil, ErrCanceled
+		default:
+		}
+
 		var op fdiff.Operation
 		switch d.Type {
 		case dmp.DiffEqual:
@@ -70,6 +93,11 @@
 		from:   c.From,
 		to:     c.To,
 	}, nil
+}
+
+func filePatch(c *Change) (fdiff.FilePatch, error) {
+	return filePatchWithContext(context.Background(), c)
 }
 
 func fileContent(f *File) (content string, isBinary bool, err error) {
diff --git a/plumbing/object/tag_test.go b/plumbing/object/tag_test.go
index 9900093..e7dd06e 100644
--- a/plumbing/object/tag_test.go
+++ b/plumbing/object/tag_test.go
@@ -265,7 +265,7 @@
 	c.Assert(tag.String(), Equals,
 		"tag TAG TWO\n"+
 			"Tagger:  <>\n"+
-			"Date:   Mon Jan 01 00:00:00 0001 +0000\n"+
+			"Date:   Thu Jan 01 00:00:00 1970 +0000\n"+
 			"\n"+
 			"tag two\n")
 }
diff --git a/plumbing/object/tree.go b/plumbing/object/tree.go
index 30bbcb0..c36a137 100644
--- a/plumbing/object/tree.go
+++ b/plumbing/object/tree.go
@@ -2,6 +2,7 @@
 
 import (
 	"bufio"
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -25,6 +26,7 @@
 	ErrMaxTreeDepth      = errors.New("maximum tree depth exceeded")
 	ErrFileNotFound      = errors.New("file not found")
 	ErrDirectoryNotFound = errors.New("directory not found")
+	ErrEntryNotFound     = errors.New("entry not found")
 )
 
 // Tree is basically like a directory - it references a bunch of other trees
@@ -166,8 +168,6 @@
 	return tree, err
 }
 
-var errEntryNotFound = errors.New("entry not found")
-
 func (t *Tree) entry(baseName string) (*TreeEntry, error) {
 	if t.m == nil {
 		t.buildMap()
@@ -175,7 +175,7 @@
 
 	entry, ok := t.m[baseName]
 	if !ok {
-		return nil, errEntryNotFound
+		return nil, ErrEntryNotFound
 	}
 
 	return entry, nil
@@ -295,15 +295,30 @@
 	return DiffTree(from, to)
 }
 
+// DiffContext returns a list of changes between this tree and the provided
+// one. An error will be returned if the context expires. The provided context
+// must be non-nil.
+func (from *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) {
+	return DiffTreeContext(ctx, from, to)
+}
+
 // Patch returns a slice of Patch objects with all the changes between trees
 // in chunks. This representation can be used to create several diff outputs.
 func (from *Tree) Patch(to *Tree) (*Patch, error) {
-	changes, err := DiffTree(from, to)
+	return from.PatchContext(context.Background(), to)
+}
+
+// PatchContext returns a slice of Patch objects with all the changes between
+// trees in chunks. This representation can be used to create several diff
+// outputs. If the context expires, an error will be returned. The provided
+// context must be non-nil.
+func (from *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) {
+	changes, err := DiffTreeContext(ctx, from, to)
 	if err != nil {
 		return nil, err
 	}
 
-	return changes.Patch()
+	return changes.PatchContext(ctx)
 }
 
 // treeEntryIter facilitates iterating through the TreeEntry objects in a Tree.
diff --git a/plumbing/object/tree_test.go b/plumbing/object/tree_test.go
index 3a687dd..59d5d21 100644
--- a/plumbing/object/tree_test.go
+++ b/plumbing/object/tree_test.go
@@ -114,6 +114,12 @@
 	c.Assert(e.Name, Equals, "foo.go")
 }
 
+func (s *TreeSuite) TestFindEntryNotFound(c *C) {
+	e, err := s.Tree.FindEntry("not-found")
+	c.Assert(e, IsNil)
+	c.Assert(err, Equals, ErrEntryNotFound)
+}
+
 // Overrides returned plumbing.EncodedObject for given hash.
 // Otherwise, delegates to actual storer to get real object
 type fakeStorer struct {
diff --git a/plumbing/transport/internal/common/common.go b/plumbing/transport/internal/common/common.go
index 8ec1ea5..00497f3 100644
--- a/plumbing/transport/internal/common/common.go
+++ b/plumbing/transport/internal/common/common.go
@@ -382,6 +382,7 @@
 	gitProtocolNotFoundErr     = "ERR \n  Repository not found."
 	gitProtocolNoSuchErr       = "ERR no such repository"
 	gitProtocolAccessDeniedErr = "ERR access denied"
+	gogsAccessDeniedErr        = "Gogs: Repository does not exist or you do not have access"
 )
 
 func isRepoNotFoundError(s string) bool {
@@ -409,6 +410,10 @@
 		return true
 	}
 
+	if strings.HasPrefix(s, gogsAccessDeniedErr) {
+		return true
+	}
+
 	return false
 }
 
diff --git a/plumbing/transport/internal/common/common_test.go b/plumbing/transport/internal/common/common_test.go
new file mode 100644
index 0000000..b2f035d
--- /dev/null
+++ b/plumbing/transport/internal/common/common_test.go
@@ -0,0 +1,78 @@
+package common
+
+import (
+	"fmt"
+	"testing"
+
+	. "gopkg.in/check.v1"
+)
+
+func Test(t *testing.T) { TestingT(t) }
+
+type CommonSuite struct{}
+
+var _ = Suite(&CommonSuite{})
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForUnknownSource(c *C) {
+	msg := "unknown system is complaining of something very sad :("
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, false)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGithub(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", githubRepoNotFoundErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForBitBucket(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", bitbucketRepoNotFoundErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForLocal(c *C) {
+	msg := fmt.Sprintf("some error stuf : %s", localRepoNotFoundErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNotFound(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", gitProtocolNotFoundErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolNoSuch(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", gitProtocolNoSuchErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGitProtocolAccessDenied(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", gitProtocolAccessDeniedErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
+
+func (s *CommonSuite) TestIsRepoNotFoundErrorForGogsAccessDenied(c *C) {
+	msg := fmt.Sprintf("%s : some error stuf", gogsAccessDeniedErr)
+
+	isRepoNotFound := isRepoNotFoundError(msg)
+
+	c.Assert(isRepoNotFound, Equals, true)
+}
diff --git a/plumbing/transport/test/receive_pack.go b/plumbing/transport/test/receive_pack.go
index 57f602d..5aea1c0 100644
--- a/plumbing/transport/test/receive_pack.go
+++ b/plumbing/transport/test/receive_pack.go
@@ -262,13 +262,16 @@
 		req.Packfile = s.emptyPackfile()
 	}
 
-	return r.ReceivePack(context.Background(), req)
+	s, err := r.ReceivePack(context.Background(), req)
+	return s, err
 }
 
 func (s *ReceivePackSuite) receivePack(c *C, ep *transport.Endpoint,
 	req *packp.ReferenceUpdateRequest, fixture *fixtures.Fixture,
 	callAdvertisedReferences bool) {
-
 	url := ""
 	if fixture != nil {
 		url = fixture.URL
@@ -279,7 +282,6 @@
 		ep.String(), url, callAdvertisedReferences,
 	)
 	report, err := s.receivePackNoCheck(c, ep, req, fixture, callAdvertisedReferences)
-
 	c.Assert(err, IsNil, comment)
 	if req.Capabilities.Supports(capability.ReportStatus) {
 		c.Assert(report, NotNil, comment)
diff --git a/plumbing/transport/test/upload_pack.go b/plumbing/transport/test/upload_pack.go
index 70e4e56..8709ac2 100644
--- a/plumbing/transport/test/upload_pack.go
+++ b/plumbing/transport/test/upload_pack.go
@@ -258,11 +258,8 @@
 	b, err := ioutil.ReadAll(r)
 	c.Assert(err, IsNil)
 	buf := bytes.NewBuffer(b)
-	scanner := packfile.NewScanner(buf)
 	storage := memory.NewStorage()
-	d, err := packfile.NewDecoder(scanner, storage)
-	c.Assert(err, IsNil)
-	_, err = d.Decode()
+	err = packfile.UpdateObjectStorage(storage, buf)
 	c.Assert(err, IsNil)
 	c.Assert(len(storage.Objects), Equals, n)
 }
diff --git a/remote.go b/remote.go
index 60461d6..0556b98 100644
--- a/remote.go
+++ b/remote.go
@@ -619,7 +619,7 @@
 	return result, nil
 }
 
-const refspecTag = "+refs/tags/*:refs/tags/*"
+const refspecAllTags = "+refs/tags/*:refs/tags/*"
 
 func calculateRefs(
 	spec []config.RefSpec,
@@ -627,17 +627,32 @@
 	tagMode TagMode,
 ) (memory.ReferenceStorage, error) {
 	if tagMode == AllTags {
-		spec = append(spec, refspecTag)
-	}
-
-	iter, err := remoteRefs.IterReferences()
-	if err != nil {
-		return nil, err
+		spec = append(spec, refspecAllTags)
 	}
 
 	refs := make(memory.ReferenceStorage)
-	return refs, iter.ForEach(func(ref *plumbing.Reference) error {
-		if !config.MatchAny(spec, ref.Name()) {
+	for _, s := range spec {
+		if err := doCalculateRefs(s, remoteRefs, refs); err != nil {
+			return nil, err
+		}
+	}
+
+	return refs, nil
+}
+
+func doCalculateRefs(
+	s config.RefSpec,
+	remoteRefs storer.ReferenceStorer,
+	refs memory.ReferenceStorage,
+) error {
+	iter, err := remoteRefs.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	var matched bool
+	err = iter.ForEach(func(ref *plumbing.Reference) error {
+		if !s.Match(ref.Name()) {
 			return nil
 		}
 
@@ -654,8 +669,23 @@
 			return nil
 		}
 
-		return refs.SetReference(ref)
+		matched = true
+		if err := refs.SetReference(ref); err != nil {
+			return err
+		}
+
+		if !s.IsWildcard() {
+			return storer.ErrStop
+		}
+
+		return nil
 	})
+
+	if !matched && !s.IsWildcard() {
+		return fmt.Errorf("couldn't find remote ref %q", s.Src())
+	}
+
+	return err
 }
 
 func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) {
diff --git a/remote_test.go b/remote_test.go
index 82ec1fc..dd386b0 100644
--- a/remote_test.go
+++ b/remote_test.go
@@ -100,6 +100,20 @@
 	})
 }
 
+func (s *RemoteSuite) TestFetchNonExistentReference(c *C) {
+	r := newRemote(memory.NewStorage(), &config.RemoteConfig{
+		URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
+	})
+
+	err := r.Fetch(&FetchOptions{
+		RefSpecs: []config.RefSpec{
+			config.RefSpec("+refs/heads/foo:refs/remotes/origin/foo"),
+		},
+	})
+
+	c.Assert(err, ErrorMatches, "couldn't find remote ref.*")
+}
+
 func (s *RemoteSuite) TestFetchContext(c *C) {
 	r := newRemote(memory.NewStorage(), &config.RemoteConfig{
 		URLs: []string{s.GetLocalRepositoryURL(fixtures.ByTag("tags").One())},
diff --git a/repository.go b/repository.go
index 717381b..f619934 100644
--- a/repository.go
+++ b/repository.go
@@ -235,9 +235,8 @@
 	return PlainOpenWithOptions(path, &PlainOpenOptions{})
 }
 
-// PlainOpen opens a git repository from the given path. It detects if the
-// repository is bare or a normal one. If the path doesn't contain a valid
-// repository ErrRepositoryNotExists is returned
+// PlainOpenWithOptions opens a git repository from the given path with specific
+// options. See PlainOpen for more info.
 func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) {
 	dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit)
 	if err != nil {
@@ -583,7 +582,7 @@
 }
 
 const (
-	refspecTagWithDepth     = "+refs/tags/%s:refs/tags/%[1]s"
+	refspecTag              = "+refs/tags/%s:refs/tags/%[1]s"
 	refspecSingleBranch     = "+refs/heads/%s:refs/remotes/%s/%[1]s"
 	refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD"
 )
@@ -592,8 +591,8 @@
 	var rs string
 
 	switch {
-	case o.ReferenceName.IsTag() && o.Depth > 0:
-		rs = fmt.Sprintf(refspecTagWithDepth, o.ReferenceName.Short())
+	case o.ReferenceName.IsTag():
+		rs = fmt.Sprintf(refspecTag, o.ReferenceName.Short())
 	case o.SingleBranch && o.ReferenceName == plumbing.HEAD:
 		rs = fmt.Sprintf(refspecSingleBranchHEAD, c.Name)
 	case o.SingleBranch:
@@ -845,8 +844,9 @@
 	return nil, fmt.Errorf("invalid Order=%v", o.Order)
 }
 
-// Tags returns all the References from Tags. This method returns all the tag
-// types, lightweight, and annotated ones.
+// Tags returns all the References from Tags. This method returns only
+// lightweight tags; note that not all tags are lightweight ones. To return
+// annotated tags too, you need to call the TagObjects() method.
 func (r *Repository) Tags() (storer.ReferenceIter, error) {
 	refIter, err := r.Storer.IterReferences()
 	if err != nil {
@@ -872,7 +872,8 @@
 		}, refIter), nil
 }
 
-// Notes returns all the References that are Branches.
+// Notes returns all the References that are notes. For more information:
+// https://git-scm.com/docs/git-notes
 func (r *Repository) Notes() (storer.ReferenceIter, error) {
 	refIter, err := r.Storer.IterReferences()
 	if err != nil {
diff --git a/repository_test.go b/repository_test.go
index b78fbb7..261af7a 100644
--- a/repository_test.go
+++ b/repository_test.go
@@ -846,7 +846,33 @@
 	objects, err := r.Objects()
 	c.Assert(err, IsNil)
 	objects.ForEach(func(object.Object) error { count++; return nil })
-	c.Assert(count, Equals, 31)
+	c.Assert(count, Equals, 28)
+}
+
+func (s *RepositorySuite) TestCloneDetachedHEADAndSingle(c *C) {
+	r, _ := Init(memory.NewStorage(), nil)
+	err := r.clone(context.Background(), &CloneOptions{
+		URL:           s.GetBasicLocalRepositoryURL(),
+		ReferenceName: plumbing.ReferenceName("refs/tags/v1.0.0"),
+		SingleBranch:  true,
+	})
+	c.Assert(err, IsNil)
+
+	cfg, err := r.Config()
+	c.Assert(err, IsNil)
+	c.Assert(cfg.Branches, HasLen, 0)
+
+	head, err := r.Reference(plumbing.HEAD, false)
+	c.Assert(err, IsNil)
+	c.Assert(head, NotNil)
+	c.Assert(head.Type(), Equals, plumbing.HashReference)
+	c.Assert(head.Hash().String(), Equals, "6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+
+	count := 0
+	objects, err := r.Objects()
+	c.Assert(err, IsNil)
+	objects.ForEach(func(object.Object) error { count++; return nil })
+	c.Assert(count, Equals, 28)
 }
 
 func (s *RepositorySuite) TestCloneDetachedHEADAndShallow(c *C) {
@@ -1688,3 +1714,59 @@
 	})
 	c.Assert(err, IsNil)
 }
+
+func BenchmarkObjects(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	for _, f := range fixtures.ByTag("packfile") {
+		if f.DotGitHash == plumbing.ZeroHash {
+			continue
+		}
+
+		b.Run(f.URL, func(b *testing.B) {
+			fs := f.DotGit()
+			storer, err := filesystem.NewStorage(fs)
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			worktree, err := fs.Chroot(filepath.Dir(fs.Root()))
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			repo, err := Open(storer, worktree)
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			for i := 0; i < b.N; i++ {
+				iter, err := repo.Objects()
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				for {
+					_, err := iter.Next()
+					if err == io.EOF {
+						break
+					}
+
+					if err != nil {
+						b.Fatal(err)
+					}
+				}
+
+				iter.Close()
+			}
+		})
+	}
+}
diff --git a/status.go b/status.go
index ef8a500..ecbf793 100644
--- a/status.go
+++ b/status.go
@@ -1,7 +1,10 @@
 package git
 
-import "fmt"
-import "bytes"
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+)
 
 // Status represents the current status of a Worktree.
 // The key of the map is the path of the file.
@@ -17,6 +20,12 @@
 	return s[path]
 }
 
+// IsUntracked checks if the file for the given path is 'Untracked'.
+func (s Status) IsUntracked(path string) bool {
+	stat, ok := s[filepath.ToSlash(path)]
+	return ok && stat.Worktree == Untracked
+}
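+
+// A quick usage sketch (w is an assumed *Worktree):
+//
+//	status, err := w.Status()
+//	if err == nil && status.IsUntracked("foo/bar.go") {
+//		// the file exists in the worktree but is not tracked
+//	}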
+
 // IsClean returns true if all the files are in Unmodified status.
 func (s Status) IsClean() bool {
 	for _, status := range s {
diff --git a/storage/filesystem/dotgit/dotgit.go b/storage/filesystem/dotgit/dotgit.go
index 52b621c..df5cd10 100644
--- a/storage/filesystem/dotgit/dotgit.go
+++ b/storage/filesystem/dotgit/dotgit.go
@@ -57,17 +57,48 @@
 	ErrSymRefTargetNotFound = errors.New("symbolic reference target not found")
 )
 
+// Options holds configuration for the storage.
+type Options struct {
+	// ExclusiveAccess means that the filesystem is not modified externally
+	// while the repo is open.
+	ExclusiveAccess bool
+	// KeepDescriptors makes the file descriptors be reused, but they will
+	// need to be manually closed by calling Close().
+	KeepDescriptors bool
+}
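+
+// A usage sketch, assuming the repository is accessed by this process only:
+//
+//	dg := NewWithOptions(fs, Options{
+//		ExclusiveAccess: true,
+//		KeepDescriptors: true,
+//	})
+//	defer dg.Close() // required when KeepDescriptors is set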
+
 // The DotGit type represents a local git repository on disk. This
 // type is not zero-value-safe, use the New function to initialize it.
 type DotGit struct {
-	fs billy.Filesystem
+	options Options
+	fs      billy.Filesystem
+
+	// incoming object directory information
+	incomingChecked bool
+	incomingDirName string
+
+	objectList []plumbing.Hash
+	objectMap  map[plumbing.Hash]struct{}
+	packList   []plumbing.Hash
+	packMap    map[plumbing.Hash]struct{}
+
+	files map[string]billy.File
 }
 
 // New returns a DotGit value ready to be used. The path argument must
 // be the absolute path of a git repository directory (e.g.
 // "/foo/bar/.git").
 func New(fs billy.Filesystem) *DotGit {
-	return &DotGit{fs: fs}
+	return NewWithOptions(fs, Options{})
+}
+
+// NewWithOptions creates a new DotGit and sets non-default configuration
+// options. See New for complete help.
+func NewWithOptions(fs billy.Filesystem, o Options) *DotGit {
+	return &DotGit{
+		options: o,
+		fs:      fs,
+	}
 }
 
 // Initialize creates all the folder scaffolding.
@@ -97,6 +128,28 @@
 	return nil
 }
 
+// Close closes all opened files.
+func (d *DotGit) Close() error {
+	var firstError error
+	if d.files != nil {
+		for _, f := range d.files {
+			err := f.Close()
+			if err != nil && firstError == nil {
+				firstError = err
+				continue
+			}
+		}
+
+		d.files = nil
+	}
+
+	if firstError != nil {
+		return firstError
+	}
+
+	return nil
+}
+
 // ConfigWriter returns a file pointer for write to the config file
 func (d *DotGit) ConfigWriter() (billy.File, error) {
 	return d.fs.Create(configPath)
@@ -139,11 +192,25 @@
 // NewObjectPack returns a writer for a new packfile; it saves the packfile to
 // disk and also generates and saves the index for the given packfile.
 func (d *DotGit) NewObjectPack() (*PackWriter, error) {
+	d.cleanPackList()
 	return newPackWrite(d.fs)
 }
 
 // ObjectPacks returns the list of available packfiles
 func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) {
+	if !d.options.ExclusiveAccess {
+		return d.objectPacks()
+	}
+
+	err := d.genPackList()
+	if err != nil {
+		return nil, err
+	}
+
+	return d.packList, nil
+}
+
+func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
 	packDir := d.fs.Join(objectsPath, packPath)
 	files, err := d.fs.ReadDir(packDir)
 	if err != nil {
@@ -177,7 +244,22 @@
 }
 
 func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) {
-	pack, err := d.fs.Open(d.objectPackPath(hash, extension))
+	if d.files == nil {
+		d.files = make(map[string]billy.File)
+	}
+
+	err := d.hasPack(hash)
+	if err != nil {
+		return nil, err
+	}
+
+	path := d.objectPackPath(hash, extension)
+	f, ok := d.files[path]
+	if ok {
+		return f, nil
+	}
+
+	pack, err := d.fs.Open(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, ErrPackfileNotFound
@@ -186,20 +268,36 @@
 		return nil, err
 	}
 
+	if d.options.KeepDescriptors && extension == "pack" {
+		d.files[path] = pack
+	}
+
 	return pack, nil
 }
 
 // ObjectPack returns a fs.File of the given packfile
 func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) {
+	err := d.hasPack(hash)
+	if err != nil {
+		return nil, err
+	}
+
 	return d.objectPackOpen(hash, `pack`)
 }
 
 // ObjectPackIdx returns a fs.File of the index file for a given packfile
 func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) {
+	err := d.hasPack(hash)
+	if err != nil {
+		return nil, err
+	}
+
 	return d.objectPackOpen(hash, `idx`)
 }
 
 func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error {
+	d.cleanPackList()
+
 	path := d.objectPackPath(hash, `pack`)
 	if !t.IsZero() {
 		fi, err := d.fs.Stat(path)
@@ -220,12 +318,23 @@
 
 // NewObject returns a writer for a new object file.
 func (d *DotGit) NewObject() (*ObjectWriter, error) {
+	d.cleanObjectList()
+
 	return newObjectWriter(d.fs)
 }
 
 // Objects returns a slice with the hashes of objects found under the
 // .git/objects/ directory.
 func (d *DotGit) Objects() ([]plumbing.Hash, error) {
+	if d.options.ExclusiveAccess {
+		err := d.genObjectList()
+		if err != nil {
+			return nil, err
+		}
+
+		return d.objectList, nil
+	}
+
 	var objects []plumbing.Hash
 	err := d.ForEachObjectHash(func(hash plumbing.Hash) error {
 		objects = append(objects, hash)
@@ -237,9 +346,29 @@
 	return objects, nil
 }
 
-// Objects returns a slice with the hashes of objects found under the
-// .git/objects/ directory.
+// ForEachObjectHash iterates over the hashes of objects found under the
+// .git/objects/ directory and executes the provided function.
 func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error {
+	if !d.options.ExclusiveAccess {
+		return d.forEachObjectHash(fun)
+	}
+
+	err := d.genObjectList()
+	if err != nil {
+		return err
+	}
+
+	for _, h := range d.objectList {
+		err := fun(h)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error {
 	files, err := d.fs.ReadDir(objectsPath)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -274,24 +403,178 @@
 	return nil
 }
 
+func (d *DotGit) cleanObjectList() {
+	d.objectMap = nil
+	d.objectList = nil
+}
+
+func (d *DotGit) genObjectList() error {
+	if d.objectMap != nil {
+		return nil
+	}
+
+	d.objectMap = make(map[plumbing.Hash]struct{})
+	return d.forEachObjectHash(func(h plumbing.Hash) error {
+		d.objectList = append(d.objectList, h)
+		d.objectMap[h] = struct{}{}
+
+		return nil
+	})
+}
+
+func (d *DotGit) hasObject(h plumbing.Hash) error {
+	if !d.options.ExclusiveAccess {
+		return nil
+	}
+
+	err := d.genObjectList()
+	if err != nil {
+		return err
+	}
+
+	_, ok := d.objectMap[h]
+	if !ok {
+		return plumbing.ErrObjectNotFound
+	}
+
+	return nil
+}
+
+func (d *DotGit) cleanPackList() {
+	d.packMap = nil
+	d.packList = nil
+}
+
+func (d *DotGit) genPackList() error {
+	if d.packMap != nil {
+		return nil
+	}
+
+	op, err := d.objectPacks()
+	if err != nil {
+		return err
+	}
+
+	d.packMap = make(map[plumbing.Hash]struct{})
+	d.packList = nil
+
+	for _, h := range op {
+		d.packList = append(d.packList, h)
+		d.packMap[h] = struct{}{}
+	}
+
+	return nil
+}
+
+func (d *DotGit) hasPack(h plumbing.Hash) error {
+	if !d.options.ExclusiveAccess {
+		return nil
+	}
+
+	err := d.genPackList()
+	if err != nil {
+		return err
+	}
+
+	_, ok := d.packMap[h]
+	if !ok {
+		return ErrPackfileNotFound
+	}
+
+	return nil
+}
+
 func (d *DotGit) objectPath(h plumbing.Hash) string {
 	hash := h.String()
 	return d.fs.Join(objectsPath, hash[0:2], hash[2:40])
 }
 
+// incomingObjectPath adds support for finding objects in an "incoming"
+// directory, so that go-git can be used to write a git pre-receive hook
+// that deals with quarantined incoming objects.
+//
+// More on git hooks found here : https://git-scm.com/docs/githooks
+// More on 'quarantine'/incoming directory here:
+//     https://git-scm.com/docs/git-receive-pack
+func (d *DotGit) incomingObjectPath(h plumbing.Hash) string {
+	hString := h.String()
+
+	if d.incomingDirName == "" {
+		return d.fs.Join(objectsPath, hString[0:2], hString[2:40])
+	}
+
+	return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40])
+}
+
+// hasIncomingObjects searches for an incoming directory and keeps its name
+// so it doesn't have to be found each time an object is accessed.
+func (d *DotGit) hasIncomingObjects() bool {
+	if !d.incomingChecked {
+		directoryContents, err := d.fs.ReadDir(objectsPath)
+		if err == nil {
+			for _, file := range directoryContents {
+				if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() {
+					d.incomingDirName = file.Name()
+				}
+			}
+		}
+
+		d.incomingChecked = true
+	}
+
+	return d.incomingDirName != ""
+}
+
 // Object returns a fs.File pointing to the object file, if it exists
 func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) {
-	return d.fs.Open(d.objectPath(h))
+	err := d.hasObject(h)
+	if err != nil {
+		return nil, err
+	}
+
+	obj1, err1 := d.fs.Open(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		obj2, err2 := d.fs.Open(d.incomingObjectPath(h))
+		if err2 != nil {
+			return obj1, err1
+		}
+		return obj2, err2
+	}
+	return obj1, err1
 }
 
 // ObjectStat returns an os.FileInfo pointing to the object file, if it exists
 func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) {
-	return d.fs.Stat(d.objectPath(h))
+	err := d.hasObject(h)
+	if err != nil {
+		return nil, err
+	}
+
+	obj1, err1 := d.fs.Stat(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		obj2, err2 := d.fs.Stat(d.incomingObjectPath(h))
+		if err2 != nil {
+			return obj1, err1
+		}
+		return obj2, err2
+	}
+	return obj1, err1
 }
 
 // ObjectDelete removes the object file, if it exists
 func (d *DotGit) ObjectDelete(h plumbing.Hash) error {
-	return d.fs.Remove(d.objectPath(h))
+	d.cleanObjectList()
+
+	err1 := d.fs.Remove(d.objectPath(h))
+	if os.IsNotExist(err1) && d.hasIncomingObjects() {
+		err2 := d.fs.Remove(d.incomingObjectPath(h))
+		if err2 != nil {
+			return err1
+		}
+		return err2
+	}
+	return err1
 }
 
 func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) {
@@ -469,7 +752,7 @@
-	// File mode is retrieved from a constant defined in the target specific
-	// files (dotgit_rewrite_packed_refs_*). Some modes are not available
-	// in all filesystems.
+	// The file mode is decided at runtime by openAndLockPackedRefsMode
+	// (see dotgit_rewrite_packed_refs.go), since some modes are not
+	// available in all filesystems.
-	openFlags := openAndLockPackedRefsMode
+	openFlags := d.openAndLockPackedRefsMode()
 	if doCreate {
 		openFlags |= os.O_CREATE
 	}
@@ -784,6 +1067,11 @@
 	return alternates, nil
 }
 
+// Fs returns the underlying filesystem of the DotGit folder.
+func (d *DotGit) Fs() billy.Filesystem {
+	return d.fs
+}
+
 func isHex(s string) bool {
 	for _, b := range []byte(s) {
 		if isNum(b) {
diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
new file mode 100644
index 0000000..7f1c02c
--- /dev/null
+++ b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go
@@ -0,0 +1,81 @@
+package dotgit
+
+import (
+	"io"
+	"os"
+	"runtime"
+
+	"gopkg.in/src-d/go-billy.v4"
+	"gopkg.in/src-d/go-git.v4/utils/ioutil"
+)
+
+func (d *DotGit) openAndLockPackedRefsMode() int {
+	if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) {
+		return os.O_RDWR
+	}
+
+	return os.O_RDONLY
+}
+
+func (d *DotGit) rewritePackedRefsWhileLocked(
+	tmp billy.File, pr billy.File) error {
+	// Try plain rename. If we aren't using the bare Windows filesystem as the
+	// storage layer, we might be able to get away with a rename over a locked
+	// file.
+	err := d.fs.Rename(tmp.Name(), pr.Name())
+	if err == nil {
+		return nil
+	}
+
+	// If the filesystem does not support rename (e.g. sivafs),
+	// fall back to a full copy.
+	if err == billy.ErrNotSupported {
+		return d.copyNewFile(tmp, pr)
+	}
+
+	if runtime.GOOS != "windows" {
+		return err
+	}
+
+	// Otherwise, Windows doesn't let us rename over a locked file, so
+	// we have to do a straight copy.  Unfortunately this could result
+	// in a partially-written file if the process fails before the
+	// copy completes.
+	return d.copyToExistingFile(tmp, pr)
+}
+
+func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error {
+	_, err := pr.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	err = pr.Truncate(0)
+	if err != nil {
+		return err
+	}
+	_, err = tmp.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(pr, tmp)
+
+	return err
+}
+
+func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) {
+	prWrite, err := d.fs.Create(pr.Name())
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(prWrite, &err)
+
+	_, err = tmp.Seek(0, io.SeekStart)
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(prWrite, tmp)
+
+	return err
+}
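
The three build-tag variants deleted below used to select this behavior at compile time; the new file above collapses them into a single runtime dispatch on billy capabilities. The same pattern can guard any optional filesystem operation, as in this hedged sketch (copyFallback is a hypothetical helper):

	// Only attempt an in-place rename when the filesystem advertises
	// read-write support; otherwise use a copy-based fallback.
	if billy.CapabilityCheck(fs, billy.ReadAndWriteCapability) {
		err = fs.Rename(tmp.Name(), dst)
	} else {
		err = copyFallback(fs, tmp.Name(), dst)
	}
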
diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go
deleted file mode 100644
index c760793..0000000
--- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_nix.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// +build !windows,!norwfs
-
-package dotgit
-
-import (
-	"os"
-
-	"gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDWR
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
-	tmp billy.File, pr billy.File) error {
-	// On non-Windows platforms, we can have atomic rename.
-	return d.fs.Rename(tmp.Name(), pr.Name())
-}
diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go
deleted file mode 100644
index 6e43b42..0000000
--- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_norwfs.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build norwfs
-
-package dotgit
-
-import (
-	"io"
-	"os"
-
-	"gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDONLY
-
-// Instead of renaming that can not be supported in simpler filesystems
-// a full copy is done.
-func (d *DotGit) rewritePackedRefsWhileLocked(
-	tmp billy.File, pr billy.File) error {
-
-	prWrite, err := d.fs.Create(pr.Name())
-	if err != nil {
-		return err
-	}
-
-	defer prWrite.Close()
-
-	_, err = tmp.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-
-	_, err = io.Copy(prWrite, tmp)
-
-	return err
-}
diff --git a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go b/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go
deleted file mode 100644
index 897d2c9..0000000
--- a/storage/filesystem/dotgit/dotgit_rewrite_packed_refs_windows.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build windows,!norwfs
-
-package dotgit
-
-import (
-	"io"
-	"os"
-
-	"gopkg.in/src-d/go-billy.v4"
-)
-
-const openAndLockPackedRefsMode = os.O_RDWR
-
-func (d *DotGit) rewritePackedRefsWhileLocked(
-	tmp billy.File, pr billy.File) error {
-	// If we aren't using the bare Windows filesystem as the storage
-	// layer, we might be able to get away with a rename over a locked
-	// file.
-	err := d.fs.Rename(tmp.Name(), pr.Name())
-	if err == nil {
-		return nil
-	}
-
-	// Otherwise, Windows doesn't let us rename over a locked file, so
-	// we have to do a straight copy.  Unfortunately this could result
-	// in a partially-written file if the process fails before the
-	// copy completes.
-	_, err = pr.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	err = pr.Truncate(0)
-	if err != nil {
-		return err
-	}
-	_, err = tmp.Seek(0, io.SeekStart)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(pr, tmp)
-	return err
-}
diff --git a/storage/filesystem/dotgit/dotgit_test.go b/storage/filesystem/dotgit/dotgit_test.go
index 7733eef..308c6b7 100644
--- a/storage/filesystem/dotgit/dotgit_test.go
+++ b/storage/filesystem/dotgit/dotgit_test.go
@@ -9,6 +9,7 @@
 	"strings"
 	"testing"
 
+	"gopkg.in/src-d/go-billy.v4"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 
 	. "gopkg.in/check.v1"
@@ -424,6 +425,18 @@
 	fs := f.DotGit()
 	dir := New(fs)
 
+	testObjectPacks(c, fs, dir, f)
+}
+
+func (s *SuiteDotGit) TestObjectPacksExclusive(c *C) {
+	f := fixtures.Basic().ByTag(".git").One()
+	fs := f.DotGit()
+	dir := NewWithOptions(fs, Options{ExclusiveAccess: true})
+
+	testObjectPacks(c, fs, dir, f)
+}
+
+func testObjectPacks(c *C, fs billy.Filesystem, dir *DotGit, f *fixtures.Fixture) {
 	hashes, err := dir.ObjectPacks()
 	c.Assert(err, IsNil)
 	c.Assert(hashes, HasLen, 1)
@@ -452,6 +465,45 @@
 	c.Assert(filepath.Ext(pack.Name()), Equals, ".pack")
 }
 
+func (s *SuiteDotGit) TestObjectPackWithKeepDescriptors(c *C) {
+	f := fixtures.Basic().ByTag(".git").One()
+	fs := f.DotGit()
+	dir := NewWithOptions(fs, Options{KeepDescriptors: true})
+
+	pack, err := dir.ObjectPack(f.PackfileHash)
+	c.Assert(err, IsNil)
+	c.Assert(filepath.Ext(pack.Name()), Equals, ".pack")
+
+	// Move to a specific offset
+	pack.Seek(42, os.SEEK_SET)
+
+	pack2, err := dir.ObjectPack(f.PackfileHash)
+	c.Assert(err, IsNil)
+
+	// If the file is the same, the offset should be the same
+	offset, err := pack2.Seek(0, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(offset, Equals, int64(42))
+
+	err = dir.Close()
+	c.Assert(err, IsNil)
+
+	pack2, err = dir.ObjectPack(f.PackfileHash)
+	c.Assert(err, IsNil)
+
+	// If the file is opened again, its offset should be 0
+	offset, err = pack2.Seek(0, os.SEEK_CUR)
+	c.Assert(err, IsNil)
+	c.Assert(offset, Equals, int64(0))
+
+	err = pack2.Close()
+	c.Assert(err, IsNil)
+
+	err = dir.Close()
+	c.Assert(err, NotNil)
+}
+
 func (s *SuiteDotGit) TestObjectPackIdx(c *C) {
 	f := fixtures.Basic().ByTag(".git").One()
 	fs := f.DotGit()
@@ -506,6 +558,17 @@
 	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
 	dir := New(fs)
 
+	testObjects(c, fs, dir)
+}
+
+func (s *SuiteDotGit) TestObjectsExclusive(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	dir := NewWithOptions(fs, Options{ExclusiveAccess: true})
+
+	testObjects(c, fs, dir)
+}
+
+func testObjects(c *C, fs billy.Filesystem, dir *DotGit) {
 	hashes, err := dir.Objects()
 	c.Assert(err, IsNil)
 	c.Assert(hashes, HasLen, 187)
@@ -537,6 +600,57 @@
 		file.Name(), fs.Join("objects", "03", "db8e1fbe133a480f2867aac478fd866686d69e")),
 		Equals, true,
 	)
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+	fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+	fs.Create(incomingFilePath)
+
+	file, err = dir.Object(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectStat(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	dir := New(fs)
+
+	hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+	_, err := dir.ObjectStat(hash)
+	c.Assert(err, IsNil)
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingFilePath := fs.Join(incomingDirPath, incomingHash[0:2], incomingHash[2:40])
+	fs.MkdirAll(incomingDirPath, os.FileMode(0755))
+	fs.Create(incomingFilePath)
+
+	_, err = dir.ObjectStat(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
+}
+
+func (s *SuiteDotGit) TestObjectDelete(c *C) {
+	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
+	dir := New(fs)
+
+	hash := plumbing.NewHash("03db8e1fbe133a480f2867aac478fd866686d69e")
+	err := dir.ObjectDelete(hash)
+	c.Assert(err, IsNil)
+
+	incomingHash := "9d25e0f9bde9f82882b49fe29117b9411cb157b7" // made-up hash
+	incomingDirPath := fs.Join("objects", "incoming-123456")
+	incomingSubDirPath := fs.Join(incomingDirPath, incomingHash[0:2])
+	incomingFilePath := fs.Join(incomingSubDirPath, incomingHash[2:40])
+
+	err = fs.MkdirAll(incomingSubDirPath, os.FileMode(0755))
+	c.Assert(err, IsNil)
+
+	f, err := fs.Create(incomingFilePath)
+	c.Assert(err, IsNil)
+
+	err = f.Close()
+	c.Assert(err, IsNil)
+
+	err = dir.ObjectDelete(plumbing.NewHash(incomingHash))
+	c.Assert(err, IsNil)
 }
 
 func (s *SuiteDotGit) TestObjectNotFound(c *C) {
diff --git a/storage/filesystem/dotgit/writers.go b/storage/filesystem/dotgit/writers.go
index c2b420f..93d2d8c 100644
--- a/storage/filesystem/dotgit/writers.go
+++ b/storage/filesystem/dotgit/writers.go
@@ -20,13 +20,14 @@
 // is renamed/moved (depending on the Filesystem implementation) to the final
 // location. If the PackWriter is not used, nothing is written.
 type PackWriter struct {
-	Notify func(plumbing.Hash, *packfile.Index)
+	Notify func(plumbing.Hash, *idxfile.Writer)
 
 	fs       billy.Filesystem
 	fr, fw   billy.File
 	synced   *syncedReader
 	checksum plumbing.Hash
-	index    *packfile.Index
+	parser   *packfile.Parser
+	writer   *idxfile.Writer
 	result   chan error
 }
 
@@ -55,20 +56,21 @@
 
 func (w *PackWriter) buildIndex() {
 	s := packfile.NewScanner(w.synced)
-	d, err := packfile.NewDecoder(s, nil)
+	w.writer = new(idxfile.Writer)
+	var err error
+	w.parser, err = packfile.NewParser(s, w.writer)
 	if err != nil {
 		w.result <- err
 		return
 	}
 
-	checksum, err := d.Decode()
+	checksum, err := w.parser.Parse()
 	if err != nil {
 		w.result <- err
 		return
 	}
 
 	w.checksum = checksum
-	w.index = d.Index()
 	w.result <- err
 }
 
@@ -92,8 +94,8 @@
 // was written, the tempfiles are deleted without writing a packfile.
 func (w *PackWriter) Close() error {
 	defer func() {
-		if w.Notify != nil && w.index != nil && w.index.Size() > 0 {
-			w.Notify(w.checksum, w.index)
+		if w.Notify != nil && w.writer != nil && w.writer.Finished() {
+			w.Notify(w.checksum, w.writer)
 		}
 
 		close(w.result)
@@ -115,7 +117,7 @@
 		return err
 	}
 
-	if w.index == nil || w.index.Size() == 0 {
+	if w.writer == nil || !w.writer.Finished() {
 		return w.clean()
 	}
 
@@ -145,11 +147,13 @@
 }
 
 func (w *PackWriter) encodeIdx(writer io.Writer) error {
-	idx := w.index.ToIdxFile()
-	idx.PackfileChecksum = w.checksum
-	idx.Version = idxfile.VersionSupported
+	idx, err := w.writer.Index()
+	if err != nil {
+		return err
+	}
+
 	e := idxfile.NewEncoder(writer)
-	_, err := e.Encode(idx)
+	_, err = e.Encode(idx)
 	return err
 }
 
@@ -209,7 +213,6 @@
 
 func (s *syncedReader) wake() {
 	if s.isBlocked() {
-		//	fmt.Println("wake")
 		atomic.StoreUint32(&s.blocked, 0)
 		s.news <- true
 	}
@@ -220,7 +223,6 @@
 	written := atomic.LoadUint64(&s.written)
 	if read >= written {
 		atomic.StoreUint32(&s.blocked, 1)
-		//	fmt.Println("sleep", read, written)
 		<-s.news
 	}
 
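
For reference, a hedged sketch of the new Notify contract (the indexes map is illustrative): Close only fires Notify once the parser has finished, so writer.Index() can be called inside the callback to materialize the idxfile.Index:

	w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
		// Finished() is already true here; Index() builds (and caches)
		// the in-memory index of the packfile identified by h.
		index, err := writer.Index()
		if err == nil {
			indexes[h] = index // hypothetical map[plumbing.Hash]idxfile.Index
		}
	}
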
diff --git a/storage/filesystem/dotgit/writers_test.go b/storage/filesystem/dotgit/writers_test.go
index bf00762..5a5f7b4 100644
--- a/storage/filesystem/dotgit/writers_test.go
+++ b/storage/filesystem/dotgit/writers_test.go
@@ -9,6 +9,7 @@
 	"strconv"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
+	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 
 	. "gopkg.in/check.v1"
@@ -148,7 +149,7 @@
 	w, err := newPackWrite(fs)
 	c.Assert(err, IsNil)
 
-	w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
+	w.Notify = func(h plumbing.Hash, idx *idxfile.Writer) {
 		c.Fatal("unexpected call to PackWriter.Notify")
 	}
 
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 9ffe4dc..3545e27 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -12,23 +12,34 @@
 	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
 	"gopkg.in/src-d/go-git.v4/utils/ioutil"
 
 	"gopkg.in/src-d/go-billy.v4"
 )
 
 type ObjectStorage struct {
+	options Options
+
 	// deltaBaseCache is an object cache used to cache delta bases.
 	deltaBaseCache cache.Object
 
 	dir   *dotgit.DotGit
-	index map[plumbing.Hash]*packfile.Index
+	index map[plumbing.Hash]idxfile.Index
 }
 
 // NewObjectStorage creates a new ObjectStorage with the given .git directory.
 func NewObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) {
+	return NewObjectStorageWithOptions(dir, Options{})
+}
+
+// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git
+// directory and sets its options.
+func NewObjectStorageWithOptions(
+	dir *dotgit.DotGit,
+	ops Options,
+) (ObjectStorage, error) {
 	s := ObjectStorage{
+		options:        ops,
 		deltaBaseCache: cache.NewObjectLRUDefault(),
 		dir:            dir,
 	}
@@ -41,7 +52,7 @@
 		return nil
 	}
 
-	s.index = make(map[plumbing.Hash]*packfile.Index)
+	s.index = make(map[plumbing.Hash]idxfile.Index)
 	packs, err := s.dir.ObjectPacks()
 	if err != nil {
 		return err
@@ -63,13 +74,14 @@
 	}
 
 	defer ioutil.CheckClose(f, &err)
-	idxf := idxfile.NewIdxfile()
+
+	idxf := idxfile.NewMemoryIndex()
 	d := idxfile.NewDecoder(f)
 	if err = d.Decode(idxf); err != nil {
 		return err
 	}
 
-	s.index[h] = packfile.NewIndexFromIdxFile(idxf)
+	s.index[h] = idxf
 	return err
 }
 
@@ -87,8 +99,11 @@
 		return nil, err
 	}
 
-	w.Notify = func(h plumbing.Hash, idx *packfile.Index) {
-		s.index[h] = idx
+	w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) {
+		index, err := writer.Index()
+		if err == nil {
+			s.index[h] = index
+		}
 	}
 
 	return w, nil
@@ -266,7 +281,9 @@
 		return nil, err
 	}
 
-	defer ioutil.CheckClose(f, &err)
+	if !s.options.KeepDescriptors {
+		defer ioutil.CheckClose(f, &err)
+	}
 
 	idx := s.index[pack]
 	if canBeDelta {
@@ -278,30 +295,37 @@
 
 func (s *ObjectStorage) decodeObjectAt(
 	f billy.File,
-	idx *packfile.Index,
-	offset int64) (plumbing.EncodedObject, error) {
-	if _, err := f.Seek(0, io.SeekStart); err != nil {
+	idx idxfile.Index,
+	offset int64,
+) (plumbing.EncodedObject, error) {
+	hash, err := idx.FindHash(offset)
+	if err == nil {
+		obj, ok := s.deltaBaseCache.Get(hash)
+		if ok {
+			return obj, nil
+		}
+	}
+
+	if err != nil && err != plumbing.ErrObjectNotFound {
 		return nil, err
 	}
 
-	p := packfile.NewScanner(f)
-
-	d, err := packfile.NewDecoderWithCache(p, memory.NewStorage(),
-		s.deltaBaseCache)
-	if err != nil {
-		return nil, err
+	var p *packfile.Packfile
+	if s.deltaBaseCache != nil {
+		p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.deltaBaseCache)
+	} else {
+		p = packfile.NewPackfile(idx, s.dir.Fs(), f)
 	}
 
-	d.SetIndex(idx)
-	obj, err := d.DecodeObjectAt(offset)
-	return obj, err
+	return p.GetByOffset(offset)
 }
 
 func (s *ObjectStorage) decodeDeltaObjectAt(
 	f billy.File,
-	idx *packfile.Index,
+	idx idxfile.Index,
 	offset int64,
-	hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	hash plumbing.Hash,
+) (plumbing.EncodedObject, error) {
 	if _, err := f.Seek(0, io.SeekStart); err != nil {
 		return nil, err
 	}
@@ -324,12 +348,10 @@
 	case plumbing.REFDeltaObject:
 		base = header.Reference
 	case plumbing.OFSDeltaObject:
-		e, ok := idx.LookupOffset(uint64(header.OffsetReference))
-		if !ok {
-			return nil, plumbing.ErrObjectNotFound
+		base, err = idx.FindHash(header.OffsetReference)
+		if err != nil {
+			return nil, err
 		}
-
-		base = e.Hash
 	default:
 		return s.decodeObjectAt(f, idx, offset)
 	}
@@ -350,8 +372,9 @@
 
 func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
 	for packfile, index := range s.index {
-		if e, ok := index.LookupHash(h); ok {
-			return packfile, e.Hash, int64(e.Offset)
+		offset, err := index.FindOffset(h)
+		if err == nil {
+			return packfile, h, offset
 		}
 	}
 
@@ -398,11 +421,16 @@
 			if err != nil {
 				return nil, err
 			}
-			return newPackfileIter(pack, t, seen, s.index[h], s.deltaBaseCache)
+			return newPackfileIter(s.dir.Fs(), pack, t, seen, s.index[h], s.deltaBaseCache)
 		},
 	}, nil
 }
 
+// Close closes all opened files.
+func (s *ObjectStorage) Close() error {
+	return s.dir.Close()
+}
+
 type lazyPackfilesIter struct {
 	hashes []plumbing.Hash
 	open   func(h plumbing.Hash) (storer.EncodedObjectIter, error)
@@ -451,76 +479,94 @@
 }
 
 type packfileIter struct {
-	f billy.File
-	d *packfile.Decoder
-	t plumbing.ObjectType
-
-	seen     map[plumbing.Hash]struct{}
-	position uint32
-	total    uint32
+	pack billy.File
+	iter storer.EncodedObjectIter
+	seen map[plumbing.Hash]struct{}
 }
 
-func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) {
-	return newPackfileIter(f, t, make(map[plumbing.Hash]struct{}), nil, nil)
-}
-
-func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]struct{},
-	index *packfile.Index, cache cache.Object) (storer.EncodedObjectIter, error) {
-	s := packfile.NewScanner(f)
-	_, total, err := s.Header()
-	if err != nil {
+// NewPackfileIter returns a new EncodedObjectIter for the provided packfile
+// and object type. The packfile and index file will be closed after they
+// are used.
+func NewPackfileIter(
+	fs billy.Filesystem,
+	f billy.File,
+	idxFile billy.File,
+	t plumbing.ObjectType,
+) (storer.EncodedObjectIter, error) {
+	idx := idxfile.NewMemoryIndex()
+	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
 		return nil, err
 	}
 
-	d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t, cache)
-	if err != nil {
+	if err := idxFile.Close(); err != nil {
 		return nil, err
 	}
 
-	d.SetIndex(index)
+	return newPackfileIter(fs, f, t, make(map[plumbing.Hash]struct{}), idx, nil)
+}
+
+func newPackfileIter(
+	fs billy.Filesystem,
+	f billy.File,
+	t plumbing.ObjectType,
+	seen map[plumbing.Hash]struct{},
+	index idxfile.Index,
+	cache cache.Object,
+) (storer.EncodedObjectIter, error) {
+	var p *packfile.Packfile
+	if cache != nil {
+		p = packfile.NewPackfileWithCache(index, fs, f, cache)
+	} else {
+		p = packfile.NewPackfile(index, fs, f)
+	}
+
+	iter, err := p.GetByType(t)
+	if err != nil {
+		return nil, err
+	}
 
 	return &packfileIter{
-		f: f,
-		d: d,
-		t: t,
-
-		total: total,
-		seen:  seen,
+		pack: f,
+		iter: iter,
+		seen: seen,
 	}, nil
 }
 
 func (iter *packfileIter) Next() (plumbing.EncodedObject, error) {
 	for {
-		if iter.position >= iter.total {
-			return nil, io.EOF
-		}
-
-		obj, err := iter.d.DecodeObject()
+		obj, err := iter.iter.Next()
 		if err != nil {
 			return nil, err
 		}
 
-		iter.position++
-		if obj == nil {
-			continue
-		}
-
 		if _, ok := iter.seen[obj.Hash()]; ok {
-			return iter.Next()
+			continue
 		}
 
 		return obj, nil
 	}
 }
 
-// ForEach is never called since is used inside of a MultiObjectIterator
 func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error {
-	return nil
+	for {
+		o, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				iter.Close()
+				return nil
+			}
+			return err
+		}
+
+		if err := cb(o); err != nil {
+			return err
+		}
+	}
 }
 
 func (iter *packfileIter) Close() {
-	iter.f.Close()
-	iter.d.Close()
+	iter.iter.Close()
+	_ = iter.pack.Close()
 }
 
 type objectsIter struct {
@@ -548,9 +594,20 @@
 	return obj, err
 }
 
-// ForEach is never called since is used inside of a MultiObjectIterator
 func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error {
-	return nil
+	for {
+		o, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return err
+		}
+
+		if err := cb(o); err != nil {
+			return err
+		}
+	}
 }
 
 func (iter *objectsIter) Close() {
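
A usage sketch of the new NewPackfileIter signature (fixture-style variables, error handling compressed): both descriptors are handed over, the idx file is closed once decoded, and the pack file is closed when iteration finishes:

	pack, err := dg.ObjectPack(h) // billy.File for the .pack
	if err != nil {
		return err
	}

	idxf, err := dg.ObjectPackIdx(h) // billy.File for the matching .idx
	if err != nil {
		return err
	}

	iter, err := NewPackfileIter(fs, pack, idxf, plumbing.CommitObject)
	if err != nil {
		return err
	}

	return iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash()) // every commit stored in this packfile
		return nil
	})
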
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index ecd6beb..4a921a9 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -1,6 +1,10 @@
 package filesystem
 
 import (
+	"io/ioutil"
+	"os"
+	"testing"
+
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit"
 
@@ -10,17 +14,16 @@
 
 type FsSuite struct {
 	fixtures.Suite
-	Types []plumbing.ObjectType
 }
 
-var _ = Suite(&FsSuite{
-	Types: []plumbing.ObjectType{
-		plumbing.CommitObject,
-		plumbing.TagObject,
-		plumbing.TreeObject,
-		plumbing.BlobObject,
-	},
-})
+var objectTypes = []plumbing.ObjectType{
+	plumbing.CommitObject,
+	plumbing.TagObject,
+	plumbing.TreeObject,
+	plumbing.BlobObject,
+}
+
+var _ = Suite(&FsSuite{})
 
 func (s *FsSuite) TestGetFromObjectFile(c *C) {
 	fs := fixtures.ByTag(".git").ByTag("unpacked").One().DotGit()
@@ -46,6 +49,42 @@
 	})
 }
 
+func (s *FsSuite) TestGetFromPackfileKeepDescriptors(c *C) {
+	fixtures.Basic().ByTag(".git").Test(c, func(f *fixtures.Fixture) {
+		fs := f.DotGit()
+		dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true})
+		o, err := NewObjectStorageWithOptions(dg, Options{KeepDescriptors: true})
+		c.Assert(err, IsNil)
+
+		expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+		obj, err := o.EncodedObject(plumbing.AnyObject, expected)
+		c.Assert(err, IsNil)
+		c.Assert(obj.Hash(), Equals, expected)
+
+		packfiles, err := dg.ObjectPacks()
+		c.Assert(err, IsNil)
+
+		pack1, err := dg.ObjectPack(packfiles[0])
+		c.Assert(err, IsNil)
+
+		pack1.Seek(42, os.SEEK_SET)
+
+		err = o.Close()
+		c.Assert(err, IsNil)
+
+		pack2, err := dg.ObjectPack(packfiles[0])
+		c.Assert(err, IsNil)
+
+		offset, err := pack2.Seek(0, os.SEEK_CUR)
+		c.Assert(err, IsNil)
+		c.Assert(offset, Equals, int64(0))
+
+		err = o.Close()
+		c.Assert(err, IsNil)
+	})
+}
+
 func (s *FsSuite) TestGetFromPackfileMultiplePackfiles(c *C) {
 	fs := fixtures.ByTag(".git").ByTag("multi-packfile").One().DotGit()
 	o, err := NewObjectStorage(dotgit.New(fs))
@@ -84,7 +123,7 @@
 
 func (s *FsSuite) TestIterWithType(c *C) {
 	fixtures.ByTag(".git").Test(c, func(f *fixtures.Fixture) {
-		for _, t := range s.Types {
+		for _, t := range objectTypes {
 			fs := f.DotGit()
 			o, err := NewObjectStorage(dotgit.New(fs))
 			c.Assert(err, IsNil)
@@ -108,14 +147,18 @@
 		fs := f.DotGit()
 		dg := dotgit.New(fs)
 
-		for _, t := range s.Types {
+		for _, t := range objectTypes {
 			ph, err := dg.ObjectPacks()
 			c.Assert(err, IsNil)
 
 			for _, h := range ph {
 				f, err := dg.ObjectPack(h)
 				c.Assert(err, IsNil)
-				iter, err := NewPackfileIter(f, t)
+
+				idxf, err := dg.ObjectPackIdx(h)
+				c.Assert(err, IsNil)
+
+				iter, err := NewPackfileIter(fs, f, idxf, t)
 				c.Assert(err, IsNil)
 				err = iter.ForEach(func(o plumbing.EncodedObject) error {
 					c.Assert(o.Type(), Equals, t)
@@ -128,3 +171,159 @@
 	})
 
 }
+
+func BenchmarkPackfileIter(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	for _, f := range fixtures.ByTag(".git") {
+		b.Run(f.URL, func(b *testing.B) {
+			fs := f.DotGit()
+			dg := dotgit.New(fs)
+
+			for i := 0; i < b.N; i++ {
+				for _, t := range objectTypes {
+					ph, err := dg.ObjectPacks()
+					if err != nil {
+						b.Fatal(err)
+					}
+
+					for _, h := range ph {
+						f, err := dg.ObjectPack(h)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						idxf, err := dg.ObjectPackIdx(h)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						iter, err := NewPackfileIter(fs, f, idxf, t)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						err = iter.ForEach(func(o plumbing.EncodedObject) error {
+							if o.Type() != t {
+								b.Errorf("expecting %s, got %s", t, o.Type())
+							}
+							return nil
+						})
+
+						if err != nil {
+							b.Fatal(err)
+						}
+					}
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkPackfileIterReadContent(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	for _, f := range fixtures.ByTag(".git") {
+		b.Run(f.URL, func(b *testing.B) {
+			fs := f.DotGit()
+			dg := dotgit.New(fs)
+
+			for i := 0; i < b.N; i++ {
+				for _, t := range objectTypes {
+					ph, err := dg.ObjectPacks()
+					if err != nil {
+						b.Fatal(err)
+					}
+
+					for _, h := range ph {
+						f, err := dg.ObjectPack(h)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						idxf, err := dg.ObjectPackIdx(h)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						iter, err := NewPackfileIter(fs, f, idxf, t)
+						if err != nil {
+							b.Fatal(err)
+						}
+
+						err = iter.ForEach(func(o plumbing.EncodedObject) error {
+							if o.Type() != t {
+								b.Errorf("expecting %s, got %s", t, o.Type())
+							}
+
+							r, err := o.Reader()
+							if err != nil {
+								b.Fatal(err)
+							}
+
+							if _, err := ioutil.ReadAll(r); err != nil {
+								b.Fatal(err)
+							}
+
+							return r.Close()
+						})
+
+						if err != nil {
+							b.Fatal(err)
+						}
+					}
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkGetObjectFromPackfile(b *testing.B) {
+	if err := fixtures.Init(); err != nil {
+		b.Fatal(err)
+	}
+
+	defer func() {
+		if err := fixtures.Clean(); err != nil {
+			b.Fatal(err)
+		}
+	}()
+
+	for _, f := range fixtures.Basic() {
+		b.Run(f.URL, func(b *testing.B) {
+			fs := f.DotGit()
+			o, err := NewObjectStorage(dotgit.New(fs))
+			if err != nil {
+				b.Fatal(err)
+			}
+
+			for i := 0; i < b.N; i++ {
+				expected := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")
+				obj, err := o.EncodedObject(plumbing.AnyObject, expected)
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				if obj.Hash() != expected {
+					b.Errorf("expecting %s, got %s", expected, obj.Hash())
+				}
+			}
+		})
+	}
+}
diff --git a/storage/filesystem/storage.go b/storage/filesystem/storage.go
index 622bb4a..7fae789 100644
--- a/storage/filesystem/storage.go
+++ b/storage/filesystem/storage.go
@@ -22,10 +22,33 @@
 	ModuleStorage
 }
 
+// Options holds configuration for the storage.
+type Options struct {
+	// ExclusiveAccess means that the filesystem is not modified externally
+	// while the repo is open.
+	ExclusiveAccess bool
+	// KeepDescriptors makes the file descriptors be reused, but they will
+	// need to be manually closed by calling Close().
+	KeepDescriptors bool
+}
+
 // NewStorage returns a new Storage backed by a given `fs.Filesystem`
 func NewStorage(fs billy.Filesystem) (*Storage, error) {
-	dir := dotgit.New(fs)
-	o, err := NewObjectStorage(dir)
+	return NewStorageWithOptions(fs, Options{})
+}
+
+// NewStorageWithOptions returns a new Storage backed by a given
+// `fs.Filesystem` and configured with the given options.
+func NewStorageWithOptions(
+	fs billy.Filesystem,
+	ops Options,
+) (*Storage, error) {
+	dirOps := dotgit.Options{
+		ExclusiveAccess: ops.ExclusiveAccess,
+		KeepDescriptors: ops.KeepDescriptors,
+	}
+
+	dir := dotgit.NewWithOptions(fs, dirOps)
+	o, err := NewObjectStorageWithOptions(dir, ops)
 	if err != nil {
 		return nil, err
 	}
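
A hedged usage sketch of the options, mirroring the tests below (fs and hash are fixture-style placeholders): KeepDescriptors keeps pack descriptors open between reads, so the caller owns the final Close:

	dg := dotgit.NewWithOptions(fs, dotgit.Options{KeepDescriptors: true})
	o, err := NewObjectStorageWithOptions(dg, Options{KeepDescriptors: true})
	if err != nil {
		return err
	}

	// This read reuses the already-open pack descriptor instead of
	// reopening the packfile.
	if _, err := o.EncodedObject(plumbing.AnyObject, hash); err != nil {
		return err
	}

	// With KeepDescriptors set, Close is required to release descriptors.
	return o.Close()
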
diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go
index 4d9ba6f..7f85ef5 100644
--- a/storage/filesystem/storage_test.go
+++ b/storage/filesystem/storage_test.go
@@ -26,6 +26,10 @@
 	storage, err := NewStorage(osfs.New(s.dir))
 	c.Assert(err, IsNil)
 
+	setUpTest(s, c, storage)
+}
+
+func setUpTest(s *StorageSuite, c *C, storage *Storage) {
 	// ensure that right interfaces are implemented
 	var _ storer.EncodedObjectStorer = storage
 	var _ storer.IndexStorer = storage
@@ -51,3 +55,19 @@
 	c.Assert(err, IsNil)
 	c.Assert(fis, HasLen, 0)
 }
+
+type StorageExclusiveSuite struct {
+	StorageSuite
+}
+
+var _ = Suite(&StorageExclusiveSuite{})
+
+func (s *StorageExclusiveSuite) SetUpTest(c *C) {
+	s.dir = c.MkDir()
+	storage, err := NewStorageWithOptions(
+		osfs.New(s.dir),
+		Options{ExclusiveAccess: true})
+	c.Assert(err, IsNil)
+
+	setUpTest(&s.StorageSuite, c, storage)
+}
diff --git a/utils/diff/diff.go b/utils/diff/diff.go
index b840ad6..f49ae55 100644
--- a/utils/diff/diff.go
+++ b/utils/diff/diff.go
@@ -16,8 +16,8 @@
 // string into the dst string.
 func Do(src, dst string) (diffs []diffmatchpatch.Diff) {
 	dmp := diffmatchpatch.New()
-	wSrc, wDst, warray := dmp.DiffLinesToChars(src, dst)
-	diffs = dmp.DiffMain(wSrc, wDst, false)
+	wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst)
+	diffs = dmp.DiffMainRunes(wSrc, wDst, false)
 	diffs = dmp.DiffCharsToLines(diffs, warray)
 	return diffs
 }
diff --git a/utils/merkletrie/difftree.go b/utils/merkletrie/difftree.go
index 2294096..d57ed13 100644
--- a/utils/merkletrie/difftree.go
+++ b/utils/merkletrie/difftree.go
@@ -248,15 +248,30 @@
 // h: else of i
 
 import (
+	"context"
+	"errors"
 	"fmt"
 
 	"gopkg.in/src-d/go-git.v4/utils/merkletrie/noder"
 )
 
+var (
+	ErrCanceled = errors.New("operation canceled")
+)
+
 // DiffTree calculates the list of changes between two merkletries.  It
 // uses the provided hashEqual callback to compare noders.
 func DiffTree(fromTree, toTree noder.Noder,
 	hashEqual noder.Equal) (Changes, error) {
+	return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
+}
+
+// DiffTreeContext calculates the list of changes between two merkletries.
+// It uses the provided hashEqual callback to compare noders.
+// An error will be returned if the context expires.
+// The provided context must be non-nil.
+func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
+	hashEqual noder.Equal) (Changes, error) {
 	ret := NewChanges()
 
 	ii, err := newDoubleIter(fromTree, toTree, hashEqual)
@@ -265,6 +280,12 @@
 	}
 
 	for {
+		select {
+		case <-ctx.Done():
+			return nil, ErrCanceled
+		default:
+		}
+
 		from := ii.from.current
 		to := ii.to.current
 
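
A short sketch of the cancellation path (fromTree and toTree are any noder.Noder values, e.g. built with fsnoder): an expired context stops the walk and DiffTreeContext returns ErrCanceled instead of a change list:

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	changes, err := merkletrie.DiffTreeContext(ctx, fromTree, toTree, hashEqual)
	if err == merkletrie.ErrCanceled {
		// the context expired before the trees were fully compared
	}
	_ = changes
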
diff --git a/utils/merkletrie/difftree_test.go b/utils/merkletrie/difftree_test.go
index 9f033b1..ab0eb57 100644
--- a/utils/merkletrie/difftree_test.go
+++ b/utils/merkletrie/difftree_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"bytes"
+	ctx "context"
 	"fmt"
 	"reflect"
 	"sort"
@@ -61,9 +62,45 @@
 	c.Assert(obtained, changesEquals, expected, comment)
 }
 
+func (t diffTreeTest) innerRunCtx(c *C, context string, reverse bool) {
+	comment := Commentf("\n%s", context)
+	if reverse {
+		comment = Commentf("%s [REVERSED]", comment.CheckCommentString())
+	}
+
+	a, err := fsnoder.New(t.from)
+	c.Assert(err, IsNil, comment)
+	comment = Commentf("%s\n\t    from = %s", comment.CheckCommentString(), a)
+
+	b, err := fsnoder.New(t.to)
+	c.Assert(err, IsNil, comment)
+	comment = Commentf("%s\n\t      to = %s", comment.CheckCommentString(), b)
+
+	expected, err := newChangesFromString(t.expected)
+	c.Assert(err, IsNil, comment)
+
+	if reverse {
+		a, b = b, a
+		expected = expected.reverse()
+	}
+	comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected)
+
+	results, err := merkletrie.DiffTreeContext(ctx.Background(), a, b, fsnoder.HashEqual)
+	c.Assert(err, IsNil, comment)
+
+	obtained, err := newChanges(results)
+	c.Assert(err, IsNil, comment)
+
+	comment = Commentf("%s\n\tobtained = %s", comment.CheckCommentString(), obtained)
+
+	c.Assert(obtained, changesEquals, expected, comment)
+}
+
 func (t diffTreeTest) run(c *C, context string) {
 	t.innerRun(c, context, false)
 	t.innerRun(c, context, true)
+	t.innerRunCtx(c, context, false)
+	t.innerRunCtx(c, context, true)
 }
 
 type change struct {
@@ -437,3 +474,27 @@
 		},
 	})
 }
+
+func (s *DiffTreeSuite) TestCancel(c *C) {
+	t := diffTreeTest{"()", "(a<> b<1> c() d<> e<2> f())", "+a +b +d +e"}
+	comment := Commentf("\n%s", "test cancel:")
+
+	a, err := fsnoder.New(t.from)
+	c.Assert(err, IsNil, comment)
+	comment = Commentf("%s\n\t    from = %s", comment.CheckCommentString(), a)
+
+	b, err := fsnoder.New(t.to)
+	c.Assert(err, IsNil, comment)
+	comment = Commentf("%s\n\t      to = %s", comment.CheckCommentString(), b)
+
+	expected, err := newChangesFromString(t.expected)
+	c.Assert(err, IsNil, comment)
+
+	comment = Commentf("%s\n\texpected = %s", comment.CheckCommentString(), expected)
+	context, cancel := ctx.WithCancel(ctx.Background())
+	cancel()
+	results, err := merkletrie.DiffTreeContext(context, a, b, fsnoder.HashEqual)
+	c.Assert(results, IsNil, comment)
+	c.Assert(err, ErrorMatches, "operation canceled")
+}
diff --git a/worktree.go b/worktree.go
index 99b2cd1..e45d815 100644
--- a/worktree.go
+++ b/worktree.go
@@ -713,29 +713,54 @@
 }
 
 // Clean the worktree by removing untracked files.
+// Empty directories are removed as well - this is what `git clean -f -d .` does.
 func (w *Worktree) Clean(opts *CleanOptions) error {
 	s, err := w.Status()
 	if err != nil {
 		return err
 	}
 
-	// Check Worktree status to be Untracked, obtain absolute path and delete.
-	for relativePath, status := range s {
-		// Check if the path contains a directory and if Dir options is false,
-		// skip the path.
-		if relativePath != filepath.Base(relativePath) && !opts.Dir {
+	root := ""
+	files, err := w.Filesystem.ReadDir(root)
+	if err != nil {
+		return err
+	}
+	return w.doClean(s, opts, root, files)
+}
+
+func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error {
+	for _, fi := range files {
+		if fi.Name() == ".git" {
 			continue
 		}
 
-		// Remove the file only if it's an untracked file.
-		if status.Worktree == Untracked {
-			absPath := filepath.Join(w.Filesystem.Root(), relativePath)
-			if err := os.Remove(absPath); err != nil {
+		// relative path under the root
+		path := filepath.Join(dir, fi.Name())
+		if fi.IsDir() {
+			if !opts.Dir {
+				continue
+			}
+
+			subfiles, err := w.Filesystem.ReadDir(path)
+			if err != nil {
 				return err
 			}
+			err = w.doClean(status, opts, path, subfiles)
+			if err != nil {
+				return err
+			}
+		} else {
+			if status.IsUntracked(path) {
+				if err := w.Filesystem.Remove(path); err != nil {
+					return err
+				}
+			}
 		}
 	}
 
+	if opts.Dir {
+		return doCleanDirectories(w.Filesystem, dir)
+	}
 	return nil
 }
 
@@ -881,15 +906,18 @@
 		return err
 	}
 
-	path := filepath.Dir(name)
-	files, err := fs.ReadDir(path)
+	dir := filepath.Dir(name)
+	return doCleanDirectories(fs, dir)
+}
+
+// doCleanDirectories removes the given directory if it contains no files
+func doCleanDirectories(fs billy.Filesystem, dir string) error {
+	files, err := fs.ReadDir(dir)
 	if err != nil {
 		return err
 	}
-
 	if len(files) == 0 {
-		fs.Remove(path)
+		return fs.Remove(dir)
 	}
-
 	return nil
 }
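
A usage sketch of the recursive clean (repository setup elided): with Dir set, untracked files are removed and any directories left empty afterwards are deleted too, matching `git clean -f -d .`:

	w, err := repo.Worktree()
	if err != nil {
		return err
	}

	// Removes untracked files, recurses into subdirectories, and
	// deletes directories that end up empty.
	return w.Clean(&CleanOptions{Dir: true})
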
diff --git a/worktree_commit.go b/worktree_commit.go
index f0e0b42..673eb16 100644
--- a/worktree_commit.go
+++ b/worktree_commit.go
@@ -1,10 +1,12 @@
 package git
 
 import (
+	"bytes"
 	"path"
 	"sort"
 	"strings"
 
+	"golang.org/x/crypto/openpgp"
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
 	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
@@ -93,6 +95,14 @@
 		ParentHashes: opts.Parents,
 	}
 
+	if opts.SignKey != nil {
+		sig, err := w.buildCommitSignature(commit, opts.SignKey)
+		if err != nil {
+			return plumbing.ZeroHash, err
+		}
+		commit.PGPSignature = sig
+	}
+
 	obj := w.r.Storer.NewEncodedObject()
 	if err := commit.Encode(obj); err != nil {
 		return plumbing.ZeroHash, err
@@ -100,6 +110,22 @@
 	return w.r.Storer.SetEncodedObject(obj)
 }
 
+func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) {
+	encoded := &plumbing.MemoryObject{}
+	if err := commit.Encode(encoded); err != nil {
+		return "", err
+	}
+	r, err := encoded.Reader()
+	if err != nil {
+		return "", err
+	}
+	var b bytes.Buffer
+	if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil {
+		return "", err
+	}
+	return b.String(), nil
+}
+
 // buildTreeHelper converts a given index.Index file into multiple git objects
 // reading the blobs from the given filesystem and creating the trees from the
 // index structure. The created objects are pushed to a given Storer.
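
An end-to-end signing sketch (key material, passphrase, author, and helper variables are placeholders): the entity handed to Commit must already carry a decrypted private key, as exercised by TestCommitSignBadKey below:

	es, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armoredKeyRing))
	if err != nil {
		return err
	}

	key := es[0]
	if err := key.PrivateKey.Decrypt([]byte(passphrase)); err != nil {
		return err
	}

	// The commit is encoded without the signature, signed with the key,
	// and stored with the armored detached signature attached.
	hash, err := w.Commit("signed commit\n", &CommitOptions{
		Author:  author, // placeholder *object.Signature
		SignKey: key,
	})
	if err != nil {
		return err
	}
	fmt.Println("created signed commit", hash)
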
diff --git a/worktree_commit_test.go b/worktree_commit_test.go
index 5ca9b51..6979bd5 100644
--- a/worktree_commit_test.go
+++ b/worktree_commit_test.go
@@ -5,14 +5,18 @@
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"strings"
 	"time"
 
 	"gopkg.in/src-d/go-git.v4/plumbing"
 	"gopkg.in/src-d/go-git.v4/plumbing/object"
 	"gopkg.in/src-d/go-git.v4/plumbing/storer"
-	"gopkg.in/src-d/go-git.v4/storage/memory"
 	"gopkg.in/src-d/go-git.v4/storage/filesystem"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
 
+	"golang.org/x/crypto/openpgp"
+	"golang.org/x/crypto/openpgp/armor"
+	"golang.org/x/crypto/openpgp/errors"
 	. "gopkg.in/check.v1"
 	"gopkg.in/src-d/go-billy.v4/memfs"
 	"gopkg.in/src-d/go-billy.v4/osfs"
@@ -141,6 +145,62 @@
 	assertStorageStatus(c, s.Repository, 13, 11, 11, expected)
 }
 
+func (s *WorktreeSuite) TestCommitSign(c *C) {
+	fs := memfs.New()
+	storage := memory.NewStorage()
+
+	r, err := Init(storage, fs)
+	c.Assert(err, IsNil)
+
+	w, err := r.Worktree()
+	c.Assert(err, IsNil)
+
+	util.WriteFile(fs, "foo", []byte("foo"), 0644)
+
+	_, err = w.Add("foo")
+	c.Assert(err, IsNil)
+
+	key := commitSignKey(c, true)
+	hash, err := w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key})
+	c.Assert(err, IsNil)
+
+	// Verify the commit.
+	pks := new(bytes.Buffer)
+	pkw, err := armor.Encode(pks, openpgp.PublicKeyType, nil)
+	c.Assert(err, IsNil)
+
+	err = key.Serialize(pkw)
+	c.Assert(err, IsNil)
+	err = pkw.Close()
+	c.Assert(err, IsNil)
+
+	expectedCommit, err := r.CommitObject(hash)
+	c.Assert(err, IsNil)
+	actual, err := expectedCommit.Verify(pks.String())
+	c.Assert(err, IsNil)
+	c.Assert(actual.PrimaryKey, DeepEquals, key.PrimaryKey)
+}
+
+func (s *WorktreeSuite) TestCommitSignBadKey(c *C) {
+	fs := memfs.New()
+	storage := memory.NewStorage()
+
+	r, err := Init(storage, fs)
+	c.Assert(err, IsNil)
+
+	w, err := r.Worktree()
+	c.Assert(err, IsNil)
+
+	util.WriteFile(fs, "foo", []byte("foo"), 0644)
+
+	_, err = w.Add("foo")
+	c.Assert(err, IsNil)
+
+	key := commitSignKey(c, false)
+	_, err = w.Commit("foo\n", &CommitOptions{Author: defaultSignature(), SignKey: key})
+	c.Assert(err, Equals, errors.InvalidArgumentError("signing key is encrypted"))
+}
+
 func (s *WorktreeSuite) TestCommitTreeSort(c *C) {
 	path, err := ioutil.TempDir(os.TempDir(), "test-commit-tree-sort")
 	c.Assert(err, IsNil)
@@ -227,3 +287,83 @@
 		When:  when,
 	}
 }
+
+func commitSignKey(c *C, decrypt bool) *openpgp.Entity {
+	s := strings.NewReader(armoredKeyRing)
+	es, err := openpgp.ReadArmoredKeyRing(s)
+	c.Assert(err, IsNil)
+
+	c.Assert(es, HasLen, 1)
+	c.Assert(es[0].Identities, HasLen, 1)
+	_, ok := es[0].Identities["foo bar <foo@foo.foo>"]
+	c.Assert(ok, Equals, true)
+
+	key := es[0]
+	if decrypt {
+		err = key.PrivateKey.Decrypt([]byte(keyPassphrase))
+		c.Assert(err, IsNil)
+	}
+
+	return key
+}
+
+const armoredKeyRing = `
+-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lQdGBFt2OHgBEADQpRmFm9X9xBfUljVs1B24MXWRHcEP5tx2k6Cp90sSz/ZOJcxH
+RjzYuXjpkE7g/PaZxAMVS1PptJip/w1/+5l2gZ7RmzU/e3hKe4vALHzKMVp8t7Ta
+0e2K3STxapCr9FNITjQRGOhnFwqiYoPCf9u5Iy8uszDH7HHnBZx+Nvbl95dDvmMs
+aFUKMeaoFD19iwEdRu6gJo7YIWF/8zwHi49neKigisGKh5PI0KUYeRPydXeCZIKQ
+ofdk+CPUS4r3dVhxTMYeHn/Vrep3blEA45E7KJ+TESmKkwliEgdjJwaVkUfJhBkb
+p2pMPKwbxLma9GCJBimOkehFv8/S+xn/xrLSsTxeOCIzMp3I5vgjR5QfONq5kuB1
+qbr8rDpSCHmTd7tzixFA0tVPBsvToA5Cz2MahJ+vmouusiWq/2YzGNE4zlzezNZ1
+3dgsVJm67xUSs0qY5ipKzButCFSKnaj1hLNR1NsUd0NPrVBTGblxULLuD99GhoXk
+/pcM5dCGTUX7XIarSFTEgBNQytpmfgt1Xbw2ErmlAdiFb4/5uBdbsVFAjglBvRI5
+VhFXr7mUd+XR/23aRczdAnp+Zg7VvyaJQi0ZwEj7VvLzpSAneVrxEcnuc2MBkUgT
+TN/Z5LYqC93nr6vB7+HMwoBZ8hBAkO4rTKYQl3eMUSkIsE45CqI7Hz0eXQARAQAB
+/gcDAqG5KzRnSp/38h4JKzJhSBRyyBPrgpYqR6ivFABzPUPJjO0gqRYzx/C+HJyl
+z+QED0WH+sW8Ns4PkAgNWZ+225fzSssavLcPwjncy9pzcV+7bc76cFb77fSve+1D
+LxhpzN58q03cSXPoamcDD7yY8GYYkAquLDZw+eRQ57BbBrNjXyfpGkBmtULymLqZ
+SgkuV5we7//lRPDIuPk+9lszJXBUW3k5e32CR47B/hI6Pu0DTlN9VesAEmXRNsi9
+YlRiO74nGPQPEWGjnEUQ++W8ip0CzoSrmPhrdGQlSR+SBEbBCuXz1lsj7D9cBxwH
+qHgwhYKvWz/gaY702+i/S1Cu/PjEpY3WPC5oSSNSSgypD8uSpcb4s2LffIegJNck
+e1AuiovG6u/3QXPot0jHhdy+Qwe+oaJfSEBGQ4fD4W6GbPxwOIQGgXV0bRaeHYgL
+iUWbN3rTLLVfDJKVo2ahvqZ7i4whfMuu1gGWQ4OEizrCDqp0x48HchEOC+T1eP3T
+Zjth2YMtzZdXlpt5HNKeaY6ZP+NWILwvOQBd3UtNpeaCNhUa0YyB7GD/k7tZnCIZ
+aNyF/DpnRrSQ9mAOffVn2NDGUv+01LnhIfa2tJes9XPmTc6ASrn/RGE9xH0X7wBD
+HfAdGhHgbkzwNeYkQvSh1WyWj5C0Sq7X70dIYdcO81i5MMtlJrzrlB5/YCFVWSxt
+7/EqwMBT3g9mkjAqo6beHxI1Hukn9rt9A6+MU64r0/cB+mVZuiBDoU/+KIiXBWiE
+F/C1n/BO115WoWG35vj5oH+syuv3lRuPaz8GxoffcT+FUkmevZO1/BjEAABAwMS1
+nlB4y6xMJ0i2aCB2kp7ThDOOeTIQpdvtDLqRtQsVTpk73AEuDeKmULJnE2+Shi7v
+yrNj1CPiBdYzz8jBDJYQH87iFQrro7VQNZzMMxpMWXQOZYWidHuBz4TgJJ0ll0JN
+KwLtqv5wdf2zG8zNli0Dz+JwiwQ1kXDcA03rxHBCFALvkdIX0KUvTaTSV7OJ65VI
+rcIwB5fSZgRE7m/9RjBGq/U+n4Kw+vlfpL7UeECJM0N7l8ekgTqqKv2Czu29eTjF
+QOnpQtjgsWVpOnHKpQUfCN1Nxg8H1ytH9HQwLn+cGjm/yK55yIK+03X/vSs2m2qz
+2zDhWlgvHLsDOEQkNsuOIvLkNM6Hv3MLTldknC+vMla34fYqpHfV1phL4npVByMW
+CFOOzLa3qCoBXIGWvtnDx06r/8apHnt256G2X0iuRWWK+XpatMjmriZnj8vyGdIg
+TZ1sNXnuFKMcXYMIvLANZXz4Rabbe6tTJ+BUVkbCGja4Z9iwmYvga77Mr2vjhtwi
+CesRpcz6gR6U5fLddRZXyzKGxC3uQzokc9RtTuRNgSBZQ0oki++d6sr0+jOb54Mr
+wfcMbMgpkQK0IJsMoOxzPLU8s6rISJvFi4IQ2dPYog17GS7Kjb1IGjGUxNKVHiIE
+Is9wB+6bB51ZUUwc0zDSkuS6EaXLLVzmS7a3TOkVzu6J760TDVLL2+PDYkkBUP6O
+SA2yeHirpyMma9QII1sw3xcKH/kDeyWigiB1VDKQpuq1PP98lYjQwAbe3Xrpy2FO
+L/v6dSOJ+imgxD4osT0SanGkZEwPqJFvs6BI9Af8q9ia0xfK3Iu6F2F8JxmG1YiR
+tUm9kCu3X/fNyE08G2sxD8QzGP9VS529nEDRBqkAgY6EHTpRKhPer9QrkUnqEyDZ
+4s7RPcJW+cII/FPW8mSMgTqxFtTZgqNaqPPLevrTnTYTdrW/RkEs1mm0FWZvbyBi
+YXIgPGZvb0Bmb28uZm9vPokCVAQTAQgAPhYhBJICM5a3zdmD+nRGF3grx+nZaj4C
+BQJbdjh4AhsDBQkDwmcABQsJCAcCBhUICQoLAgQWAgMBAh4BAheAAAoJEHgrx+nZ
+aj4CTyUP/2+4k4hXkkBrEeD0yDpmR/FrAgCOZ3iRWca9bJwKtV0hW0HSztlPEfng
+wkwBmmyrnDevA+Ur4/hsBoTzfL4Fzo4OQDg2PZpSpIAHC1m/SQMN/s188RM8eK+Q
+JBtinAo2IDoZyBi5Ar4rVNXrRpgvzwOLm15kpuPp15wxO+4gYOkNIT06yUrDNh3J
+ccXmgZoVD54JmvKrEXscqX71/1NkaUhwZfFALN3+TVXUUdv1icQUJtxNBc29arwM
+LuPuj9XAm5XJaVXDfsJyGu4aj4g6AJDXjVW1d2MgXv1rMRud7CGuX2PmO3CUUua9
+cUaavop5AmtF/+IsHae9qRt8PiMGTebV8IZ3Z6DZeOYDnfJVOXoIUcrAvX3LoImc
+ephBdZ0KmYvaxlDrjtWAvmD6sPgwSvjLiXTmbmAkjRBXCVve4THf05kVUMcr8tmz
+Il8LB+Dri2TfanBKykf6ulH0p2MHgSGQbYA5MuSp+soOitD5YvCxM7o/O0frrfit
+p/O8mPerMEaYF1+3QbF5ApJkXCmjFCj71EPwXEDcl3VIGc+zA49oNjZMMmCcX2Gc
+JyKTWizfuRBGeG5VhCCmTQQjZHPMVO255mdzsPkb6ZHEnolDapY6QXccV5x05XqD
+sObFTy6iwEITdGmxN40pNE3WbhYGqOoXb8iRIG2hURv0gfG1/iI0
+=8g3t
+-----END PGP PRIVATE KEY BLOCK-----
+`
+
+const keyPassphrase = "abcdef0123456789"
diff --git a/worktree_test.go b/worktree_test.go
index df191b0..c714011 100644
--- a/worktree_test.go
+++ b/worktree_test.go
@@ -1591,6 +1591,10 @@
 
 	c.Assert(len(status), Equals, 1)
 
+	fi, err := fs.Lstat("pkgA")
+	c.Assert(err, IsNil)
+	c.Assert(fi.IsDir(), Equals, true)
+
 	// Clean with Dir: true.
 	err = wt.Clean(&CleanOptions{Dir: true})
 	c.Assert(err, IsNil)
@@ -1599,6 +1603,11 @@
 	c.Assert(err, IsNil)
 
 	c.Assert(len(status), Equals, 0)
+
+	// An empty dir should be deleted as well.
+	_, err = fs.Lstat("pkgA")
+	c.Assert(err, ErrorMatches, ".*(no such file or directory|file does not exist).*")
 }
 
 func (s *WorktreeSuite) TestAlternatesRepo(c *C) {