Merge pull request #501 from smola/config-multiple-urls

config: multiple values in RemoteConfig (URLs and Fetch)
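
The diff bundles packfile delta reuse (plumbing.DeltaObject, storer.DeltaObjectStorer
and OFS_DELTA offset validation in the encoder), forwarding of Auth from clone/pull
options to recursive submodule updates, a by-name field copy in the ssh overrideConfig
helper, and revlist changes that skip already-seen objects and tolerate missing
objects in the ignore list.

A minimal usage sketch of the auth forwarding (not part of this diff; the key path,
repository URL and recursion depth are placeholders, and the API names assume the
go-git v4 public API):

    package main

    import (
        git "gopkg.in/src-d/go-git.v4"
        "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh"
    )

    func main() {
        // Load an SSH key; user, path and empty passphrase are placeholders.
        auth, err := ssh.NewPublicKeysFromFile("git", "/home/user/.ssh/id_rsa", "")
        if err != nil {
            panic(err)
        }

        // Auth is now also used for the fetches done while updating submodules.
        _, err = git.PlainClone("/tmp/repo", false, &git.CloneOptions{
            URL:               "git@github.com:src-d/go-git.git",
            Auth:              auth,
            RecurseSubmodules: 10, // illustrative recursion depth
        })
        if err != nil {
            panic(err)
        }
    }
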
diff --git a/options.go b/options.go
index 9f328cb..0ec18d4 100644
--- a/options.go
+++ b/options.go
@@ -194,6 +194,8 @@
 	// the current repository but also in any nested submodules inside those
 	// submodules (and so on). Until the SubmoduleRescursivity is reached.
 	RecurseSubmodules SubmoduleRescursivity
+	// Auth credentials, if required, to use with the remote repository.
+	Auth transport.AuthMethod
 }
 
 var (
diff --git a/plumbing/format/packfile/delta_selector.go b/plumbing/format/packfile/delta_selector.go
index 20c8cea..efcbd53 100644
--- a/plumbing/format/packfile/delta_selector.go
+++ b/plumbing/format/packfile/delta_selector.go
@@ -47,17 +47,123 @@
 func (dw *deltaSelector) objectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, error) {
 	var objectsToPack []*ObjectToPack
 	for _, h := range hashes {
-		o, err := dw.storer.EncodedObject(plumbing.AnyObject, h)
+		o, err := dw.encodedDeltaObject(h)
 		if err != nil {
 			return nil, err
 		}
 
-		objectsToPack = append(objectsToPack, newObjectToPack(o))
+		otp := newObjectToPack(o)
+		if _, ok := o.(plumbing.DeltaObject); ok {
+			otp.Original = nil
+		}
+
+		objectsToPack = append(objectsToPack, otp)
+	}
+
+	if err := dw.fixAndBreakChains(objectsToPack); err != nil {
+		return nil, err
 	}
 
 	return objectsToPack, nil
 }
 
+func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
+	edos, ok := dw.storer.(storer.DeltaObjectStorer)
+	if !ok {
+		return dw.encodedObject(h)
+	}
+
+	return edos.DeltaObject(plumbing.AnyObject, h)
+}
+
+func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) {
+	return dw.storer.EncodedObject(plumbing.AnyObject, h)
+}
+
+func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error {
+	m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack))
+	for _, otp := range objectsToPack {
+		m[otp.Hash()] = otp
+	}
+
+	for _, otp := range objectsToPack {
+		if err := dw.fixAndBreakChainsOne(m, otp); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error {
+	if !otp.Object.Type().IsDelta() {
+		return nil
+	}
+
+	// Initial ObjectToPack instances might have a delta assigned to Object
+	// but no Base set yet. Once Base is assigned to a delta, it means
+	// we have already fixed it.
+	if otp.Base != nil {
+		return nil
+	}
+
+	do, ok := otp.Object.(plumbing.DeltaObject)
+	if !ok {
+		// if this is not a DeltaObject, then we cannot retrieve its base,
+		// so we have to break the delta chain here.
+		return dw.undeltify(otp)
+	}
+
+	base, ok := objectsToPack[do.BaseHash()]
+	if !ok {
+		// The base of the delta is not in our list of objects to pack, so
+		// we break the chain.
+		return dw.undeltify(otp)
+	}
+
+	if base.Size() <= otp.Size() {
+		// Bases should be bigger
+		return dw.undeltify(otp)
+	}
+
+	if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil {
+		return err
+	}
+
+	otp.SetDelta(base, otp.Object)
+	return nil
+}
+
+func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error {
+	if otp.Original != nil {
+		return nil
+	}
+
+	if !otp.Object.Type().IsDelta() {
+		return nil
+	}
+
+	obj, err := dw.encodedObject(otp.Hash())
+	if err != nil {
+		return err
+	}
+
+	otp.Original = obj
+	return nil
+}
+
+// undeltify breaks the delta chain for an *ObjectToPack by restoring the
+// original object from the storer and resetting its Object and Depth.
+func (dw *deltaSelector) undeltify(otp *ObjectToPack) error {
+	if err := dw.restoreOriginal(otp); err != nil {
+		return err
+	}
+
+	otp.Object = otp.Original
+	otp.Depth = 0
+	return nil
+}
+
 func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) {
 	sort.Sort(byTypeAndSize(objectsToPack))
 }
@@ -66,15 +172,24 @@
 	for i := 0; i < len(objectsToPack); i++ {
 		target := objectsToPack[i]
 
-		// We only want to create deltas from specific types
-		if !applyDelta[target.Original.Type()] {
+		// If we already have a delta, we don't try to find a new one for this
+		// object. This happens when a delta is set to be reused from an existing
+		// packfile.
+		if target.IsDelta() {
+			continue
+		}
+
+		// We only want to create deltas from specific types.
+		if !applyDelta[target.Type()] {
 			continue
 		}
 
 		for j := i - 1; j >= 0; j-- {
 			base := objectsToPack[j]
 			// Objects must use only the same type as their delta base.
-			if base.Original.Type() != target.Original.Type() {
+			// Since objectsToPack is sorted by type and size, once we find
+			// a different type, we know we won't find more of them.
+			if base.Type() != target.Type() {
 				break
 			}
 
@@ -89,7 +204,7 @@
 
 func (dw *deltaSelector) tryToDeltify(base, target *ObjectToPack) error {
 	// If the sizes are radically different, this is a bad pairing.
-	if target.Original.Size() < base.Original.Size()>>4 {
+	if target.Size() < base.Size()>>4 {
 		return nil
 	}
 
@@ -106,10 +221,20 @@
 	}
 
 	// If we have to insert a lot to make this work, find another.
-	if base.Original.Size()-target.Object.Size() > msz {
+	if base.Size()-target.Size() > msz {
 		return nil
 	}
 
+	// The original object might not be present if we are reusing a delta, so
+	// make sure it is restored.
+	if err := dw.restoreOriginal(target); err != nil {
+		return err
+	}
+
+	if err := dw.restoreOriginal(base); err != nil {
+		return err
+	}
+
 	// Now we can generate the delta using originals
 	delta, err := GetDelta(base.Original, target.Original)
 	if err != nil {
@@ -162,13 +287,13 @@
 func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
 
 func (a byTypeAndSize) Less(i, j int) bool {
-	if a[i].Object.Type() < a[j].Object.Type() {
+	if a[i].Type() < a[j].Type() {
 		return false
 	}
 
-	if a[i].Object.Type() > a[j].Object.Type() {
+	if a[i].Type() > a[j].Type() {
 		return true
 	}
 
-	return a[i].Object.Size() > a[j].Object.Size()
+	return a[i].Size() > a[j].Size()
 }
diff --git a/plumbing/format/packfile/encoder.go b/plumbing/format/packfile/encoder.go
index ae83752..1426559 100644
--- a/plumbing/format/packfile/encoder.go
+++ b/plumbing/format/packfile/encoder.go
@@ -18,6 +18,9 @@
 	w            *offsetWriter
 	zw           *zlib.Writer
 	hasher       plumbing.Hasher
+	// offsets is a map of object hashes to corresponding offsets in the packfile.
+	// It is used to determine the offset of the base of a delta when an
+	// OFS_DELTA is used.
 	offsets      map[plumbing.Hash]int64
 	useRefDeltas bool
 }
@@ -78,25 +81,24 @@
 
 func (e *Encoder) entry(o *ObjectToPack) error {
 	offset := e.w.Offset()
+	e.offsets[o.Hash()] = offset
 
 	if o.IsDelta() {
 		if err := e.writeDeltaHeader(o, offset); err != nil {
 			return err
 		}
 	} else {
-		if err := e.entryHead(o.Object.Type(), o.Object.Size()); err != nil {
+		if err := e.entryHead(o.Type(), o.Size()); err != nil {
 			return err
 		}
 	}
 
-	// Save the position using the original hash, maybe a delta will need it
-	e.offsets[o.Original.Hash()] = offset
-
 	e.zw.Reset(e.w)
 	or, err := o.Object.Reader()
 	if err != nil {
 		return err
 	}
+
 	_, err = io.Copy(e.zw, or)
 	if err != nil {
 		return err
@@ -117,9 +119,9 @@
 	}
 
 	if e.useRefDeltas {
-		return e.writeRefDeltaHeader(o.Base.Original.Hash())
+		return e.writeRefDeltaHeader(o.Base.Hash())
 	} else {
-		return e.writeOfsDeltaHeader(offset, o.Base.Original.Hash())
+		return e.writeOfsDeltaHeader(offset, o.Base.Hash())
 	}
 }
 
@@ -128,14 +130,19 @@
 }
 
 func (e *Encoder) writeOfsDeltaHeader(deltaOffset int64, base plumbing.Hash) error {
-	// because it is an offset delta, we need the base
-	// object position
-	offset, ok := e.offsets[base]
+	baseOffset, ok := e.offsets[base]
 	if !ok {
-		return fmt.Errorf("delta base not found. Hash: %v", base)
+		return fmt.Errorf("base for delta not found, base hash: %v", base)
 	}
 
-	return binary.WriteVariableWidthInt(e.w, deltaOffset-offset)
+	// For OFS_DELTA, the offset of the base is interpreted as a negative offset
+	// relative to the type-byte of the header of the ofs-delta entry.
+	relativeOffset := deltaOffset - baseOffset
+	if relativeOffset <= 0 {
+		return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset)
+	}
+
+	return binary.WriteVariableWidthInt(e.w, relativeOffset)
 }
 
 func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error {
diff --git a/plumbing/format/packfile/encoder_advanced_test.go b/plumbing/format/packfile/encoder_advanced_test.go
new file mode 100644
index 0000000..d92e2c4
--- /dev/null
+++ b/plumbing/format/packfile/encoder_advanced_test.go
@@ -0,0 +1,91 @@
+package packfile_test
+
+import (
+	"bytes"
+	"math/rand"
+
+	"gopkg.in/src-d/go-git.v4/plumbing"
+	. "gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
+	"gopkg.in/src-d/go-git.v4/storage/filesystem"
+	"gopkg.in/src-d/go-git.v4/storage/memory"
+
+	"github.com/src-d/go-git-fixtures"
+	. "gopkg.in/check.v1"
+)
+
+type EncoderAdvancedSuite struct {
+	fixtures.Suite
+}
+
+var _ = Suite(&EncoderAdvancedSuite{})
+
+func (s *EncoderAdvancedSuite) TestEncodeDecode(c *C) {
+	fixs := fixtures.Basic().ByTag("packfile").ByTag(".git")
+	fixs = append(fixs, fixtures.ByURL("https://github.com/src-d/go-git.git").
+		ByTag("packfile").ByTag(".git").One())
+	fixs.Test(c, func(f *fixtures.Fixture) {
+		storage, err := filesystem.NewStorage(f.DotGit())
+		c.Assert(err, IsNil)
+		s.testEncodeDecode(c, storage)
+	})
+
+}
+
+func (s *EncoderAdvancedSuite) testEncodeDecode(c *C, storage storer.Storer) {
+
+	objIter, err := storage.IterEncodedObjects(plumbing.AnyObject)
+	c.Assert(err, IsNil)
+
+	expectedObjects := map[plumbing.Hash]bool{}
+	var hashes []plumbing.Hash
+	err = objIter.ForEach(func(o plumbing.EncodedObject) error {
+		expectedObjects[o.Hash()] = true
+		hashes = append(hashes, o.Hash())
+		return err
+
+	})
+	c.Assert(err, IsNil)
+
+	// Shuffle hashes to avoid the delta selector getting the order right just
+	// because the initial order is already correct.
+	auxHashes := make([]plumbing.Hash, len(hashes))
+	for i, j := range rand.Perm(len(hashes)) {
+		auxHashes[j] = hashes[i]
+	}
+	hashes = auxHashes
+
+	buf := bytes.NewBuffer(nil)
+	enc := NewEncoder(buf, storage, false)
+	_, err = enc.Encode(hashes)
+	c.Assert(err, IsNil)
+
+	scanner := NewScanner(buf)
+	storage = memory.NewStorage()
+	d, err := NewDecoder(scanner, storage)
+	c.Assert(err, IsNil)
+	_, err = d.Decode()
+	c.Assert(err, IsNil)
+
+	objIter, err = storage.IterEncodedObjects(plumbing.AnyObject)
+	c.Assert(err, IsNil)
+	obtainedObjects := map[plumbing.Hash]bool{}
+	err = objIter.ForEach(func(o plumbing.EncodedObject) error {
+		obtainedObjects[o.Hash()] = true
+		return nil
+	})
+	c.Assert(err, IsNil)
+	c.Assert(obtainedObjects, DeepEquals, expectedObjects)
+
+	for h := range obtainedObjects {
+		if !expectedObjects[h] {
+			c.Errorf("obtained unexpected object: %s", h)
+		}
+	}
+
+	for h := range expectedObjects {
+		if !obtainedObjects[h] {
+			c.Errorf("missing object: %s", h)
+		}
+	}
+}
diff --git a/plumbing/format/packfile/encoder_test.go b/plumbing/format/packfile/encoder_test.go
index fa01ed0..b5b0c42 100644
--- a/plumbing/format/packfile/encoder_test.go
+++ b/plumbing/format/packfile/encoder_test.go
@@ -86,68 +86,6 @@
 	c.Assert(err, Equals, plumbing.ErrObjectNotFound)
 }
 
-func (s *EncoderSuite) TestDecodeEncodeDecode(c *C) {
-	fixtures.Basic().ByTag("packfile").Test(c, func(f *fixtures.Fixture) {
-		scanner := NewScanner(f.Packfile())
-		storage := memory.NewStorage()
-
-		d, err := NewDecoder(scanner, storage)
-		c.Assert(err, IsNil)
-
-		ch, err := d.Decode()
-		c.Assert(err, IsNil)
-		c.Assert(ch, Equals, f.PackfileHash)
-
-		objIter, err := d.o.IterEncodedObjects(plumbing.AnyObject)
-		c.Assert(err, IsNil)
-
-		objects := []plumbing.EncodedObject{}
-		hashes := []plumbing.Hash{}
-		err = objIter.ForEach(func(o plumbing.EncodedObject) error {
-			objects = append(objects, o)
-			hash, err := s.store.SetEncodedObject(o)
-			c.Assert(err, IsNil)
-
-			hashes = append(hashes, hash)
-
-			return err
-
-		})
-		c.Assert(err, IsNil)
-		_, err = s.enc.Encode(hashes)
-		c.Assert(err, IsNil)
-
-		scanner = NewScanner(s.buf)
-		storage = memory.NewStorage()
-		d, err = NewDecoder(scanner, storage)
-		c.Assert(err, IsNil)
-		_, err = d.Decode()
-		c.Assert(err, IsNil)
-
-		objIter, err = d.o.IterEncodedObjects(plumbing.AnyObject)
-		c.Assert(err, IsNil)
-		obtainedObjects := []plumbing.EncodedObject{}
-		err = objIter.ForEach(func(o plumbing.EncodedObject) error {
-			obtainedObjects = append(obtainedObjects, o)
-
-			return nil
-		})
-		c.Assert(err, IsNil)
-		c.Assert(len(obtainedObjects), Equals, len(objects))
-
-		equals := 0
-		for _, oo := range obtainedObjects {
-			for _, o := range objects {
-				if o.Hash() == oo.Hash() {
-					equals++
-				}
-			}
-		}
-
-		c.Assert(len(obtainedObjects), Equals, equals)
-	})
-}
-
 func (s *EncoderSuite) TestDecodeEncodeWithDeltaDecodeREF(c *C) {
 	s.enc = NewEncoder(s.buf, s.store, true)
 	s.simpleDeltaTest(c)
diff --git a/plumbing/format/packfile/object_pack.go b/plumbing/format/packfile/object_pack.go
index a3e99c0..14337d1 100644
--- a/plumbing/format/packfile/object_pack.go
+++ b/plumbing/format/packfile/object_pack.go
@@ -1,6 +1,8 @@
 package packfile
 
-import "gopkg.in/src-d/go-git.v4/plumbing"
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing"
+)
 
 // ObjectToPack is a representation of an object that is going to be into a
 // pack file.
@@ -39,6 +41,48 @@
 	}
 }
 
+func (o *ObjectToPack) Type() plumbing.ObjectType {
+	if o.Original != nil {
+		return o.Original.Type()
+	}
+
+	if o.Base != nil {
+		return o.Base.Type()
+	}
+
+	if o.Object != nil {
+		return o.Object.Type()
+	}
+
+	panic("cannot get type")
+}
+
+func (o *ObjectToPack) Hash() plumbing.Hash {
+	if o.Original != nil {
+		return o.Original.Hash()
+	}
+
+	do, ok := o.Object.(plumbing.DeltaObject)
+	if ok {
+		return do.ActualHash()
+	}
+
+	panic("cannot get hash")
+}
+
+func (o *ObjectToPack) Size() int64 {
+	if o.Original != nil {
+		return o.Original.Size()
+	}
+
+	do, ok := o.Object.(plumbing.DeltaObject)
+	if ok {
+		return do.ActualSize()
+	}
+
+	panic("cannot get ObjectToPack size")
+}
+
 func (o *ObjectToPack) IsDelta() bool {
 	if o.Base != nil {
 		return true
diff --git a/plumbing/object.go b/plumbing/object.go
index 3304da2..2655dee 100644
--- a/plumbing/object.go
+++ b/plumbing/object.go
@@ -23,6 +23,17 @@
 	Writer() (io.WriteCloser, error)
 }
 
+// DeltaObject is an EncodedObject representing a delta.
+type DeltaObject interface {
+	EncodedObject
+	// BaseHash returns the hash of the object used as base for this delta.
+	BaseHash() Hash
+	// ActualHash returns the hash of the object after applying the delta.
+	ActualHash() Hash
+	// ActualSize returns the size of the object after applying the delta.
+	ActualSize() int64
+}
+
 // ObjectType internal object type
 // Integer values from 0 to 7 map to those exposed by git.
 // AnyObject is used to represent any from 0 to 7.
@@ -71,6 +82,12 @@
 	return t >= CommitObject && t <= REFDeltaObject
 }
 
+// IsDelta returns true for any ObjectType that represents a delta (i.e.
+// REFDeltaObject or OFSDeltaObject).
+func (t ObjectType) IsDelta() bool {
+	return t == REFDeltaObject || t == OFSDeltaObject
+}
+
 // ParseObjectType parses a string representation of ObjectType. It returns an
 // error on parse failure.
 func ParseObjectType(value string) (typ ObjectType, err error) {
diff --git a/plumbing/revlist/revlist.go b/plumbing/revlist/revlist.go
index 1bda2fa..f56cf28 100644
--- a/plumbing/revlist/revlist.go
+++ b/plumbing/revlist/revlist.go
@@ -16,7 +16,26 @@
 // the reachable objects from the given objects. Ignore param are object hashes
 // that we want to ignore on the result. All that objects must be accessible
 // from the object storer.
-func Objects(s storer.EncodedObjectStorer, objects, ignore []plumbing.Hash) ([]plumbing.Hash, error) {
+func Objects(
+	s storer.EncodedObjectStorer,
+	objs,
+	ignore []plumbing.Hash,
+) ([]plumbing.Hash, error) {
+	ignore, err := objects(s, ignore, nil, true)
+	if err != nil {
+		return nil, err
+	}
+
+	return objects(s, objs, ignore, false)
+}
+
+func objects(
+	s storer.EncodedObjectStorer,
+	objects,
+	ignore []plumbing.Hash,
+	allowMissingObjects bool,
+) ([]plumbing.Hash, error) {
+
 	seen := hashListToSet(ignore)
 	result := make(map[plumbing.Hash]bool)
 
@@ -29,6 +48,10 @@
 
 	for _, h := range objects {
 		if err := processObject(s, h, seen, ignore, walkerFunc); err != nil {
+			if allowMissingObjects && err == plumbing.ErrObjectNotFound {
+				continue
+			}
+
 			return nil, err
 		}
 	}
@@ -44,6 +67,10 @@
 	ignore []plumbing.Hash,
 	walkerFunc func(h plumbing.Hash),
 ) error {
+	if seen[h] {
+		return nil
+	}
+
 	o, err := s.EncodedObject(plumbing.AnyObject, h)
 	if err != nil {
 		return err
diff --git a/plumbing/storer/object.go b/plumbing/storer/object.go
index a733ee6..3f41468 100644
--- a/plumbing/storer/object.go
+++ b/plumbing/storer/object.go
@@ -38,6 +38,14 @@
 	IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error)
 }
 
+// DeltaObjectStorer is an EncodedObjectStorer that can return delta
+// objects.
+type DeltaObjectStorer interface {
+	// DeltaObject is the same as EncodedObject but without resolving deltas.
+	// Deltas will be returned as plumbing.DeltaObject instances.
+	DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
+}
+
 // Transactioner is a optional method for ObjectStorer, it enable transaction
 // base write and read operations in the storage
 type Transactioner interface {
diff --git a/plumbing/transport/ssh/common.go b/plumbing/transport/ssh/common.go
index 6d1c51b..af79dfb 100644
--- a/plumbing/transport/ssh/common.go
+++ b/plumbing/transport/ssh/common.go
@@ -146,11 +146,14 @@
 		return
 	}
 
-	vo := reflect.ValueOf(*overrides)
-	vc := reflect.ValueOf(*c)
-	for i := 0; i < vc.Type().NumField(); i++ {
-		vcf := vc.Field(i)
-		vof := vo.Field(i)
+	t := reflect.TypeOf(*c)
+	vc := reflect.ValueOf(c).Elem()
+	vo := reflect.ValueOf(overrides).Elem()
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		vcf := vc.FieldByName(f.Name)
+		vof := vo.FieldByName(f.Name)
 		if isZeroValue(vcf) {
 			vcf.Set(vof)
 		}
diff --git a/plumbing/transport/ssh/common_test.go b/plumbing/transport/ssh/common_test.go
index da99148..1b07eee 100644
--- a/plumbing/transport/ssh/common_test.go
+++ b/plumbing/transport/ssh/common_test.go
@@ -3,7 +3,39 @@
 import (
 	"testing"
 
+	"golang.org/x/crypto/ssh"
+
 	. "gopkg.in/check.v1"
 )
 
 func Test(t *testing.T) { TestingT(t) }
+
+func (s *SuiteCommon) TestOverrideConfig(c *C) {
+	config := &ssh.ClientConfig{
+		User: "foo",
+		Auth: []ssh.AuthMethod{
+			ssh.Password("yourpassword"),
+		},
+		HostKeyCallback: ssh.FixedHostKey(nil),
+	}
+
+	target := &ssh.ClientConfig{}
+	overrideConfig(config, target)
+
+	c.Assert(target.User, Equals, "foo")
+	c.Assert(target.Auth, HasLen, 1)
+	c.Assert(target.HostKeyCallback, NotNil)
+}
+
+func (s *SuiteCommon) TestOverrideConfigKeep(c *C) {
+	config := &ssh.ClientConfig{
+		User: "foo",
+	}
+
+	target := &ssh.ClientConfig{
+		User: "bar",
+	}
+
+	overrideConfig(config, target)
+	c.Assert(target.User, Equals, "bar")
+}
diff --git a/repository.go b/repository.go
index 50109f7..92174d6 100644
--- a/repository.go
+++ b/repository.go
@@ -436,7 +436,10 @@
 		}
 
 		if o.RecurseSubmodules != NoRecurseSubmodules {
-			if err := w.updateSubmodules(o.RecurseSubmodules); err != nil {
+			if err := w.updateSubmodules(&SubmoduleUpdateOptions{
+				RecurseSubmodules: o.RecurseSubmodules,
+				Auth:              o.Auth,
+			}); err != nil {
 				return err
 			}
 		}
diff --git a/storage/filesystem/deltaobject.go b/storage/filesystem/deltaobject.go
new file mode 100644
index 0000000..66cfb71
--- /dev/null
+++ b/storage/filesystem/deltaobject.go
@@ -0,0 +1,37 @@
+package filesystem
+
+import (
+	"gopkg.in/src-d/go-git.v4/plumbing"
+)
+
+type deltaObject struct {
+	plumbing.EncodedObject
+	base plumbing.Hash
+	hash plumbing.Hash
+	size int64
+}
+
+func newDeltaObject(
+	obj plumbing.EncodedObject,
+	hash plumbing.Hash,
+	base plumbing.Hash,
+	size int64) plumbing.DeltaObject {
+	return &deltaObject{
+		EncodedObject: obj,
+		hash:          hash,
+		base:          base,
+		size:          size,
+	}
+}
+
+func (o *deltaObject) BaseHash() plumbing.Hash {
+	return o.base
+}
+
+func (o *deltaObject) ActualSize() int64 {
+	return o.size
+}
+
+func (o *deltaObject) ActualHash() plumbing.Hash {
+	return o.hash
+}
diff --git a/storage/filesystem/object.go b/storage/filesystem/object.go
index 068abd3..5073a38 100644
--- a/storage/filesystem/object.go
+++ b/storage/filesystem/object.go
@@ -130,7 +130,27 @@
 func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) {
 	obj, err := s.getFromUnpacked(h)
 	if err == plumbing.ErrObjectNotFound {
-		obj, err = s.getFromPackfile(h)
+		obj, err = s.getFromPackfile(h, false)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	if plumbing.AnyObject != t && obj.Type() != t {
+		return nil, plumbing.ErrObjectNotFound
+	}
+
+	return obj, nil
+}
+
+// DeltaObject returns the object with the given hash, by searching for
+// it in the packfile and the git object directories.
+func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType,
+	h plumbing.Hash) (plumbing.EncodedObject, error) {
+	obj, err := s.getFromUnpacked(h)
+	if err == plumbing.ErrObjectNotFound {
+		obj, err = s.getFromPackfile(h, true)
 	}
 
 	if err != nil {
@@ -182,12 +202,14 @@
 
 // Get returns the object with the given hash, by searching for it in
 // the packfile.
-func (s *ObjectStorage) getFromPackfile(h plumbing.Hash) (plumbing.EncodedObject, error) {
+func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
+	plumbing.EncodedObject, error) {
+
 	if err := s.requireIndex(); err != nil {
 		return nil, err
 	}
 
-	pack, offset := s.findObjectInPackfile(h)
+	pack, hash, offset := s.findObjectInPackfile(h)
 	if offset == -1 {
 		return nil, plumbing.ErrObjectNotFound
 	}
@@ -199,26 +221,94 @@
 
 	defer ioutil.CheckClose(f, &err)
 
+	idx := s.index[pack]
+	if canBeDelta {
+		return s.decodeDeltaObjectAt(f, idx, offset, hash)
+	}
+
+	return s.decodeObjectAt(f, idx, offset)
+}
+
+func (s *ObjectStorage) decodeObjectAt(
+	f billy.File,
+	idx *packfile.Index,
+	offset int64) (plumbing.EncodedObject, error) {
+	if _, err := f.Seek(0, io.SeekStart); err != nil {
+		return nil, err
+	}
+
 	p := packfile.NewScanner(f)
+
 	d, err := packfile.NewDecoder(p, memory.NewStorage())
 	if err != nil {
 		return nil, err
 	}
 
+	d.SetIndex(idx)
 	d.DeltaBaseCache = s.DeltaBaseCache
-	d.SetIndex(s.index[pack])
 	obj, err := d.DecodeObjectAt(offset)
 	return obj, err
 }
 
-func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, int64) {
+func (s *ObjectStorage) decodeDeltaObjectAt(
+	f billy.File,
+	idx *packfile.Index,
+	offset int64,
+	hash plumbing.Hash) (plumbing.EncodedObject, error) {
+	if _, err := f.Seek(0, io.SeekStart); err != nil {
+		return nil, err
+	}
+
+	p := packfile.NewScanner(f)
+	if _, err := p.SeekFromStart(offset); err != nil {
+		return nil, err
+	}
+
+	header, err := p.NextObjectHeader()
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		base plumbing.Hash
+	)
+
+	switch header.Type {
+	case plumbing.REFDeltaObject:
+		base = header.Reference
+	case plumbing.OFSDeltaObject:
+		e, ok := idx.LookupOffset(uint64(header.OffsetReference))
+		if !ok {
+			return nil, plumbing.ErrObjectNotFound
+		}
+
+		base = e.Hash
+	default:
+		return s.decodeObjectAt(f, idx, offset)
+	}
+
+	obj := &plumbing.MemoryObject{}
+	obj.SetType(header.Type)
+	w, err := obj.Writer()
+	if err != nil {
+		return nil, err
+	}
+
+	if _, _, err := p.NextObject(w); err != nil {
+		return nil, err
+	}
+
+	return newDeltaObject(obj, hash, base, header.Length), nil
+}
+
+func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) {
 	for packfile, index := range s.index {
 		if e, ok := index.LookupHash(h); ok {
-			return packfile, int64(e.Offset)
+			return packfile, e.Hash, int64(e.Offset)
 		}
 	}
 
-	return plumbing.ZeroHash, -1
+	return plumbing.ZeroHash, plumbing.ZeroHash, -1
 }
 
 // IterEncodedObjects returns an iterator for all the objects in the packfile
diff --git a/storage/filesystem/object_test.go b/storage/filesystem/object_test.go
index d741fa2..504bd45 100644
--- a/storage/filesystem/object_test.go
+++ b/storage/filesystem/object_test.go
@@ -52,12 +52,12 @@
 	c.Assert(err, IsNil)
 
 	expected := plumbing.NewHash("8d45a34641d73851e01d3754320b33bb5be3c4d3")
-	obj, err := o.getFromPackfile(expected)
+	obj, err := o.getFromPackfile(expected, false)
 	c.Assert(err, IsNil)
 	c.Assert(obj.Hash(), Equals, expected)
 
 	expected = plumbing.NewHash("e9cfa4c9ca160546efd7e8582ec77952a27b17db")
-	obj, err = o.getFromPackfile(expected)
+	obj, err = o.getFromPackfile(expected, false)
 	c.Assert(err, IsNil)
 	c.Assert(obj.Hash(), Equals, expected)
 }
diff --git a/storage/filesystem/storage_test.go b/storage/filesystem/storage_test.go
index 22709f5..b165c5e 100644
--- a/storage/filesystem/storage_test.go
+++ b/storage/filesystem/storage_test.go
@@ -4,6 +4,7 @@
 	"io/ioutil"
 	"testing"
 
+	"gopkg.in/src-d/go-git.v4/plumbing/storer"
 	"gopkg.in/src-d/go-git.v4/storage/test"
 
 	. "gopkg.in/check.v1"
@@ -25,6 +26,14 @@
 	storage, err := NewStorage(osfs.New(s.dir))
 	c.Assert(err, IsNil)
 
+	// ensure that the right interfaces are implemented
+	var _ storer.EncodedObjectStorer = storage
+	var _ storer.IndexStorer = storage
+	var _ storer.ReferenceStorer = storage
+	var _ storer.ShallowStorer = storage
+	var _ storer.DeltaObjectStorer = storage
+	var _ storer.PackfileWriter = storage
+
 	s.BaseStorageSuite = test.NewBaseStorageSuite(storage)
 	s.BaseStorageSuite.SetUpTest(c)
 }
diff --git a/storage/test/storage_suite.go b/storage/test/storage_suite.go
index ac1baa0..bf93515 100644
--- a/storage/test/storage_suite.go
+++ b/storage/test/storage_suite.go
@@ -403,6 +403,40 @@
 	c.Assert(storer, NotNil)
 }
 
+func (s *BaseStorageSuite) TestDeltaObjectStorer(c *C) {
+	dos, ok := s.Storer.(storer.DeltaObjectStorer)
+	if !ok {
+		c.Skip("not a DeltaObjectStorer")
+	}
+
+	pwr, ok := s.Storer.(storer.PackfileWriter)
+	if !ok {
+		c.Skip("not a storer.PackfileWriter")
+	}
+
+	pw, err := pwr.PackfileWriter()
+	c.Assert(err, IsNil)
+
+	f := fixtures.Basic().One()
+	_, err = io.Copy(pw, f.Packfile())
+	c.Assert(err, IsNil)
+
+	err = pw.Close()
+	c.Assert(err, IsNil)
+
+	h := plumbing.NewHash("32858aad3c383ed1ff0a0f9bdf231d54a00c9e88")
+	obj, err := dos.DeltaObject(plumbing.AnyObject, h)
+	c.Assert(err, IsNil)
+	c.Assert(obj.Type(), Equals, plumbing.BlobObject)
+
+	h = plumbing.NewHash("aa9b383c260e1d05fbbf6b30a02914555e20c725")
+	obj, err = dos.DeltaObject(plumbing.AnyObject, h)
+	c.Assert(err, IsNil)
+	c.Assert(obj.Type(), Equals, plumbing.OFSDeltaObject)
+	_, ok = obj.(plumbing.DeltaObject)
+	c.Assert(ok, Equals, true)
+}
+
 func objectEquals(a plumbing.EncodedObject, b plumbing.EncodedObject) error {
 	ha := a.Hash()
 	hb := b.Hash()
diff --git a/submodule.go b/submodule.go
index cf90c63..fd3d173 100644
--- a/submodule.go
+++ b/submodule.go
@@ -218,7 +218,7 @@
 	ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash,
 ) error {
 	if !o.NoFetch {
-		err := r.FetchContext(ctx, &FetchOptions{})
+		err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth})
 		if err != nil && err != NoErrAlreadyUpToDate {
 			return err
 		}
diff --git a/worktree.go b/worktree.go
index 4e4d544..60c8adb 100644
--- a/worktree.go
+++ b/worktree.go
@@ -112,22 +112,22 @@
 	}
 
 	if o.RecurseSubmodules != NoRecurseSubmodules {
-		return w.updateSubmodules(o.RecurseSubmodules)
+		return w.updateSubmodules(&SubmoduleUpdateOptions{
+			RecurseSubmodules: o.RecurseSubmodules,
+			Auth:              o.Auth,
+		})
 	}
 
 	return nil
 }
 
-func (w *Worktree) updateSubmodules(recursion SubmoduleRescursivity) error {
+func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error {
 	s, err := w.Submodules()
 	if err != nil {
 		return err
 	}
-
-	return s.Update(&SubmoduleUpdateOptions{
-		Init:              true,
-		RecurseSubmodules: recursion,
-	})
+	o.Init = true
+	return s.Update(o)
 }
 
 // Checkout switch branches or restore working tree files.