Bump libnetwork to 7dd202d11b2078c063fefc614c9fb9e4615da0e1

Signed-off-by: jose-bigio <jose.bigio@docker.com>
diff --git a/vendor.conf b/vendor.conf
index 93a5038..f59f9cf 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -30,7 +30,7 @@
 github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
 
 #get libnetwork packages
-github.com/docker/libnetwork 9bca9a4a220b158cc94402e0f8c2c7714eb6f503
+github.com/docker/libnetwork 7dd202d11b2078c063fefc614c9fb9e4615da0e1
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
diff --git a/vendor/github.com/docker/libnetwork/diagnose/server.go b/vendor/github.com/docker/libnetwork/diagnose/server.go
index c841e51..2330b65 100644
--- a/vendor/github.com/docker/libnetwork/diagnose/server.go
+++ b/vendor/github.com/docker/libnetwork/diagnose/server.go
@@ -91,7 +91,7 @@
 	}
 
 	logrus.Infof("Starting the diagnose server listening on %d for commands", port)
-	srv := &http.Server{Addr: fmt.Sprintf("127.0.0.1:%d", port), Handler: s}
+	srv := &http.Server{Addr: fmt.Sprintf("%s:%d", ip, port), Handler: s}
 	s.srv = srv
 	s.enable = 1
 	go func(n *Server) {
@@ -101,7 +101,6 @@
 			atomic.SwapInt32(&n.enable, 0)
 		}
 	}(s)
-
 }
 
 // DisableDebug stops the debug server and closes the tcp socket
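
The server.go change replaces the hardcoded loopback bind address with the caller-supplied IP, so the diagnostic server can listen on any interface. Below is a minimal, self-contained sketch of the same pattern; `startDiagnosticServer` and the `/ready` route are illustrative names, not libnetwork's API:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// startDiagnosticServer builds the bind address from a caller-supplied IP
// instead of a hardcoded "127.0.0.1", mirroring the patched EnableDebug.
func startDiagnosticServer(ip string, port int, handler http.Handler) *http.Server {
	srv := &http.Server{
		Addr:    fmt.Sprintf("%s:%d", ip, port), // e.g. "0.0.0.0:2000" binds all interfaces
		Handler: handler,
	}
	go func() {
		// ListenAndServe blocks until Close/Shutdown is called or the listener fails.
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("diagnostic server exited: %v", err)
		}
	}()
	return srv
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ready", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "OK")
	})
	srv := startDiagnosticServer("127.0.0.1", 2000, mux)
	defer srv.Close()
	select {} // keep the process alive; interrupt to stop
}
```
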
diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
index 21c3bc0..072c622 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
@@ -16,46 +16,28 @@
 	return []byte{}
 }
 
-// getNode searches the node inside the tables
-// returns true if the node was respectively in the active list, explicit node leave list or failed list
-func (nDB *NetworkDB) getNode(nEvent *NodeEvent, extract bool) (bool, bool, bool, *node) {
-	var active bool
-	var left bool
-	var failed bool
-
-	for _, nodes := range []map[string]*node{
-		nDB.failedNodes,
-		nDB.leftNodes,
-		nDB.nodes,
-	} {
-		if n, ok := nodes[nEvent.NodeName]; ok {
-			active = &nodes == &nDB.nodes
-			left = &nodes == &nDB.leftNodes
-			failed = &nodes == &nDB.failedNodes
-			if n.ltime >= nEvent.LTime {
-				return active, left, failed, nil
-			}
-			if extract {
-				delete(nodes, n.Name)
-			}
-			return active, left, failed, n
-		}
-	}
-	return active, left, failed, nil
-}
-
 func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
 	// Update our local clock if the received message has a newer
 	// time.
 	nDB.networkClock.Witness(nEvent.LTime)
 
 	nDB.RLock()
-	active, left, _, n := nDB.getNode(nEvent, false)
+	defer nDB.RUnlock()
+
+	// check if the node exists
+	n, _, _ := nDB.findNode(nEvent.NodeName)
 	if n == nil {
-		nDB.RUnlock()
 		return false
 	}
-	nDB.RUnlock()
+
+	// check if the event is fresh
+	if n.ltime >= nEvent.LTime {
+		return false
+	}
+
+	// If we are here, it means the event is fresher and the node is known. Update the Lamport time
+	n.ltime = nEvent.LTime
+
 	// If it is a node leave event for a manager and this is the only manager we
 	// know of, we want the reconnect logic to kick in. In a single manager
 	// cluster, manager's gossip can't be bootstrapped unless some other node
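
The hunk above reworks `handleNodeEvent` to take the read lock once for the whole function, look the node up through the new `findNode` helper, and discard any event whose Lamport time is not strictly newer than the state already held for that node. A minimal sketch of that freshness check, with illustrative names (the real code also witnesses the event time on `networkClock` first):

```go
package main

import "fmt"

// LamportTime is a logical timestamp; this is a stand-alone sketch of the
// freshness check, not networkdb's types.
type LamportTime uint64

type node struct {
	name  string
	ltime LamportTime
}

// applyEvent mirrors the patched flow: drop the event unless it is strictly
// newer than the state we already hold for that node.
func applyEvent(n *node, eventTime LamportTime) bool {
	if n.ltime >= eventTime {
		return false // stale or duplicate gossip, ignore it
	}
	n.ltime = eventTime
	return true
}

func main() {
	n := &node{name: "node-1", ltime: 10}
	fmt.Println(applyEvent(n, 9))  // false: older event
	fmt.Println(applyEvent(n, 10)) // false: duplicate
	fmt.Println(applyEvent(n, 11)) // true: fresher, ltime advances
}
```
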
@@ -63,45 +45,32 @@
 	if len(nDB.bootStrapIP) == 1 && nEvent.Type == NodeEventTypeLeave {
 		for _, ip := range nDB.bootStrapIP {
 			if ip.Equal(n.Addr) {
-				n.ltime = nEvent.LTime
 				return true
 			}
 		}
 	}
 
-	n.ltime = nEvent.LTime
-
 	switch nEvent.Type {
 	case NodeEventTypeJoin:
-		if active {
-			// the node is already marked as active nothing to do
+		moved, err := nDB.changeNodeState(n.Name, nodeActiveState)
+		if err != nil {
+			logrus.WithError(err).Error("unable to find the node to move")
 			return false
 		}
-		nDB.Lock()
-		// Because the lock got released on the previous check we have to do it again and re verify the status of the node
-		// All of this is to avoid a big lock on the function
-		if active, _, _, n = nDB.getNode(nEvent, true); !active && n != nil {
-			n.reapTime = 0
-			nDB.nodes[n.Name] = n
+		if moved {
 			logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
 		}
-		nDB.Unlock()
-		return true
+		return moved
 	case NodeEventTypeLeave:
-		if left {
-			// the node is already marked as left nothing to do.
+		moved, err := nDB.changeNodeState(n.Name, nodeLeftState)
+		if err != nil {
+			logrus.WithError(err).Error("unable to find the node to move")
 			return false
 		}
-		nDB.Lock()
-		// Because the lock got released on the previous check we have to do it again and re verify the status of the node
-		// All of this is to avoid a big lock on the function
-		if _, left, _, n = nDB.getNode(nEvent, true); !left && n != nil {
-			n.reapTime = nodeReapInterval
-			nDB.leftNodes[n.Name] = n
+		if moved {
 			logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr)
 		}
-		nDB.Unlock()
-		return true
+		return moved
 	}
 
 	return false
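
The join/leave switch now performs the check-and-move in a single call to `changeNodeState` while the lock is already held, instead of releasing the read lock, re-acquiring a write lock, and re-verifying the node state as the deleted comments describe. The sketch below shows why a single critical section makes the re-check unnecessary; `registry` and `moveChecked` are illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

// registry demonstrates the check-then-act pitfall the old code worked around:
// if the lock is released between the check and the update, another goroutine
// may change the state in between, forcing the check to be redone.
type registry struct {
	mu    sync.Mutex
	state map[string]string
}

// moveChecked performs the check and the update inside one critical section,
// which is what calling changeNodeState under the held lock achieves.
func (r *registry) moveChecked(name, from, to string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.state[name] != from {
		return false // state changed (or node unknown); nothing to do
	}
	r.state[name] = to
	return true
}

func main() {
	r := &registry{state: map[string]string{"node-1": "active"}}
	fmt.Println(r.moveChecked("node-1", "active", "left")) // true
	fmt.Println(r.moveChecked("node-1", "active", "left")) // false: already moved
}
```
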
diff --git a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
index 6e11805..89aa7c4 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go
@@ -21,24 +21,6 @@
 	}
 }
 
-func (e *eventDelegate) purgeReincarnation(mn *memberlist.Node) {
-	for name, node := range e.nDB.failedNodes {
-		if node.Addr.Equal(mn.Addr) {
-			logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
-			delete(e.nDB.failedNodes, name)
-			return
-		}
-	}
-
-	for name, node := range e.nDB.leftNodes {
-		if node.Addr.Equal(mn.Addr) {
-			logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
-			delete(e.nDB.leftNodes, name)
-			return
-		}
-	}
-}
-
 func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) {
 	logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr)
 	e.broadcastNodeEvent(mn.Addr, opCreate)
@@ -57,44 +39,35 @@
 	// Every node has a unique ID
 	// Check, based on the IP address, whether the new node that joined is actually a new incarnation of a previous
 	// failed or shutdown one
-	e.purgeReincarnation(mn)
+	e.nDB.purgeReincarnation(mn)
 
 	e.nDB.nodes[mn.Name] = &node{Node: *mn}
 	logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr)
 }
 
 func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) {
-	var failed bool
 	logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr)
 	e.broadcastNodeEvent(mn.Addr, opDelete)
-	// The node left or failed, delete all the entries created by it.
-	// If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted
-	// If the node instead left because was going down, then it makes sense to just delete all its state
+
 	e.nDB.Lock()
 	defer e.nDB.Unlock()
-	e.nDB.deleteNodeFromNetworks(mn.Name)
-	e.nDB.deleteNodeTableEntries(mn.Name)
-	if n, ok := e.nDB.nodes[mn.Name]; ok {
-		delete(e.nDB.nodes, mn.Name)
 
-		// Check if a new incarnation of the same node already joined
-		// In that case this node can simply be removed and no further action are needed
-		for name, node := range e.nDB.nodes {
-			if node.Addr.Equal(mn.Addr) {
-				logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", name, node.Addr, mn.Name, mn.Addr)
-				return
-			}
-		}
-
-		// In case of node failure, keep retrying to reconnect every retryInterval (1sec) for nodeReapInterval (24h)
-		// Explicit leave will have already removed the node from the list of nodes (nDB.nodes) and put it into the leftNodes map
-		n.reapTime = nodeReapInterval
-		e.nDB.failedNodes[mn.Name] = n
-		failed = true
+	n, currState, _ := e.nDB.findNode(mn.Name)
+	if n == nil {
+		logrus.Errorf("Node %s/%s not found in the node lists", mn.Name, mn.Addr)
+		return
 	}
-
-	if failed {
-		logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
+	// If the node was still active, it means it did not send the leave-cluster message, so it probably
+	// failed. Otherwise it is already in the left list and nothing else has to be done
+	if currState == nodeActiveState {
+		moved, err := e.nDB.changeNodeState(mn.Name, nodeFailedState)
+		if err != nil {
+			logrus.WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr)
+			return
+		}
+		if moved {
+			logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr)
+		}
 	}
 }
 
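For context, `NotifyJoin`, `NotifyLeave`, and `NotifyUpdate` are the callbacks of hashicorp/memberlist's `EventDelegate` interface, which is how networkdb's `eventDelegate` gets driven. A minimal sketch of wiring such a delegate into a memberlist instance; `logDelegate` is an illustrative stand-in, not networkdb code:

```go
package main

import (
	"log"

	"github.com/hashicorp/memberlist"
)

// logDelegate is a minimal EventDelegate: memberlist invokes these callbacks
// when cluster membership changes.
type logDelegate struct{}

func (d *logDelegate) NotifyJoin(n *memberlist.Node)   { log.Printf("join:   %s/%s", n.Name, n.Addr) }
func (d *logDelegate) NotifyLeave(n *memberlist.Node)  { log.Printf("leave:  %s/%s", n.Name, n.Addr) }
func (d *logDelegate) NotifyUpdate(n *memberlist.Node) { log.Printf("update: %s/%s", n.Name, n.Addr) }

func main() {
	cfg := memberlist.DefaultLANConfig()
	cfg.Events = &logDelegate{} // NotifyLeave above is the hook patched here
	ml, err := memberlist.Create(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer ml.Shutdown()
	select {}
}
```
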
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
index 3c6032b..a4443bc 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnose.go
@@ -399,6 +399,7 @@
 					Value: encodedValue,
 					Owner: v.owner,
 				})
+			i++
 		}
 		log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get table done")
 		diagnose.HTTPReply(w, diagnose.CommandSucceed(rsp), json)
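
The one-line networkdbdiagnose.go fix adds a missing increment of the manually maintained counter `i` inside the loop that builds the table response, so every iteration had been observing the same value. The full loop body is not shown in the hunk; the sketch below reproduces the general shape of the bug with illustrative names:

```go
package main

import "fmt"

type entry struct {
	Index int
	Key   string
}

// collect tags each map entry with a running index. Go's range over a map has
// no positional index, so the counter must be advanced by hand; the patched
// hunk adds exactly this kind of missing i++.
func collect(src map[string]string) []entry {
	out := make([]entry, 0, len(src))
	i := 0
	for k := range src {
		out = append(out, entry{Index: i, Key: k})
		i++ // without this, every entry gets Index 0
	}
	return out
}

func main() {
	fmt.Println(collect(map[string]string{"a": "1", "b": "2"}))
}
```
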
diff --git a/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go
new file mode 100644
index 0000000..f5a7498
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go
@@ -0,0 +1,120 @@
+package networkdb
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/memberlist"
+	"github.com/sirupsen/logrus"
+)
+
+type nodeState int
+
+const (
+	nodeNotFound    nodeState = -1
+	nodeActiveState nodeState = 0
+	nodeLeftState   nodeState = 1
+	nodeFailedState nodeState = 2
+)
+
+var nodeStateName = map[nodeState]string{
+	-1: "NodeNotFound",
+	0:  "NodeActive",
+	1:  "NodeLeft",
+	2:  "NodeFailed",
+}
+
+// findNode searches for the node in the 3 node lists and returns the node pointer and the list
+// where it was found
+func (nDB *NetworkDB) findNode(nodeName string) (*node, nodeState, map[string]*node) {
+	for i, nodes := range []map[string]*node{
+		nDB.nodes,
+		nDB.leftNodes,
+		nDB.failedNodes,
+	} {
+		if n, ok := nodes[nodeName]; ok {
+			return n, nodeState(i), nodes
+		}
+	}
+	return nil, nodeNotFound, nil
+}
+
+// changeNodeState changes the state of the specified node. It returns true if the node was moved,
+// false if there was no need to change the node state. An error is returned if the node does not
+// exist
+func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool, error) {
+	n, currState, m := nDB.findNode(nodeName)
+	if n == nil {
+		return false, fmt.Errorf("node %s not found", nodeName)
+	}
+
+	switch newState {
+	case nodeActiveState:
+		if currState == nodeActiveState {
+			return false, nil
+		}
+
+		delete(m, nodeName)
+		// reset the node reap time
+		n.reapTime = 0
+		nDB.nodes[nodeName] = n
+	case nodeLeftState:
+		if currState == nodeLeftState {
+			return false, nil
+		}
+
+		delete(m, nodeName)
+		nDB.leftNodes[nodeName] = n
+	case nodeFailedState:
+		if currState == nodeFailedState {
+			return false, nil
+		}
+
+		delete(m, nodeName)
+		nDB.failedNodes[nodeName] = n
+	}
+
+	logrus.Infof("Node %s change state %s --> %s", nodeName, nodeStateName[currState], nodeStateName[newState])
+
+	if newState == nodeLeftState || newState == nodeFailedState {
+	// set the node reap time, if not already set
+	// It is possible that a node goes from failed to left and the reap time was already set, so keep that value
+		if n.reapTime == 0 {
+			n.reapTime = nodeReapInterval
+		}
+		// The node left or failed, so delete all the entries created by it.
+		// If the node was temporarily down, deleting the entries guarantees that the CREATE events will be accepted
+		// If the node instead left because it was going down, then it makes sense to just delete all its state
+		nDB.deleteNodeFromNetworks(n.Name)
+		nDB.deleteNodeTableEntries(n.Name)
+	}
+
+	return true, nil
+}
+
+func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool {
+	for name, node := range nDB.nodes {
+		if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+			logrus.Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+			nDB.changeNodeState(name, nodeLeftState)
+			return true
+		}
+	}
+
+	for name, node := range nDB.failedNodes {
+		if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+			logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+			nDB.changeNodeState(name, nodeLeftState)
+			return true
+		}
+	}
+
+	for name, node := range nDB.leftNodes {
+		if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name {
+			logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr)
+			nDB.changeNodeState(name, nodeLeftState)
+			return true
+		}
+	}
+
+	return false
+}
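
The new nodemgmt.go file centralizes node bookkeeping into a small state machine: a node lives in exactly one of the active, left, or failed maps, `findNode` reports which one, and `changeNodeState` moves it between them, resetting or arming `reapTime` and purging the node's table state on leave or failure. A self-contained sketch of the same move-between-maps mechanic, with illustrative names:

```go
package main

import "fmt"

// state mirrors nodeState: a node is in exactly one list at a time.
type state int

const (
	active state = iota
	left
	failed
)

type node struct{ name string }

// db keeps one map per state, like nodes/leftNodes/failedNodes in NetworkDB.
type db struct {
	byState [3]map[string]*node
}

func newDB() *db {
	d := &db{}
	for i := range d.byState {
		d.byState[i] = map[string]*node{}
	}
	return d
}

// find returns the node and the state it is currently in, or (nil, -1).
func (d *db) find(name string) (*node, state) {
	for s, m := range d.byState {
		if n, ok := m[name]; ok {
			return n, state(s)
		}
	}
	return nil, -1
}

// move relocates a node between maps, as changeNodeState does, and reports
// whether the node actually changed state.
func (d *db) move(name string, to state) (bool, error) {
	n, cur := d.find(name)
	if n == nil {
		return false, fmt.Errorf("node %s not found", name)
	}
	if cur == to {
		return false, nil // already there, nothing to do
	}
	delete(d.byState[cur], name)
	d.byState[to][name] = n
	return true, nil
}

func main() {
	d := newDB()
	d.byState[active]["node-1"] = &node{name: "node-1"}
	fmt.Println(d.move("node-1", failed)) // true <nil>
	fmt.Println(d.move("node-1", failed)) // false <nil>: already failed
	fmt.Println(d.move("node-2", left))   // false + "not found" error
}
```
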