[otBase] Rewrite table packing

This moves table packing into fontTools.ttLib.tables.distillery.
Overflows are currently NOT handled, but the algorithm is changed
from a DFS to Dijkstra's algorithm, which already packs more inputs
without overflowing, including the input from the issue below:

https://github.com/fonttools/fonttools/issues/1105

The trick now is to find an input that does overflow, so I can go
ahead and implement overflow resolution.
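
For reference, here is the ordering idea in a nutshell.  This is a
standalone sketch with illustrative names, using a plain heapq instead
of the Fibonacci heap; the real code below works on OTTableWriter
objects and also folds the item's own length into the priority.  A
subtable becomes ready once all of its parents have been packed, and
the ready set is drained in order of deadline: the position of the
earliest packed parent plus the reach of the offset pointing at the
subtable.

import heapq

class Table(object):
	# Illustrative stand-in for OTTableWriter.
	def __init__(self, length, longOffset=False):
		self.length = length
		self.longOffset = longOffset
		self.subtables = []  # tables this table points to
		self.parents = []    # tables pointing to this table
		self.pos = None

def pack(root):
	out, pos, counter = [], 0, 0
	packed, queued = set(), {id(root)}
	ready = [(0, counter, root)]  # (deadline, tie-breaker, table)
	while ready:
		_, _, table = heapq.heappop(ready)
		table.pos = pos
		pos += table.length
		out.append(table)
		packed.add(id(table))
		for child in table.subtables:
			if id(child) in queued:
				continue
			if all(id(p) in packed for p in child.parents):
				reach = 65536**2 if child.longOffset else 65536
				deadline = min(p.pos for p in child.parents) + reach
				counter += 1
				heapq.heappush(ready, (deadline, counter, child))
				queued.add(id(child))
	return out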

Also, this introduces a dependency on fibonacci_heap_mod.  That
dependency is to be removed later by copying a Fibonacci heap
implementation in-tree, which will also ensure reproducible output.
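
For reference, these are the only operations the new module needs from
fibonacci_heap_mod (decrease_key and delete appear only in the still
commented-out await queue), i.e. what an in-tree replacement will have
to provide:

from fibonacci_heap_mod import Fibonacci_heap

heap = Fibonacci_heap()
node = heap.enqueue('GSUB', 100)  # enqueue returns an entry handle
heap.enqueue('GPOS', 50)
heap.decrease_key(node, 10)       # lower an entry's priority in place
entry = heap.dequeue_min()        # pops the entry with smallest priority
entry.get_value()                 # -> 'GSUB'
entry.get_priority()              # -> 10
heap.delete(heap.enqueue('GDEF', 1))
# The code below also relies on an empty heap being falsy (while/assert).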
diff --git a/Lib/fontTools/ttLib/tables/distillery/__init__.py b/Lib/fontTools/ttLib/tables/distillery/__init__.py
new file mode 100644
index 0000000..3eee741
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/distillery/__init__.py
@@ -0,0 +1,141 @@
+"""This module helps with distilling a set of objects (aka tables) linked together through
+a set of object references (represented eventually by offsets) into a bytestream in a compact
+and efficient manner.  This is useful for compiling tables like GSUB/GPOS/GDEF and other
+tables represented in the otData.py module."""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+from fibonacci_heap_mod import Fibonacci_heap
+
+
+def sortMeOut(table):
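+	"""Return the writers reachable from `table`, ordered for packing.
+
+	The caller serializes them by concatenating each writer's data in this
+	order (see OTTableWriter.getAllData in otBase.py)."""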
+	distillery = Distillery()
+	return distillery.distill(table)
+
+class ObjectSet(object):
+
+	def __init__(self):
+		self._s = set()
+		self._d = dict()
+
+	def add(self, obj):
+		self._s.add(id(obj))
+		self._d[id(obj)] = obj
+
+	def remove(self, obj):
+		self._s.remove(id(obj))
+		del self._d[id(obj)]
+
+	def issubset(self, other):
+		assert type(other) == ObjectSet
+		return self._s.issubset(other._s)
+
+	def __iter__(self):
+		return iter(self._d.values())
+
+	def __contains__(self, obj):
+		return id(obj) in self._s
+
+	def __bool__(self):
+		return bool(self._s)
+
+	__nonzero__ = __bool__
+
+	def __len__(self):
+		return len(self._s)
+
+	def __repr__(self):
+		return repr(self._s)
+
+class Distillery(object):
+
+	def __init__(self):
+		self.packedS = ObjectSet()
+		self.readyQ = Fibonacci_heap()
+		self.readyS = ObjectSet()
+		#self.awaitQ = Fibonacci_heap()
+		#self.awaitS = ObjectSet()
+
+	def _itemPriority(self, item):
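+		# Lower priority means more urgent.  Roughly: the position of the
+		# item's earliest already-packed parent, plus the reach of the offset
+		# pointing at it (65536 for short offsets, 65536**2 for long ones),
+		# plus the item's own length.  Items with no packed parent yet get a
+		# large sentinel so they sort last.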
+		leashLen = (65536)**2 if item.longOffset else 65536
+		packedParentsPos = [p.pos for p in item.parents if p in self.packedS]
+		leashStart = min(packedParentsPos) if packedParentsPos else (65536)**2 * 2
+		itemLen = len(item)
+		return (leashStart + leashLen + itemLen)# / 4
+
+	def _itemIsReady(self, item):
+		return item.parents.issubset(self.packedS)
+
+	def _readyEnqueue(self, item):
+		item.node = self.readyQ.enqueue(item, self._itemPriority(item))
+		self.readyS.add(item)
+
+	#def _awaitEnqueue(self, item):
+	#	item.node = self.awaitQ.enqueue(item, self._itemPriority(item))
+	#	self.awaitS.add(item)
+
+	#def _awaitDelete(self, item):
+	#	assert item in self.awaitS
+	#	self.awaitS.remove(item)
+	#	self.awaitQ.delete(item.node)
+	#	del item.node
+
+	def _setParents(self, root, done):
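+		# Recursively annotate every writer in the tree with the ObjectSet of
+		# writers that reference it; `done` guards against re-walking shared
+		# subtables.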
+		done.add(root)
+		root.parents = ObjectSet()
+		for item in root.items:
+			if not hasattr(item, 'getData'):
+				continue
+			if item not in done:
+				self._setParents(item, done)
+			item.parents.add(root)
+
+	def distill(self, root):
+		self._setParents(root, ObjectSet())
+
+		out = []
+
+		self._readyEnqueue(root)
+
+		pos = 0
+		while self.readyQ:
+			entry = self.readyQ.dequeue_min()
+			obj = entry.get_value()
+			self.readyS.remove(obj)
+			prio = entry.get_priority()
+			del entry
+			del obj.node
+
+			out.append(obj)
+			obj.pos = pos
+			self.packedS.add(obj)
+
+			for item in obj.items:
+				if not hasattr(item, "getData"):
+					continue
+
+				assert item not in self.packedS
+				if item in self.readyS:
+					continue
+
+				if self._itemIsReady(item):
+					#if hasattr(item, 'node'):
+					#	self._awaitDelete(item)
+
+					self._readyEnqueue(item)
+				else:
+					pass
+					#if hasattr(item, 'node'):
+					#	self.awaitQ.decrease_key(item.node, self._itemPriority(item))
+					#else:
+					#	self._awaitEnqueue(item)
+
+			pos += len(obj)
+
+		#assert not self.awaitS, self.awaitS
+		#assert not self.awaitQ, self.awaitQ
+		assert not self.readyS, self.readyS
+		assert not self.readyQ, self.readyQ
+
+		return out
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
index edef765..b20a0f7 100644
--- a/Lib/fontTools/ttLib/tables/otBase.py
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -5,6 +5,7 @@
 import array
 import struct
 import logging
+from . import distillery
 
 log = logging.getLogger(__name__)
 
@@ -67,6 +68,7 @@
 		while True:
 			try:
 				writer = OTTableWriter(tableTag=self.tableTag)
+				writer.name = self.tableTag
 				self.table.compile(writer, font)
 				return writer.getAllData()
 
@@ -222,7 +224,6 @@
 		self.tableTag = tableTag
 		self.name = name
 		self.longOffset = False
-		self.parents = []
 		self.items = []
 		self.pos = None
 
@@ -277,7 +278,6 @@
 		return bytesjoin(items)
 
 	def __hash__(self):
-		# only works after self._doneWriting() has been called
 		if not hasattr(self, '_hash'):
 			items = tuple(id(x) if hasattr(x, 'getData') else x for x in self.items)
 			self._hash = hash(items)
@@ -290,19 +290,23 @@
 	def __eq__(self, other):
 		if type(self) != type(other):
 			return NotImplemented
-		if len(self.items) != len(other.items):
+		if len(self.items) != len(other.items) or self.longOffset != other.longOffset:
 			return False
 		return all(x is y if hasattr(x, 'getData') else x == y for x,y in zip(self.items, other.items))
 
-	def _doneWriting(self, internedTables):
-		# Convert CountData references to data string items
-		# collapse duplicate table references to a unique entry
-		# "tables" are OTTableWriter objects.
+	def _writeCountData(self):
+		# Convert CountData references to data string items.
+		items = self.items
+		for i in range(len(items)):
+			item = items[i]
+			if hasattr(item, "getCountData"):
+				items[i] = item.getCountData()
+			elif hasattr(item, "getData"):
+				item._writeCountData()
 
-		# For Extension Lookup types, we can
-		# eliminate duplicates only within the tree under the Extension Lookup,
-		# as offsets may exceed 64K even between Extension LookupTable subtables.
-		isExtension = hasattr(self, "Extension")
+	def _doneWriting(self, internedTables):
+		# Collapse duplicate table references to a unique entry
+		# "tables" are OTTableWriter objects.
 
 		# Certain versions of Uniscribe reject the font if the GSUB/GPOS top-level
 		# arrays (ScriptList, FeatureList, LookupList) point to the same, possibly
@@ -310,96 +314,35 @@
 		# See: https://github.com/behdad/fonttools/issues/518
 		dontShare = hasattr(self, 'DontShare')
 
-		if isExtension:
-			internedTables = {}
-
 		items = self.items
 		for i in range(len(items)):
 			item = items[i]
-			if hasattr(item, "getCountData"):
-				items[i] = item.getCountData()
-			elif hasattr(item, "getData"):
-				item._doneWriting(internedTables)
-				if not dontShare:
-					internedItem = internedTables.get(item)
-					if internedItem:
-						internedItem.parents.extend(item.parents)
-						item = items[i] = internedItem
-
-					else:
-						internedTables[item] = item
-		self.items = tuple(items)
-
-	def _gatherTables(self, tables, extTables, done):
-		# Convert table references in self.items tree to a flat
-		# list of tables in depth-first traversal order.
-		# "tables" are OTTableWriter objects.
-		# We do the traversal in reverse order at each level, in order to
-		# resolve duplicate references to be the last reference in the list of tables.
-		# For extension lookups, duplicate references can be merged only within the
-		# writer tree under the  extension lookup.
-
-		done[id(self)] = True
-
-		isExtension = hasattr(self, "Extension")
-
-		selfTables = tables
-
-		if isExtension:
-			assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
-			tables, extTables, done = extTables, None, {}
-
-		for i,item in reversed(list(enumerate(self.items))):
-
 			if not hasattr(item, "getData"):
 				continue
 
-			if id(item) not in done:
-				item._gatherTables(tables, extTables, done)
-			else:
-				# Item is already written out by other parent
-				pass
+			item._doneWriting(internedTables)
+			if not dontShare:
+				internedItem = internedTables.get(item)
+				if internedItem:
+					item = items[i] = internedItem
+				else:
+					internedTables[item] = item
 
-		selfTables.append(self)
+		self.items = tuple(items)
 
 	def getAllData(self):
 		"""Assemble all data, including all subtables."""
+		self._writeCountData()
 		internedTables = {}
 		self._doneWriting(internedTables)
-		tables = []
-		extTables = []
-		done = {}
-		self._gatherTables(tables, extTables, done)
-		tables.reverse()
-		extTables.reverse()
-		# Gather all data in two passes: the absolute positions of all
-		# subtable are needed before the actual data can be assembled.
-		pos = 0
-		for table in tables:
-			table.pos = pos
-			pos = pos + table.getDataLength()
-
-		for table in extTables:
-			table.pos = pos
-			pos = pos + table.getDataLength()
-
-		data = []
-		for table in tables:
-			tableData = table.getData()
-			data.append(tableData)
-
-		for table in extTables:
-			tableData = table.getData()
-			data.append(tableData)
-
-		return bytesjoin(data)
+		tables = distillery.sortMeOut(self)
+		return bytesjoin(table.getData() for table in tables)
 
 	# interface for gathering data, as used by table.compile()
 
 	def getSubWriter(self, longOffset=False, name=''):
 		subwriter = self.__class__(self.localState, self.tableTag, name)
 		subwriter.longOffset = longOffset
-		subwriter.parents = [self]
 		return subwriter
 
 	def writeUShort(self, value):