Make Python scripts portable across Python 2/3

Mostly:

- harmonize print function
- harmonize item iteration
- explicitly force list creation when needed

Differential Revision: https://reviews.llvm.org/D55829

llvm-svn: 350382
diff --git a/CompareDebugInfo.py b/CompareDebugInfo.py
index 7ec2d99..121014e 100755
--- a/CompareDebugInfo.py
+++ b/CompareDebugInfo.py
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+from __future__ import print_function
 
 import os
 import sys
@@ -45,14 +46,14 @@
         self.values[arg_name] = value
         
     def __repr__(self):
-        print self.name
+        print(self.name)
         for k, v in self.values.items():
-            print k, "=", v
+            print(k, "=", v)
         return ''
 
     def compare_args(self, other, file):
         myitems = self.values.items()
-        otheritems = other.values.items()
+        otheritems = list(other.values.items())
         match = False
-        for i, my_item in enumerate(my_items):
+        for i, my_item in enumerate(myitems):
             if i >= len(otheritems):
diff --git a/litsupport/modules/stats.py b/litsupport/modules/stats.py
index 4cba3e8..125342c 100644
--- a/litsupport/modules/stats.py
+++ b/litsupport/modules/stats.py
@@ -14,7 +14,7 @@
     except Exception as e:
         logging.warning("Could not read '%s'", statsfilename, exc_info=e)
         return
-    for name, value in stats.iteritems():
+    for name, value in stats.items():
         global_stats[name] += value
 
 
@@ -37,7 +37,7 @@
         logging.warning("No stats for '%s'", context.test.getFullName())
 
     result = dict()
-    for key, value in stats.iteritems():
+    for key, value in stats.items():
         result[key] = value
     return result
 
diff --git a/utils/compare.py b/utils/compare.py
index b657877..fa93b24 100755
--- a/utils/compare.py
+++ b/utils/compare.py
@@ -1,8 +1,10 @@
-#!/usr/bin/env python2.7
+#!/usr/bin/env python
 """Tool to filter, organize, compare and display benchmarking results. Usefull
 for smaller datasets. It works great with a few dozen runs it is not designed to
 deal with hundreds.
 Requires the pandas library to be installed."""
+from __future__ import print_function
+
 import pandas as pd
 import sys
 import os.path
@@ -19,7 +21,7 @@
     info_columns = ['hash']
     # Pass1: Figure out metrics (= the column index)
     if 'tests' not in jsondata:
-        print "%s: Could not find toplevel 'tests' key"
+        print("Could not find toplevel 'tests' key")
         sys.exit(1)
     for test in jsondata['tests']:
         name = test.get("name")
@@ -31,7 +33,7 @@
             sys.exit(1)
         names.add(name)
         if "metrics" not in test:
-            print "Warning: '%s' has No metrics!" % test['name']
+            print("Warning: '%s' has No metrics!" % test['name'])
             continue
         for name in test["metrics"].keys():
             if name not in columnindexes:
@@ -54,9 +56,9 @@
 
         datarow = [nan] * len(columns)
         if "metrics" in test:
-            for (metricname, value) in test['metrics'].iteritems():
+            for (metricname, value) in test['metrics'].items():
                 datarow[columnindexes[metricname]] = value
-        for (name, value) in test.iteritems():
+        for (name, value) in test.items():
             index = columnindexes.get(name)
             if index is not None:
                 datarow[index] = test[name]
@@ -148,7 +150,7 @@
     n_after = len(after.groupby(level=1))
     n_filtered = n_before - n_after
     if n_filtered != 0:
-        print "%s: %s (filtered out)" % (reason, n_filtered)
+        print("%s: %s (filtered out)" % (reason, n_filtered))
 
 # Truncate a string to a maximum length by keeping a prefix, a suffix and ...
 # in the middle
@@ -222,8 +224,8 @@
     pd.set_option("display.max_colwidth", 0)
     out = dataout.to_string(index=False, justify='left',
                             float_format=float_format, formatters=formatters)
-    print out
-    print d.describe()
+    print(out)
+    print(d.describe())
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(prog='compare.py')
@@ -303,7 +305,7 @@
     # Filter data
     proggroup = data.groupby(level=1)
     initial_size = len(proggroup.indices)
-    print "Tests: %s" % (initial_size,)
+    print("Tests: %s" % (initial_size,))
     if config.filter_failed and hasattr(data, 'Exec'):
         newdata = filter_failed(data)
         print_filter_stats("Failed", data, newdata)
@@ -326,10 +328,10 @@
         data = newdata
     final_size = len(data.groupby(level=1))
     if final_size != initial_size:
-        print "Remaining: %d" % (final_size,)
+        print("Remaining: %d" % (final_size,))
 
     # Reduce / add columns
-    print "Metric: %s" % (",".join(metrics),)
+    print("Metric: %s" % (",".join(metrics),))
     if len(metrics) > 0:
         data = data[metrics]
     data = add_diff_column(data)
@@ -339,7 +341,7 @@
         sortkey = data.columns[0]
 
     # Print data
-    print ""
+    print("")
     shorten_names = not config.full
     limit_output = (not config.all) and (not config.full)
     print_result(data, limit_output, shorten_names, config.show_diff, sortkey)
diff --git a/utils/tdiff.py b/utils/tdiff.py
index 9f4cedb..dcfa167 100755
--- a/utils/tdiff.py
+++ b/utils/tdiff.py
@@ -95,7 +95,7 @@
     if sc_arg_max <= 0:
         return 10000 # wild guess
     env_len = 0
-    for key,val in os.environ.iteritems():
+    for key,val in os.environ.items():
         env_len += len(key) + len(val) + 10
     return sc_arg_max - env_len
 
@@ -140,12 +140,12 @@
 
     if config.mode == 'sources':
         # Take leafs in the dependency tree
-        for target, depnode in tree.iteritems():
+        for target, depnode in tree.items():
             if len(depnode.inputs) == 0:
                 yield target
     else:
         # Take files ending in '.o'
-        for target, depnode in tree.iteritems():
+        for target, depnode in tree.items():
             if target.endswith(".o"):
                 # Determine .s/.stats ending used by -save-temps=obj or
                 # -save-stats=obj