Remove `inlining_default` as it's not used by anything (#344)

The compiler-side change will follow, but training can continue with this
repository at HEAD: LLVM patches over features it cannot find by allocating
appropriately sized buffers and simply never passing them to the model.
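For context, the updated expected lengths in the tests below follow directly from dropping one observation feature: the first dense layer loses one 64-wide input row, shrinking the flattened policy by 64 parameters (17218 -> 17154). A quick sketch of the arithmetic (not part of the patch), using the updated variable shapes from `policy_utils_test.py`:

```python
import numpy as np

# Variable shapes after removing `inlining_default` (from policy_utils_test.py).
# Dropping the feature removes one row from the first kernel: 71 -> 70.
expected_variable_shapes = [(70, 64), (64,), (64, 64), (64,), (64, 64), (64,),
                            (64, 64), (64,), (64, 2), (2,)]

total = sum(int(np.prod(shape)) for shape in expected_variable_shapes)
print(total)  # 17154 == 17218 - 64
```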
diff --git a/compiler_opt/es/blackbox_learner_test.py b/compiler_opt/es/blackbox_learner_test.py
index 7e2bb16..b6a3902 100644
--- a/compiler_opt/es/blackbox_learner_test.py
+++ b/compiler_opt/es/blackbox_learner_test.py
@@ -109,7 +109,7 @@
         actor_network=actor_network)
 
     # make the policy all zeros to be deterministic
-    expected_policy_length = 17218
+    expected_policy_length = 17154
     policy_utils.set_vectorized_parameters_for_policy(policy, [0.0] *
                                                       expected_policy_length)
     init_params = policy_utils.get_vectorized_parameters_from_policy(policy)
@@ -183,7 +183,7 @@
         ESWorker, count=3, arg='', kwarg='') as pool:
       self._learner.run_step(pool)  # pylint: disable=protected-access
       # expected length calculated from expected shapes of variables
-      self.assertEqual(len(self._learner.get_model_weights()), 17218)
+      self.assertEqual(len(self._learner.get_model_weights()), 17154)
       # check that first 5 weights are not all zero
       # this will indicate general validity of all the values
       for value in self._learner.get_model_weights()[:5]:
diff --git a/compiler_opt/es/policy_utils_test.py b/compiler_opt/es/policy_utils_test.py
index 1e6a555..b266ceb 100644
--- a/compiler_opt/es/policy_utils_test.py
+++ b/compiler_opt/es/policy_utils_test.py
@@ -100,7 +100,7 @@
 
 class VectorTest(absltest.TestCase):
 
-  expected_variable_shapes = [(71, 64), (64), (64, 64), (64), (64, 64), (64),
+  expected_variable_shapes = [(70, 64), (64), (64, 64), (64), (64, 64), (64),
                               (64, 64), (64), (64, 2), (2)]
   expected_length_of_a_perturbation = sum(
       np.prod(shape) for shape in expected_variable_shapes)
diff --git a/compiler_opt/rl/inlining/config.py b/compiler_opt/rl/inlining/config.py
index 00771ce..a8bd692 100644
--- a/compiler_opt/rl/inlining/config.py
+++ b/compiler_opt/rl/inlining/config.py
@@ -64,10 +64,7 @@
           'is_multiple_blocks',
           'nested_inlines',
           'nested_inline_cost_estimate',
-          'threshold',
-
-          # inlining_default is not used as feature in training.
-          'inlining_default'))
+          'threshold'))
   reward_spec = tf.TensorSpec(dtype=tf.float32, shape=(), name='reward')
   time_step_spec = time_step.time_step_spec(observation_spec, reward_spec)
   action_spec = tensor_spec.BoundedTensorSpec(
@@ -86,9 +83,6 @@
 
   def observation_processing_layer(obs_spec):
     """Creates the layer to process observation given obs_spec."""
-    if obs_spec.name == 'inlining_default':
-      return tf.keras.layers.Lambda(feature_ops.discard_fn)
-
     quantile = quantile_map[obs_spec.name]
     return tf.keras.layers.Lambda(
         feature_ops.get_normalize_fn(quantile, with_sqrt,
@@ -98,4 +92,4 @@
 
 
 def get_nonnormalized_features():
-  return ['reward', 'inlining_default', 'inlining_decision']
+  return ['reward', 'inlining_decision']
diff --git a/compiler_opt/rl/trainer_test.py b/compiler_opt/rl/trainer_test.py
index bd26c5f..0f9d02f 100644
--- a/compiler_opt/rl/trainer_test.py
+++ b/compiler_opt/rl/trainer_test.py
@@ -31,7 +31,7 @@
   test_trajectory = trajectory.Trajectory(
       step_type=tf.fill([batch_size, sequence_length], 1),
       observation={
-          'inlining_default':
+          'callee_users':
               tf.fill([batch_size, sequence_length],
                       tf.constant(10, dtype=tf.int64))
       },
@@ -54,8 +54,8 @@
 
   def setUp(self):
     observation_spec = {
-        'inlining_default':
-            tf.TensorSpec(dtype=tf.int64, shape=(), name='inlining_default')
+        'callee_users':
+            tf.TensorSpec(dtype=tf.int64, shape=(), name='callee_users')
     }
     self._time_step_spec = time_step.time_step_spec(observation_spec)
     self._action_spec = tensor_spec.BoundedTensorSpec(
@@ -69,7 +69,7 @@
         action_spec=self._action_spec,
         lstm_size=(40,),
         preprocessing_layers={
-            'inlining_default': tf.keras.layers.Lambda(lambda x: x)
+            'callee_users': tf.keras.layers.Lambda(lambda x: x)
         })
     super().setUp()