Auto merge of #62100 - ehuss:update-cargo, r=alexcrichton

Update cargo

17 commits in 807429e1b6da4e2ec52488ef2f59e77068c31e1f..4c1fa54d10f58d69ac9ff55be68e1b1c25ecb816
2019-06-11 14:06:10 +0000 to 2019-06-24 11:24:18 +0000
- Fix typo in comment (rust-lang/cargo#7066)
- travis: enforce formatting of subcrates as well (rust-lang/cargo#7063)
- _cargo: Make function style consistent (rust-lang/cargo#7060)
- Update some fix comments. (rust-lang/cargo#7061)
- Stabilize default-run (rust-lang/cargo#7056)
- Fix typo in comment (rust-lang/cargo#7054)
- fix(fingerpring): do not touch intermediate artifacts (rust-lang/cargo#7050)
- Resolver test/debug cleanup (rust-lang/cargo#7045)
- Rename to_url → into_url (rust-lang/cargo#7048)
- Remove needless lifetimes (rust-lang/cargo#7047)
- Revert test directory cleaning change. (rust-lang/cargo#7042)
- cargo book /reference/manifest: fix typo (rust-lang/cargo#7041)
- Extract resolver tests to their own crate (rust-lang/cargo#7011)
- ci: Do not install addons on rustfmt build jobs (rust-lang/cargo#7038)
- Support absolute paths in dep-info files (rust-lang/cargo#7030)
- ci: Run cargo fmt on all workspaces (rust-lang/cargo#7033)
- Deprecated ONCE_INIT in favor of Once::new() (rust-lang/cargo#7031)
diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index 8381125..c8bf250 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -6,44 +6,13 @@
 
 use crate::ty::layout::{Size, Align};
 use syntax::ast::Mutability;
-use std::{iter, fmt::{self, Display}};
+use std::iter;
 use crate::mir;
-use std::ops::{Deref, DerefMut};
+use std::ops::{Range, Deref, DerefMut};
 use rustc_data_structures::sorted_map::SortedMap;
-use rustc_macros::HashStable;
 use rustc_target::abi::HasDataLayout;
 use std::borrow::Cow;
 
-/// Used by `check_bounds` to indicate whether the pointer needs to be just inbounds
-/// or also inbounds of a *live* allocation.
-#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable, HashStable)]
-pub enum InboundsCheck {
-    Live,
-    MaybeDead,
-}
-
-/// Used by `check_in_alloc` to indicate context of check
-#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable, HashStable)]
-pub enum CheckInAllocMsg {
-    MemoryAccessTest,
-    NullPointerTest,
-    PointerArithmeticTest,
-    InboundsTest,
-}
-
-impl Display for CheckInAllocMsg {
-    /// When this is printed as an error the context looks like this
-    /// "{test name} failed: pointer must be in-bounds at offset..."
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}", match *self {
-            CheckInAllocMsg::MemoryAccessTest => "Memory access",
-            CheckInAllocMsg::NullPointerTest => "Null pointer test",
-            CheckInAllocMsg::PointerArithmeticTest => "Pointer arithmetic",
-            CheckInAllocMsg::InboundsTest => "Inbounds test",
-        })
-    }
-}
-
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
 pub struct Allocation<Tag=(),Extra=()> {
     /// The actual bytes of the allocation.
@@ -146,37 +115,30 @@
 
 impl<'tcx> ::serialize::UseSpecializedDecodable for &'tcx Allocation {}
 
-/// Alignment and bounds checks
-impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
-    /// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
-    /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
-    /// in-bounds!  This follows C's/LLVM's rules.
-    /// If you want to check bounds before doing a memory access, better use `check_bounds`.
-    fn check_bounds_ptr(
-        &self,
-        ptr: Pointer<Tag>,
-        msg: CheckInAllocMsg,
-    ) -> InterpResult<'tcx> {
-        let allocation_size = self.bytes.len() as u64;
-        ptr.check_in_alloc(Size::from_bytes(allocation_size), msg)
-    }
-
-    /// Checks if the memory range beginning at `ptr` and of size `Size` is "in-bounds".
-    #[inline(always)]
-    pub fn check_bounds(
-        &self,
-        cx: &impl HasDataLayout,
-        ptr: Pointer<Tag>,
-        size: Size,
-        msg: CheckInAllocMsg,
-    ) -> InterpResult<'tcx> {
-        // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
-        self.check_bounds_ptr(ptr.offset(size, cx)?, msg)
-    }
-}
-
 /// Byte accessors
 impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
+    /// A small local helper to avoid a bit of code repetition.
+    /// Returns the range within this allocation that the access covers.
+    #[inline]
+    fn check_bounds(
+        &self,
+        offset: Size,
+        size: Size
+    ) -> Range<usize> {
+        let end = offset + size; // this does overflow checking
+        assert_eq!(
+            end.bytes() as usize as u64, end.bytes(),
+            "cannot handle this access on this host architecture"
+        );
+        let end = end.bytes() as usize;
+        assert!(
+            end <= self.bytes.len(),
+            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
+            offset.bytes(), size.bytes(), self.bytes.len()
+        );
+        (offset.bytes() as usize)..end
+    }
+
     /// The last argument controls whether we error out when there are undefined
     /// or pointer bytes. You should never call this directly; call `get_bytes` or
     /// `get_bytes_with_undef_and_ptr` instead.
@@ -184,16 +146,17 @@
     /// This function also guarantees that the resulting pointer will remain stable
     /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
     /// on that.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     fn get_bytes_internal(
         &self,
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
         size: Size,
         check_defined_and_ptr: bool,
-        msg: CheckInAllocMsg,
     ) -> InterpResult<'tcx, &[u8]>
     {
-        self.check_bounds(cx, ptr, size, msg)?;
+        let range = self.check_bounds(ptr.offset, size);
 
         if check_defined_and_ptr {
             self.check_defined(ptr, size)?;
@@ -205,12 +168,13 @@
 
         AllocationExtra::memory_read(self, ptr, size)?;
 
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
-        let offset = ptr.offset.bytes() as usize;
-        Ok(&self.bytes[offset..offset + size.bytes() as usize])
+        Ok(&self.bytes[range])
     }
 
+    /// Checks that these bytes are initialized and not pointer bytes, and then
+    /// returns them as a slice.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     #[inline]
     pub fn get_bytes(
         &self,
@@ -219,11 +183,13 @@
         size: Size,
     ) -> InterpResult<'tcx, &[u8]>
     {
-        self.get_bytes_internal(cx, ptr, size, true, CheckInAllocMsg::MemoryAccessTest)
+        self.get_bytes_internal(cx, ptr, size, true)
     }
 
     /// It is the caller's responsibility to handle undefined and pointer bytes.
     /// However, this still checks that there are no relocations on the *edges*.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     #[inline]
     pub fn get_bytes_with_undef_and_ptr(
         &self,
@@ -232,11 +198,13 @@
         size: Size,
     ) -> InterpResult<'tcx, &[u8]>
     {
-        self.get_bytes_internal(cx, ptr, size, false, CheckInAllocMsg::MemoryAccessTest)
+        self.get_bytes_internal(cx, ptr, size, false)
     }
 
     /// Just calling this already marks everything as defined and removes relocations,
     /// so be sure to actually put data there!
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn get_bytes_mut(
         &mut self,
         cx: &impl HasDataLayout,
@@ -244,18 +212,14 @@
         size: Size,
     ) -> InterpResult<'tcx, &mut [u8]>
     {
-        assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
-        self.check_bounds(cx, ptr, size, CheckInAllocMsg::MemoryAccessTest)?;
+        let range = self.check_bounds(ptr.offset, size);
 
         self.mark_definedness(ptr, size, true);
         self.clear_relocations(cx, ptr, size)?;
 
         AllocationExtra::memory_written(self, ptr, size)?;
 
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
-        let offset = ptr.offset.bytes() as usize;
-        Ok(&mut self.bytes[offset..offset + size.bytes() as usize])
+        Ok(&mut self.bytes[range])
     }
 }
 
@@ -276,9 +240,10 @@
                 let size_with_null = Size::from_bytes((size + 1) as u64);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
-                // from the result!
+                // from the result, so we do subslicing.
                 Ok(&self.get_bytes(cx, ptr, size_with_null)?[..size])
             }
+            // This includes the case where `offset` is out-of-bounds to begin with.
             None => err!(UnterminatedCString(ptr.erase_tag())),
         }
     }
@@ -306,7 +271,7 @@
 
     /// Writes `src` to the memory starting at `ptr.offset`.
     ///
-    /// Will do bounds checks on the allocation.
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn write_bytes(
         &mut self,
         cx: &impl HasDataLayout,
@@ -320,6 +285,8 @@
     }
 
     /// Sets `count` bytes starting at `ptr.offset` with `val`. Basically `memset`.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn write_repeat(
         &mut self,
         cx: &impl HasDataLayout,
@@ -342,7 +309,7 @@
     /// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
     ///   being valid for ZSTs
     ///
-    /// Note: This function does not do *any* alignment checks, you need to do these before calling
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn read_scalar(
         &self,
         cx: &impl HasDataLayout,
@@ -378,7 +345,9 @@
         Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
     }
 
-    /// Note: This function does not do *any* alignment checks, you need to do these before calling
+    /// Reads a pointer-sized scalar.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn read_ptr_sized(
         &self,
         cx: &impl HasDataLayout,
@@ -395,7 +364,7 @@
     /// * in order to obtain a `Pointer` we need to check for ZSTness anyway due to integer pointers
     ///   being valid for ZSTs
     ///
-    /// Note: This function does not do *any* alignment checks, you need to do these before calling
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn write_scalar(
         &mut self,
         cx: &impl HasDataLayout,
@@ -435,7 +404,9 @@
         Ok(())
     }
 
-    /// Note: This function does not do *any* alignment checks, you need to do these before calling
+    /// Writes a pointer-sized scalar.
+    ///
+    /// It is the caller's responsibility to check bounds and alignment beforehand.
     pub fn write_ptr_sized(
         &mut self,
         cx: &impl HasDataLayout,
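
A minimal, self-contained sketch of the bounds-check pattern the allocation.rs hunks above introduce; `Alloc` and the bare `u64` offsets are illustrative stand-ins for the interpreter's `Allocation`, `Pointer`, and `Size` types, not the real API:

    use std::ops::Range;

    struct Alloc {
        bytes: Vec<u8>,
    }

    impl Alloc {
        /// Returns the byte range covered by an access at `offset` of `size`
        /// bytes, with the same overflow and out-of-bounds assertions as the
        /// new `check_bounds` helper.
        fn check_bounds(&self, offset: u64, size: u64) -> Range<usize> {
            let end = offset.checked_add(size).expect("offset + size overflowed");
            assert_eq!(
                end as usize as u64, end,
                "cannot handle this access on this host architecture"
            );
            let end = end as usize;
            assert!(
                end <= self.bytes.len(),
                "out-of-bounds access at offset {}, size {} in allocation of size {}",
                offset, size, self.bytes.len()
            );
            (offset as usize)..end
        }
    }

    fn main() {
        let a = Alloc { bytes: vec![0u8; 16] };
        // Indexing with the returned range replaces the repeated offset/size
        // casts that each byte accessor previously carried.
        let slice = &a.bytes[a.check_bounds(4, 8)];
        assert_eq!(slice.len(), 8);
    }
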
diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
index a36c788..1b29425 100644
--- a/src/librustc/mir/interpret/mod.rs
+++ b/src/librustc/mir/interpret/mod.rs
@@ -17,12 +17,9 @@
 
 pub use self::value::{Scalar, ScalarMaybeUndef, RawConst, ConstValue};
 
-pub use self::allocation::{
-    InboundsCheck, Allocation, AllocationExtra,
-    Relocations, UndefMask, CheckInAllocMsg,
-};
+pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask};
 
-pub use self::pointer::{Pointer, PointerArithmetic};
+pub use self::pointer::{Pointer, PointerArithmetic, CheckInAllocMsg};
 
 use std::fmt;
 use crate::mir;
diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs
index 26002a4..a17bc1f 100644
--- a/src/librustc/mir/interpret/pointer.rs
+++ b/src/librustc/mir/interpret/pointer.rs
@@ -1,13 +1,35 @@
-use std::fmt;
+use std::fmt::{self, Display};
 
 use crate::mir;
 use crate::ty::layout::{self, HasDataLayout, Size};
 use rustc_macros::HashStable;
 
 use super::{
-    AllocId, InterpResult, CheckInAllocMsg
+    AllocId, InterpResult,
 };
 
+/// Used by `check_in_alloc` to indicate the context of the check.
+#[derive(Debug, Copy, Clone, RustcEncodable, RustcDecodable, HashStable)]
+pub enum CheckInAllocMsg {
+    MemoryAccessTest,
+    NullPointerTest,
+    PointerArithmeticTest,
+    InboundsTest,
+}
+
+impl Display for CheckInAllocMsg {
+    /// When this is printed as an error, the context looks like this:
+    /// "{test name} failed: pointer must be in-bounds at offset..."
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", match *self {
+            CheckInAllocMsg::MemoryAccessTest => "Memory access",
+            CheckInAllocMsg::NullPointerTest => "Null pointer test",
+            CheckInAllocMsg::PointerArithmeticTest => "Pointer arithmetic",
+            CheckInAllocMsg::InboundsTest => "Inbounds test",
+        })
+    }
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // Pointer arithmetic
 ////////////////////////////////////////////////////////////////////////////////
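
For reference, a self-contained sketch of how the relocated `CheckInAllocMsg` reads once formatted; the surrounding error text is illustrative, not the actual rustc diagnostic plumbing:

    use std::fmt::{self, Display};

    #[allow(dead_code)]
    enum CheckInAllocMsg {
        MemoryAccessTest,
        NullPointerTest,
        PointerArithmeticTest,
        InboundsTest,
    }

    impl Display for CheckInAllocMsg {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str(match self {
                CheckInAllocMsg::MemoryAccessTest => "Memory access",
                CheckInAllocMsg::NullPointerTest => "Null pointer test",
                CheckInAllocMsg::PointerArithmeticTest => "Pointer arithmetic",
                CheckInAllocMsg::InboundsTest => "Inbounds test",
            })
        }
    }

    fn main() {
        // Prints: "Inbounds test failed: pointer must be in-bounds at offset 8"
        println!("{} failed: pointer must be in-bounds at offset 8",
                 CheckInAllocMsg::InboundsTest);
    }
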
diff --git a/src/librustc_data_structures/bit_set.rs b/src/librustc_data_structures/bit_set.rs
index 3430d83..5d8388d 100644
--- a/src/librustc_data_structures/bit_set.rs
+++ b/src/librustc_data_structures/bit_set.rs
@@ -316,11 +316,6 @@
     }
 }
 
-pub trait BitSetOperator {
-    /// Combine one bitset into another.
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool;
-}
-
 #[inline]
 fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool
     where Op: Fn(Word, Word) -> Word
diff --git a/src/librustc_mir/dataflow/at_location.rs b/src/librustc_mir/dataflow/at_location.rs
index 9cba34b..7735528 100644
--- a/src/librustc_mir/dataflow/at_location.rs
+++ b/src/librustc_mir/dataflow/at_location.rs
@@ -4,7 +4,7 @@
 use rustc::mir::{BasicBlock, Location};
 use rustc_data_structures::bit_set::{BitIter, BitSet, HybridBitSet};
 
-use crate::dataflow::{BitDenotation, BlockSets, DataflowResults};
+use crate::dataflow::{BitDenotation, DataflowResults, GenKillSet};
 use crate::dataflow::move_paths::{HasMoveData, MovePathIndex};
 
 use std::iter;
@@ -66,8 +66,7 @@
 {
     base_results: DataflowResults<'tcx, BD>,
     curr_state: BitSet<BD::Idx>,
-    stmt_gen: HybridBitSet<BD::Idx>,
-    stmt_kill: HybridBitSet<BD::Idx>,
+    stmt_trans: GenKillSet<BD::Idx>,
 }
 
 impl<'tcx, BD> FlowAtLocation<'tcx, BD>
@@ -89,19 +88,17 @@
     where
         F: FnMut(BD::Idx),
     {
-        self.stmt_gen.iter().for_each(f)
+        self.stmt_trans.gen_set.iter().for_each(f)
     }
 
     pub fn new(results: DataflowResults<'tcx, BD>) -> Self {
         let bits_per_block = results.sets().bits_per_block();
         let curr_state = BitSet::new_empty(bits_per_block);
-        let stmt_gen = HybridBitSet::new_empty(bits_per_block);
-        let stmt_kill = HybridBitSet::new_empty(bits_per_block);
+        let stmt_trans = GenKillSet::from_elem(HybridBitSet::new_empty(bits_per_block));
         FlowAtLocation {
             base_results: results,
-            curr_state: curr_state,
-            stmt_gen: stmt_gen,
-            stmt_kill: stmt_kill,
+            curr_state,
+            stmt_trans,
         }
     }
 
@@ -127,8 +124,7 @@
         F: FnOnce(BitIter<'_, BD::Idx>),
     {
         let mut curr_state = self.curr_state.clone();
-        curr_state.union(&self.stmt_gen);
-        curr_state.subtract(&self.stmt_kill);
+        self.stmt_trans.apply(&mut curr_state);
         f(curr_state.iter());
     }
 
@@ -142,68 +138,41 @@
     where BD: BitDenotation<'tcx>
 {
     fn reset_to_entry_of(&mut self, bb: BasicBlock) {
-        self.curr_state.overwrite(self.base_results.sets().on_entry_set_for(bb.index()));
+        self.curr_state.overwrite(self.base_results.sets().entry_set_for(bb.index()));
     }
 
     fn reset_to_exit_of(&mut self, bb: BasicBlock) {
         self.reset_to_entry_of(bb);
-        self.curr_state.union(self.base_results.sets().gen_set_for(bb.index()));
-        self.curr_state.subtract(self.base_results.sets().kill_set_for(bb.index()));
+        let trans = self.base_results.sets().trans_for(bb.index());
+        trans.apply(&mut self.curr_state)
     }
 
     fn reconstruct_statement_effect(&mut self, loc: Location) {
-        self.stmt_gen.clear();
-        self.stmt_kill.clear();
-        {
-            let mut sets = BlockSets {
-                on_entry: &mut self.curr_state,
-                gen_set: &mut self.stmt_gen,
-                kill_set: &mut self.stmt_kill,
-            };
-            self.base_results
-                .operator()
-                .before_statement_effect(&mut sets, loc);
-        }
-        self.apply_local_effect(loc);
-
-        let mut sets = BlockSets {
-            on_entry: &mut self.curr_state,
-            gen_set: &mut self.stmt_gen,
-            kill_set: &mut self.stmt_kill,
-        };
+        self.stmt_trans.clear();
         self.base_results
             .operator()
-            .statement_effect(&mut sets, loc);
+            .before_statement_effect(&mut self.stmt_trans, loc);
+        self.stmt_trans.apply(&mut self.curr_state);
+
+        self.base_results
+            .operator()
+            .statement_effect(&mut self.stmt_trans, loc);
     }
 
     fn reconstruct_terminator_effect(&mut self, loc: Location) {
-        self.stmt_gen.clear();
-        self.stmt_kill.clear();
-        {
-            let mut sets = BlockSets {
-                on_entry: &mut self.curr_state,
-                gen_set: &mut self.stmt_gen,
-                kill_set: &mut self.stmt_kill,
-            };
-            self.base_results
-                .operator()
-                .before_terminator_effect(&mut sets, loc);
-        }
-        self.apply_local_effect(loc);
-
-        let mut sets = BlockSets {
-            on_entry: &mut self.curr_state,
-            gen_set: &mut self.stmt_gen,
-            kill_set: &mut self.stmt_kill,
-        };
+        self.stmt_trans.clear();
         self.base_results
             .operator()
-            .terminator_effect(&mut sets, loc);
+            .before_terminator_effect(&mut self.stmt_trans, loc);
+        self.stmt_trans.apply(&mut self.curr_state);
+
+        self.base_results
+            .operator()
+            .terminator_effect(&mut self.stmt_trans, loc);
     }
 
     fn apply_local_effect(&mut self, _loc: Location) {
-        self.curr_state.union(&self.stmt_gen);
-        self.curr_state.subtract(&self.stmt_kill);
+        self.stmt_trans.apply(&mut self.curr_state)
     }
 }
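
A minimal sketch of the gen/kill transfer function that `FlowAtLocation` now keeps as a single `stmt_trans`; `HashSet<usize>` is an illustrative stand-in for the hybrid bitsets, and `gen_bit`/`kill_bit` mirror the `fn gen`/`fn kill` helpers in the dataflow/mod.rs diff below:

    use std::collections::HashSet;

    #[derive(Default)]
    struct GenKill {
        gen_set: HashSet<usize>,
        kill_set: HashSet<usize>,
    }

    impl GenKill {
        // Setting a bit in one set clears it in the other, keeping the two
        // disjoint so the result is independent of insertion order.
        fn gen_bit(&mut self, e: usize) {
            self.gen_set.insert(e);
            self.kill_set.remove(&e);
        }
        fn kill_bit(&mut self, e: usize) {
            self.kill_set.insert(e);
            self.gen_set.remove(&e);
        }
        /// Computes `(state ∪ gen) - kill`, like `GenKillSet::apply`.
        fn apply(&self, state: &mut HashSet<usize>) {
            state.extend(self.gen_set.iter().copied());
            for e in &self.kill_set {
                state.remove(e);
            }
        }
    }

    fn main() {
        let mut trans = GenKill::default();
        trans.gen_bit(1);
        trans.kill_bit(2);
        let mut state = HashSet::from([0, 2]);
        trans.apply(&mut state);
        assert_eq!(state, HashSet::from([0, 1]));
    }
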
 
diff --git a/src/librustc_mir/dataflow/graphviz.rs b/src/librustc_mir/dataflow/graphviz.rs
index 7896592..b0d8581 100644
--- a/src/librustc_mir/dataflow/graphviz.rs
+++ b/src/librustc_mir/dataflow/graphviz.rs
@@ -170,7 +170,7 @@
 
         write!(w, "<tr>")?;
         // Entry
-        dump_set_for!(on_entry_set_for, interpret_set);
+        dump_set_for!(entry_set_for, interpret_set);
 
         // MIR statements
         write!(w, "<td>")?;
@@ -208,7 +208,7 @@
         write!(w, "<tr>")?;
 
         // Entry
-        let set = flow.sets.on_entry_set_for(i);
+        let set = flow.sets.entry_set_for(i);
         write!(w, "<td>{:?}</td>", dot::escape_html(&set.to_string()))?;
 
         // Terminator
@@ -221,13 +221,10 @@
         }
         write!(w, "</td>")?;
 
-        // Gen
-        let set = flow.sets.gen_set_for(i);
-        write!(w, "<td>{:?}</td>", dot::escape_html(&format!("{:?}", set)))?;
-
-        // Kill
-        let set = flow.sets.kill_set_for(i);
-        write!(w, "<td>{:?}</td>", dot::escape_html(&format!("{:?}", set)))?;
+        // Gen/Kill
+        let trans = flow.sets.trans_for(i);
+        write!(w, "<td>{:?}</td>", dot::escape_html(&format!("{:?}", trans.gen_set)))?;
+        write!(w, "<td>{:?}</td>", dot::escape_html(&format!("{:?}", trans.kill_set)))?;
 
         write!(w, "</tr>")?;
 
diff --git a/src/librustc_mir/dataflow/impls/borrowed_locals.rs b/src/librustc_mir/dataflow/impls/borrowed_locals.rs
index 069ce3a..0f7f37f 100644
--- a/src/librustc_mir/dataflow/impls/borrowed_locals.rs
+++ b/src/librustc_mir/dataflow/impls/borrowed_locals.rs
@@ -2,7 +2,7 @@
 
 use rustc::mir::*;
 use rustc::mir::visit::Visitor;
-use crate::dataflow::BitDenotation;
+use crate::dataflow::{BitDenotation, GenKillSet};
 
 /// This calculates if any part of a MIR local could have previously been borrowed.
 /// This means that once a local has been borrowed, its bit will be set
@@ -33,39 +33,39 @@
         self.body.local_decls.len()
     }
 
-    fn start_block_effect(&self, _sets: &mut BitSet<Local>) {
+    fn start_block_effect(&self, _on_entry: &mut BitSet<Local>) {
         // Nothing is borrowed on function entry
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, Local>,
+                        trans: &mut GenKillSet<Local>,
                         loc: Location) {
         let stmt = &self.body[loc.block].statements[loc.statement_index];
 
         BorrowedLocalsVisitor {
-            sets,
+            trans,
         }.visit_statement(stmt, loc);
 
         // StorageDead invalidates all borrows and raw pointers to a local
         match stmt.kind {
-            StatementKind::StorageDead(l) => sets.kill(l),
+            StatementKind::StorageDead(l) => trans.kill(l),
             _ => (),
         }
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, Local>,
+                         trans: &mut GenKillSet<Local>,
                          loc: Location) {
         let terminator = self.body[loc.block].terminator();
         BorrowedLocalsVisitor {
-            sets,
+            trans,
         }.visit_terminator(terminator, loc);
         match &terminator.kind {
             // Drop terminators borrow the location
             TerminatorKind::Drop { location, .. } |
             TerminatorKind::DropAndReplace { location, .. } => {
                 if let Some(local) = find_local(location) {
-                    sets.gen(local);
+                    trans.gen(local);
                 }
             }
             _ => (),
@@ -83,22 +83,13 @@
     }
 }
 
-impl<'a, 'tcx> BitSetOperator for HaveBeenBorrowedLocals<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // "maybe" means we union effects of both preds
-    }
+impl<'a, 'tcx> BottomValue for HaveBeenBorrowedLocals<'a, 'tcx> {
+    // bottom = unborrowed
+    const BOTTOM_VALUE: bool = false;
 }
 
-impl<'a, 'tcx> InitialFlow for HaveBeenBorrowedLocals<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = unborrowed
-    }
-}
-
-struct BorrowedLocalsVisitor<'b, 'c> {
-    sets: &'b mut BlockSets<'c, Local>,
+struct BorrowedLocalsVisitor<'gk> {
+    trans: &'gk mut GenKillSet<Local>,
 }
 
 fn find_local<'tcx>(place: &Place<'tcx>) -> Option<Local> {
@@ -117,13 +108,13 @@
     })
 }
 
-impl<'tcx, 'b, 'c> Visitor<'tcx> for BorrowedLocalsVisitor<'b, 'c> {
+impl<'tcx> Visitor<'tcx> for BorrowedLocalsVisitor<'_> {
     fn visit_rvalue(&mut self,
                     rvalue: &Rvalue<'tcx>,
                     location: Location) {
         if let Rvalue::Ref(_, _, ref place) = *rvalue {
             if let Some(local) = find_local(place) {
-                self.sets.gen(local);
+                self.trans.gen(local);
             }
         }
 
diff --git a/src/librustc_mir/dataflow/impls/borrows.rs b/src/librustc_mir/dataflow/impls/borrows.rs
index 7617d3b..53d00d4 100644
--- a/src/librustc_mir/dataflow/impls/borrows.rs
+++ b/src/librustc_mir/dataflow/impls/borrows.rs
@@ -5,11 +5,11 @@
 use rustc::ty::TyCtxt;
 use rustc::ty::RegionVid;
 
-use rustc_data_structures::bit_set::{BitSet, BitSetOperator};
+use rustc_data_structures::bit_set::BitSet;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
 
-use crate::dataflow::{BitDenotation, BlockSets, InitialFlow};
+use crate::dataflow::{BitDenotation, BottomValue, GenKillSet};
 use crate::borrow_check::nll::region_infer::RegionInferenceContext;
 use crate::borrow_check::nll::ToRegionVid;
 use crate::borrow_check::places_conflict;
@@ -168,7 +168,7 @@
     /// Add all borrows to the kill set, if those borrows are out of scope at `location`.
     /// That means they went out of a nonlexical scope.
     fn kill_loans_out_of_scope_at_location(&self,
-                                           sets: &mut BlockSets<'_, BorrowIndex>,
+                                           trans: &mut GenKillSet<BorrowIndex>,
                                            location: Location) {
         // NOTE: The state associated with a given `location`
         // reflects the dataflow on entry to the statement.
@@ -182,14 +182,14 @@
         // region, then setting that gen-bit will override any
         // potential kill introduced here.
         if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
-            sets.kill_all(indices);
+            trans.kill_all(indices);
         }
     }
 
     /// Kill any borrows that conflict with `place`.
     fn kill_borrows_on_place(
         &self,
-        sets: &mut BlockSets<'_, BorrowIndex>,
+        trans: &mut GenKillSet<BorrowIndex>,
         place: &Place<'tcx>
     ) {
         debug!("kill_borrows_on_place: place={:?}", place);
@@ -206,7 +206,7 @@
             // local must conflict. This is purely an optimization so we don't have to call
             // `places_conflict` for every borrow.
             if let Place::Base(PlaceBase::Local(_)) = place {
-                sets.kill_all(other_borrows_of_local);
+                trans.kill_all(other_borrows_of_local);
                 return;
             }
 
@@ -224,7 +224,7 @@
                         places_conflict::PlaceConflictBias::NoOverlap)
                 });
 
-            sets.kill_all(definitely_conflicting_borrows);
+            trans.kill_all(definitely_conflicting_borrows);
         }
     }
 }
@@ -236,21 +236,24 @@
         self.borrow_set.borrows.len() * 2
     }
 
-    fn start_block_effect(&self, _entry_set: &mut BitSet<BorrowIndex>) {
+    fn start_block_effect(&self, _entry_set: &mut BitSet<Self::Idx>) {
         // no borrows of code region_scopes have been taken prior to
-        // function execution, so this method has no effect on
-        // `_sets`.
+        // function execution, so this method has no effect.
     }
 
     fn before_statement_effect(&self,
-                               sets: &mut BlockSets<'_, BorrowIndex>,
+                               trans: &mut GenKillSet<Self::Idx>,
                                location: Location) {
-        debug!("Borrows::before_statement_effect sets: {:?} location: {:?}", sets, location);
-        self.kill_loans_out_of_scope_at_location(sets, location);
+        debug!("Borrows::before_statement_effect trans: {:?} location: {:?}",
+               trans, location);
+        self.kill_loans_out_of_scope_at_location(trans, location);
     }
 
-    fn statement_effect(&self, sets: &mut BlockSets<'_, BorrowIndex>, location: Location) {
-        debug!("Borrows::statement_effect: sets={:?} location={:?}", sets, location);
+    fn statement_effect(&self,
+                        trans: &mut GenKillSet<Self::Idx>,
+                        location: Location) {
+        debug!("Borrows::statement_effect: trans={:?} location={:?}",
+               trans, location);
 
         let block = &self.body.basic_blocks().get(location.block).unwrap_or_else(|| {
             panic!("could not find block at location {:?}", location);
@@ -264,7 +267,7 @@
             mir::StatementKind::Assign(ref lhs, ref rhs) => {
                 // Make sure there are no remaining borrows for variables
                 // that are assigned over.
-                self.kill_borrows_on_place(sets, lhs);
+                self.kill_borrows_on_place(trans, lhs);
 
                 if let mir::Rvalue::Ref(_, _, ref place) = **rhs {
                     if place.ignore_borrow(
@@ -278,20 +281,20 @@
                         panic!("could not find BorrowIndex for location {:?}", location);
                     });
 
-                    sets.gen(*index);
+                    trans.gen(*index);
                 }
             }
 
             mir::StatementKind::StorageDead(local) => {
                 // Make sure there are no remaining borrows for locals that
                 // are gone out of scope.
-                self.kill_borrows_on_place(sets, &Place::Base(PlaceBase::Local(local)));
+                self.kill_borrows_on_place(trans, &Place::Base(PlaceBase::Local(local)));
             }
 
             mir::StatementKind::InlineAsm(ref asm) => {
                 for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) {
                     if !kind.is_indirect && !kind.is_rw {
-                        self.kill_borrows_on_place(sets, output);
+                        self.kill_borrows_on_place(trans, output);
                     }
                 }
             }
@@ -307,13 +310,16 @@
     }
 
     fn before_terminator_effect(&self,
-                                sets: &mut BlockSets<'_, BorrowIndex>,
+                                trans: &mut GenKillSet<Self::Idx>,
                                 location: Location) {
-        debug!("Borrows::before_terminator_effect sets: {:?} location: {:?}", sets, location);
-        self.kill_loans_out_of_scope_at_location(sets, location);
+        debug!("Borrows::before_terminator_effect: trans={:?} location={:?}",
+               trans, location);
+        self.kill_loans_out_of_scope_at_location(trans, location);
     }
 
-    fn terminator_effect(&self, _: &mut BlockSets<'_, BorrowIndex>, _: Location) {}
+    fn terminator_effect(&self,
+                         _: &mut GenKillSet<Self::Idx>,
+                         _: Location) {}
 
     fn propagate_call_return(
         &self,
@@ -325,16 +331,7 @@
     }
 }
 
-impl<'a, 'tcx> BitSetOperator for Borrows<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // "maybe" means we union effects of both preds
-    }
-}
-
-impl<'a, 'tcx> InitialFlow for Borrows<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = nothing is reserved or activated yet
-    }
+impl<'a, 'tcx> BottomValue for Borrows<'a, 'tcx> {
+    /// bottom = nothing is reserved or activated yet
+    const BOTTOM_VALUE: bool = false;
 }
diff --git a/src/librustc_mir/dataflow/impls/mod.rs b/src/librustc_mir/dataflow/impls/mod.rs
index 0a572d2..065cfe8 100644
--- a/src/librustc_mir/dataflow/impls/mod.rs
+++ b/src/librustc_mir/dataflow/impls/mod.rs
@@ -4,7 +4,7 @@
 
 use rustc::ty::TyCtxt;
 use rustc::mir::{self, Body, Location};
-use rustc_data_structures::bit_set::{BitSet, BitSetOperator};
+use rustc_data_structures::bit_set::BitSet;
 use rustc_data_structures::indexed_vec::Idx;
 
 use super::MoveDataParamEnv;
@@ -12,7 +12,7 @@
 use crate::util::elaborate_drops::DropFlagState;
 
 use super::move_paths::{HasMoveData, MoveData, MovePathIndex, InitIndex, InitKind};
-use super::{BitDenotation, BlockSets, InitialFlow};
+use super::{BitDenotation, BottomValue, GenKillSet};
 
 use super::drop_flag_effects_for_function_entry;
 use super::drop_flag_effects_for_location;
@@ -226,34 +226,37 @@
 }
 
 impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
-    fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex,
+    fn update_bits(trans: &mut GenKillSet<MovePathIndex>,
+                   path: MovePathIndex,
                    state: DropFlagState)
     {
         match state {
-            DropFlagState::Absent => sets.kill(path),
-            DropFlagState::Present => sets.gen(path),
+            DropFlagState::Absent => trans.kill(path),
+            DropFlagState::Present => trans.gen(path),
         }
     }
 }
 
 impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
-    fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex,
+    fn update_bits(trans: &mut GenKillSet<MovePathIndex>,
+                   path: MovePathIndex,
                    state: DropFlagState)
     {
         match state {
-            DropFlagState::Absent => sets.gen(path),
-            DropFlagState::Present => sets.kill(path),
+            DropFlagState::Absent => trans.gen(path),
+            DropFlagState::Present => trans.kill(path),
         }
     }
 }
 
 impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
-    fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex,
+    fn update_bits(trans: &mut GenKillSet<MovePathIndex>,
+                   path: MovePathIndex,
                    state: DropFlagState)
     {
         match state {
-            DropFlagState::Absent => sets.kill(path),
-            DropFlagState::Present => sets.gen(path),
+            DropFlagState::Absent => trans.kill(path),
+            DropFlagState::Present => trans.gen(path),
         }
     }
 }
@@ -275,24 +278,24 @@
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, MovePathIndex>,
+                        trans: &mut GenKillSet<Self::Idx>,
                         location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, MovePathIndex>,
+                         trans: &mut GenKillSet<Self::Idx>,
                          location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
@@ -333,24 +336,24 @@
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, MovePathIndex>,
+                        trans: &mut GenKillSet<Self::Idx>,
                         location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, MovePathIndex>,
+                         trans: &mut GenKillSet<Self::Idx>,
                          location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
@@ -389,24 +392,24 @@
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, MovePathIndex>,
+                        trans: &mut GenKillSet<Self::Idx>,
                         location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, MovePathIndex>,
+                         trans: &mut GenKillSet<Self::Idx>,
                          location: Location)
     {
         drop_flag_effects_for_location(
             self.tcx, self.body, self.mdpe,
             location,
-            |path, s| Self::update_bits(sets, path, s)
+            |path, s| Self::update_bits(trans, path, s)
         )
     }
 
@@ -439,7 +442,7 @@
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, InitIndex>,
+                        trans: &mut GenKillSet<Self::Idx>,
                         location: Location) {
         let (_, body, move_data) = (self.tcx, self.body, self.move_data());
         let stmt = &body[location.block].statements[location.statement_index];
@@ -449,7 +452,7 @@
 
         debug!("statement {:?} at loc {:?} initializes move_indexes {:?}",
                stmt, location, &init_loc_map[location]);
-        sets.gen_all(&init_loc_map[location]);
+        trans.gen_all(&init_loc_map[location]);
 
         match stmt.kind {
             mir::StatementKind::StorageDead(local) => {
@@ -458,14 +461,14 @@
                 let move_path_index = rev_lookup.find_local(local);
                 debug!("stmt {:?} at loc {:?} clears the ever initialized status of {:?}",
                         stmt, location, &init_path_map[move_path_index]);
-                sets.kill_all(&init_path_map[move_path_index]);
+                trans.kill_all(&init_path_map[move_path_index]);
             }
             _ => {}
         }
     }
 
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, InitIndex>,
+                         trans: &mut GenKillSet<Self::Idx>,
                          location: Location)
     {
         let (body, move_data) = (self.body, self.move_data());
@@ -473,7 +476,7 @@
         let init_loc_map = &move_data.init_loc_map;
         debug!("terminator {:?} at loc {:?} initializes move_indexes {:?}",
                term, location, &init_loc_map[location]);
-        sets.gen_all(
+        trans.gen_all(
             init_loc_map[location].iter().filter(|init_index| {
                 move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly
             })
@@ -502,68 +505,22 @@
     }
 }
 
-impl<'a, 'tcx> BitSetOperator for MaybeInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // "maybe" means we union effects of both preds
-    }
+impl<'a, 'tcx> BottomValue for MaybeInitializedPlaces<'a, 'tcx> {
+    /// bottom = uninitialized
+    const BOTTOM_VALUE: bool = false;
 }
 
-impl<'a, 'tcx> BitSetOperator for MaybeUninitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // "maybe" means we union effects of both preds
-    }
+impl<'a, 'tcx> BottomValue for MaybeUninitializedPlaces<'a, 'tcx> {
+    /// bottom = initialized (start_block_effect counters this at the outset)
+    const BOTTOM_VALUE: bool = false;
 }
 
-impl<'a, 'tcx> BitSetOperator for DefinitelyInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.intersect(in_set) // "definitely" means we intersect effects of both preds
-    }
+impl<'a, 'tcx> BottomValue for DefinitelyInitializedPlaces<'a, 'tcx> {
+    /// bottom = initialized (start_block_effect counters this at the outset)
+    const BOTTOM_VALUE: bool = true;
 }
 
-impl<'a, 'tcx> BitSetOperator for EverInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // inits from both preds are in scope
-    }
-}
-
-// The way that dataflow fixed point iteration works, you want to
-// start at bottom and work your way to a fixed point. Control-flow
-// merges will apply the `join` operator to each block entry's current
-// state (which starts at that bottom value).
-//
-// This means, for propagation across the graph, that you either want
-// to start at all-zeroes and then use Union as your merge when
-// propagating, or you start at all-ones and then use Intersect as
-// your merge when propagating.
-
-impl<'a, 'tcx> InitialFlow for MaybeInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = uninitialized
-    }
-}
-
-impl<'a, 'tcx> InitialFlow for MaybeUninitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = initialized (start_block_effect counters this at outset)
-    }
-}
-
-impl<'a, 'tcx> InitialFlow for DefinitelyInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        true // bottom = initialized (start_block_effect counters this at outset)
-    }
-}
-
-impl<'a, 'tcx> InitialFlow for EverInitializedPlaces<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = no initialized variables by default
-    }
+impl<'a, 'tcx> BottomValue for EverInitializedPlaces<'a, 'tcx> {
+    /// bottom = no initialized variables by default
+    const BOTTOM_VALUE: bool = false;
 }
diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs
index d7575b0..d200399 100644
--- a/src/librustc_mir/dataflow/impls/storage_liveness.rs
+++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs
@@ -26,24 +26,24 @@
         self.body.local_decls.len()
     }
 
-    fn start_block_effect(&self, _sets: &mut BitSet<Local>) {
+    fn start_block_effect(&self, _on_entry: &mut BitSet<Local>) {
         // Nothing is live on function entry
     }
 
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, Local>,
+                        trans: &mut GenKillSet<Local>,
                         loc: Location) {
         let stmt = &self.body[loc.block].statements[loc.statement_index];
 
         match stmt.kind {
-            StatementKind::StorageLive(l) => sets.gen(l),
-            StatementKind::StorageDead(l) => sets.kill(l),
+            StatementKind::StorageLive(l) => trans.gen(l),
+            StatementKind::StorageDead(l) => trans.kill(l),
             _ => (),
         }
     }
 
     fn terminator_effect(&self,
-                         _sets: &mut BlockSets<'_, Local>,
+                         _trans: &mut GenKillSet<Local>,
                          _loc: Location) {
         // Terminators have no effect
     }
@@ -59,16 +59,7 @@
     }
 }
 
-impl<'a, 'tcx> BitSetOperator for MaybeStorageLive<'a, 'tcx> {
-    #[inline]
-    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
-        inout_set.union(in_set) // "maybe" means we union effects of both preds
-    }
-}
-
-impl<'a, 'tcx> InitialFlow for MaybeStorageLive<'a, 'tcx> {
-    #[inline]
-    fn bottom_value() -> bool {
-        false // bottom = dead
-    }
+impl<'a, 'tcx> BottomValue for MaybeStorageLive<'a, 'tcx> {
+    /// bottom = dead
+    const BOTTOM_VALUE: bool = false;
 }
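
The dataflow/mod.rs diff below merges `InitialFlow` and `BitSetOperator` into a single `BottomValue` trait: a bottom value of all-zeroes pairs with union as the join, a bottom of all-ones with intersection. A small sketch of that correspondence, with `Vec<bool>` as an illustrative stand-in for `BitSet<T>`:

    trait BottomValue {
        const BOTTOM_VALUE: bool;

        /// Merges `in_set` into `inout_set`, returning `true` if `inout_set`
        /// changed: union when bottom is all-zeroes, intersection otherwise.
        fn join(inout_set: &mut [bool], in_set: &[bool]) -> bool {
            let mut changed = false;
            for (a, &b) in inout_set.iter_mut().zip(in_set) {
                let merged = if Self::BOTTOM_VALUE { *a && b } else { *a || b };
                changed |= merged != *a;
                *a = merged;
            }
            changed
        }
    }

    /// Stand-in for an analysis like `MaybeInitializedPlaces`:
    /// bottom = uninitialized, so merges are unions.
    struct MaybeInit;
    impl BottomValue for MaybeInit {
        const BOTTOM_VALUE: bool = false;
    }

    fn main() {
        let mut entry = vec![false, true];
        let changed = MaybeInit::join(&mut entry, &[true, true]);
        assert!(changed);
        assert_eq!(entry, [true, true]);
    }
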
diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs
index 89b952e..80f65a9 100644
--- a/src/librustc_mir/dataflow/mod.rs
+++ b/src/librustc_mir/dataflow/mod.rs
@@ -1,7 +1,7 @@
 use syntax::ast::{self, MetaItem};
 use syntax::symbol::{Symbol, sym};
 
-use rustc_data_structures::bit_set::{BitSet, BitSetOperator, HybridBitSet};
+use rustc_data_structures::bit_set::{BitSet, HybridBitSet};
 use rustc_data_structures::indexed_vec::Idx;
 use rustc_data_structures::work_queue::WorkQueue;
 
@@ -131,7 +131,7 @@
     p: P,
 ) -> DataflowResults<'tcx, BD>
 where
-    BD: BitDenotation<'tcx> + InitialFlow,
+    BD: BitDenotation<'tcx>,
     P: Fn(&BD, BD::Idx) -> DebugFormatted,
 {
     let flow_state = DataflowAnalysis::new(body, dead_unwinds, bd);
@@ -199,42 +199,27 @@
     }
 
     fn build_sets(&mut self) {
-        // First we need to build the entry-, gen- and kill-sets.
-
-        {
-            let sets = &mut self.flow_state.sets.for_block(mir::START_BLOCK.index());
-            self.flow_state.operator.start_block_effect(&mut sets.on_entry);
-        }
-
+        // Build the transfer function for each block.
         for (bb, data) in self.body.basic_blocks().iter_enumerated() {
             let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data;
 
-            let mut interim_state;
-            let sets = &mut self.flow_state.sets.for_block(bb.index());
-            let track_intrablock = BD::accumulates_intrablock_state();
-            if track_intrablock {
-                debug!("swapping in mutable on_entry, initially {:?}", sets.on_entry);
-                interim_state = sets.on_entry.to_owned();
-                sets.on_entry = &mut interim_state;
-            }
+            let trans = self.flow_state.sets.trans_mut_for(bb.index());
             for j_stmt in 0..statements.len() {
                 let location = Location { block: bb, statement_index: j_stmt };
-                self.flow_state.operator.before_statement_effect(sets, location);
-                self.flow_state.operator.statement_effect(sets, location);
-                if track_intrablock {
-                    sets.apply_local_effect();
-                }
+                self.flow_state.operator.before_statement_effect(trans, location);
+                self.flow_state.operator.statement_effect(trans, location);
             }
 
             if terminator.is_some() {
                 let location = Location { block: bb, statement_index: statements.len() };
-                self.flow_state.operator.before_terminator_effect(sets, location);
-                self.flow_state.operator.terminator_effect(sets, location);
-                if track_intrablock {
-                    sets.apply_local_effect();
-                }
+                self.flow_state.operator.before_terminator_effect(trans, location);
+                self.flow_state.operator.terminator_effect(trans, location);
             }
         }
+
+        // Initialize the flow state at entry to the start block.
+        let on_entry = self.flow_state.sets.entry_set_mut_for(mir::START_BLOCK.index());
+        self.flow_state.operator.start_block_effect(on_entry);
     }
 }
 
@@ -247,14 +232,12 @@
             WorkQueue::with_all(self.builder.body.basic_blocks().len());
         let body = self.builder.body;
         while let Some(bb) = dirty_queue.pop() {
+            let (on_entry, trans) = self.builder.flow_state.sets.get_mut(bb.index());
+            debug_assert!(in_out.words().len() == on_entry.words().len());
+            in_out.overwrite(on_entry);
+            trans.apply(in_out);
+
             let bb_data = &body[bb];
-            {
-                let sets = self.builder.flow_state.sets.for_block(bb.index());
-                debug_assert!(in_out.words().len() == sets.on_entry.words().len());
-                in_out.overwrite(sets.on_entry);
-                in_out.union(sets.gen_set);
-                in_out.subtract(sets.kill_set);
-            }
             self.builder.propagate_bits_into_graph_successors_of(
                 in_out, (bb, bb_data), &mut dirty_queue);
         }
@@ -366,33 +349,27 @@
                                                         result: &DataflowResults<'tcx, T>,
                                                         body: &Body<'tcx>)
     -> BitSet<T::Idx> {
-    let mut on_entry = result.sets().on_entry_set_for(loc.block.index()).to_owned();
-    let mut kill_set = on_entry.to_hybrid();
-    let mut gen_set = kill_set.clone();
+    let mut trans = GenKill::from_elem(HybridBitSet::new_empty(analysis.bits_per_block()));
 
-    {
-        let mut sets = BlockSets {
-            on_entry: &mut on_entry,
-            kill_set: &mut kill_set,
-            gen_set: &mut gen_set,
-        };
-
-        for stmt in 0..loc.statement_index {
-            let mut stmt_loc = loc;
-            stmt_loc.statement_index = stmt;
-            analysis.before_statement_effect(&mut sets, stmt_loc);
-            analysis.statement_effect(&mut sets, stmt_loc);
-        }
-
-        // Apply the pre-statement effect of the statement we're evaluating.
-        if loc.statement_index == body[loc.block].statements.len() {
-            analysis.before_terminator_effect(&mut sets, loc);
-        } else {
-            analysis.before_statement_effect(&mut sets, loc);
-        }
+    for stmt in 0..loc.statement_index {
+        let mut stmt_loc = loc;
+        stmt_loc.statement_index = stmt;
+        analysis.before_statement_effect(&mut trans, stmt_loc);
+        analysis.statement_effect(&mut trans, stmt_loc);
     }
 
-    gen_set.to_dense()
+    // Apply the pre-statement effect of the statement we're evaluating.
+    if loc.statement_index == body[loc.block].statements.len() {
+        analysis.before_terminator_effect(&mut trans, loc);
+    } else {
+        analysis.before_statement_effect(&mut trans, loc);
+    }
+
+    // Apply the transfer function for all preceding statements to the fixpoint
+    // at the start of the block.
+    let mut state = result.sets().entry_set_for(loc.block.index()).to_owned();
+    trans.apply(&mut state);
+    state
 }
 
 pub struct DataflowAnalysis<'a, 'tcx, O>
@@ -462,36 +439,8 @@
     }
 }
 
-#[derive(Debug)]
-pub struct AllSets<E: Idx> {
-    /// Analysis bitwidth for each block.
-    bits_per_block: usize,
-
-    /// For each block, bits valid on entry to the block.
-    on_entry_sets: Vec<BitSet<E>>,
-
-    /// For each block, bits generated by executing the statements +
-    /// terminator in the block -- with one caveat. In particular, for
-    /// *call terminators*, the effect of storing the destination is
-    /// not included, since that only takes effect on the **success**
-    /// edge (and not the unwind edge).
-    gen_sets: Vec<HybridBitSet<E>>,
-
-    /// For each block, bits killed by executing the statements +
-    /// terminator in the block -- with one caveat. In particular, for
-    /// *call terminators*, the effect of storing the destination is
-    /// not included, since that only takes effect on the **success**
-    /// edge (and not the unwind edge).
-    kill_sets: Vec<HybridBitSet<E>>,
-}
-
-/// Triple of sets associated with a given block.
-///
-/// Generally, one sets up `on_entry`, `gen_set`, and `kill_set` for
-/// each block individually, and then runs the dataflow analysis which
-/// iteratively modifies the various `on_entry` sets (but leaves the
-/// other two sets unchanged, since they represent the effect of the
-/// block, which should be invariant over the course of the analysis).
+/// A 2-tuple representing the "gen" and "kill" bitsets during
+/// dataflow analysis.
 ///
 /// It is best to ensure that the intersection of `gen_set` and
 /// `kill_set` is empty; otherwise the results of the dataflow will
 /// have a hidden dependency on what order the bits are generated and
 /// killed during the iteration. (This is such a good idea that the
 /// killed during the iteration. (This is such a good idea that the
 /// `fn gen` and `fn kill` methods that set their state enforce this
 /// for you.)
-#[derive(Debug)]
-pub struct BlockSets<'a, E: Idx> {
-    /// Dataflow state immediately before control flow enters the given block.
-    pub(crate) on_entry: &'a mut BitSet<E>,
-
-    /// Bits that are set to 1 by the time we exit the given block. Hybrid
-    /// because it usually contains only 0 or 1 elements.
-    pub(crate) gen_set: &'a mut HybridBitSet<E>,
-
-    /// Bits that are set to 0 by the time we exit the given block. Hybrid
-    /// because it usually contains only 0 or 1 elements.
-    pub(crate) kill_set: &'a mut HybridBitSet<E>,
+#[derive(Debug, Clone, Copy)]
+pub struct GenKill<T> {
+    pub(crate) gen_set: T,
+    pub(crate) kill_set: T,
 }
 
-impl<'a, E:Idx> BlockSets<'a, E> {
+type GenKillSet<T> = GenKill<HybridBitSet<T>>;
+
+impl<T> GenKill<T> {
+    /// Creates a new tuple where `gen_set == kill_set == elem`.
+    pub(crate) fn from_elem(elem: T) -> Self
+        where T: Clone
+    {
+        GenKill {
+            gen_set: elem.clone(),
+            kill_set: elem,
+        }
+    }
+}
+
+impl<E:Idx> GenKillSet<E> {
+    pub(crate) fn clear(&mut self) {
+        self.gen_set.clear();
+        self.kill_set.clear();
+    }
+
     fn gen(&mut self, e: E) {
         self.gen_set.insert(e);
         self.kill_set.remove(e);
@@ -541,73 +501,93 @@
         }
     }
 
-    fn apply_local_effect(&mut self) {
-        self.on_entry.union(self.gen_set);
-        self.on_entry.subtract(self.kill_set);
+    /// Computes `(set ∪ gen) - kill` and assigns the result to `set`.
+    pub(crate) fn apply(&self, set: &mut BitSet<E>) {
+        set.union(&self.gen_set);
+        set.subtract(&self.kill_set);
     }
 }
 
+#[derive(Debug)]
+pub struct AllSets<E: Idx> {
+    /// Analysis bitwidth for each block.
+    bits_per_block: usize,
+
+    /// For each block, bits valid on entry to the block.
+    on_entry: Vec<BitSet<E>>,
+
+    /// The transfer function of each block expressed as the set of bits
+    /// generated and killed by executing the statements + terminator in the
+    /// block -- with one caveat. In particular, for *call terminators*, the
+    /// effect of storing the destination is not included, since that only takes
+    /// effect on the **success** edge (and not the unwind edge).
+    trans: Vec<GenKillSet<E>>,
+}
+
 impl<E:Idx> AllSets<E> {
     pub fn bits_per_block(&self) -> usize { self.bits_per_block }
-    pub fn for_block(&mut self, block_idx: usize) -> BlockSets<'_, E> {
-        BlockSets {
-            on_entry: &mut self.on_entry_sets[block_idx],
-            gen_set: &mut self.gen_sets[block_idx],
-            kill_set: &mut self.kill_sets[block_idx],
-        }
+
+    pub fn get_mut(&mut self, block_idx: usize) -> (&mut BitSet<E>, &mut GenKillSet<E>) {
+        (&mut self.on_entry[block_idx], &mut self.trans[block_idx])
     }
 
-    pub fn on_entry_set_for(&self, block_idx: usize) -> &BitSet<E> {
-        &self.on_entry_sets[block_idx]
+    pub fn trans_for(&self, block_idx: usize) -> &GenKillSet<E> {
+        &self.trans[block_idx]
+    }
+    pub fn trans_mut_for(&mut self, block_idx: usize) -> &mut GenKillSet<E> {
+        &mut self.trans[block_idx]
+    }
+    pub fn entry_set_for(&self, block_idx: usize) -> &BitSet<E> {
+        &self.on_entry[block_idx]
+    }
+    pub fn entry_set_mut_for(&mut self, block_idx: usize) -> &mut BitSet<E> {
+        &mut self.on_entry[block_idx]
     }
     pub fn gen_set_for(&self, block_idx: usize) -> &HybridBitSet<E> {
-        &self.gen_sets[block_idx]
+        &self.trans_for(block_idx).gen_set
     }
     pub fn kill_set_for(&self, block_idx: usize) -> &HybridBitSet<E> {
-        &self.kill_sets[block_idx]
+        &self.trans_for(block_idx).kill_set
     }
 }
 
 /// Parameterization for the precise form of data flow that is used.
-/// `InitialFlow` handles initializing the bitvectors before any
-/// code is inspected by the analysis. Analyses that need more nuanced
-/// initialization (e.g., they need to consult the results of some other
-/// dataflow analysis to set up the initial bitvectors) should not
-/// implement this.
-pub trait InitialFlow {
-    /// Specifies the initial value for each bit in the `on_entry` set
-    fn bottom_value() -> bool;
+///
+/// `BottomValue` determines whether the initial entry set for each basic block is empty or full.
+/// This also determines the semantics of the lattice `join` operator used to merge dataflow
+/// results, since dataflow works by starting at the bottom and moving monotonically to a fixed
+/// point.
+///
+/// This means, for propagation across the graph, that you either want to start at all-zeroes and
+/// then use Union as your merge when propagating, or you start at all-ones and then use Intersect
+/// as your merge when propagating.
+pub trait BottomValue {
+    /// Specifies the initial value for each bit in the entry set for each basic block.
+    const BOTTOM_VALUE: bool;
+
+    /// Merges `in_set` into `inout_set`, returning `true` if `inout_set` changed.
+    #[inline]
+    fn join<T: Idx>(&self, inout_set: &mut BitSet<T>, in_set: &BitSet<T>) -> bool {
+        if Self::BOTTOM_VALUE == false {
+            inout_set.union(in_set)
+        } else {
+            inout_set.intersect(in_set)
+        }
+    }
 }
 
-pub trait BitDenotation<'tcx>: BitSetOperator {
+/// A specific flavor of dataflow analysis.
+///
+/// To run a dataflow analysis, one sets up an initial state for the
+/// `START_BLOCK` via `start_block_effect` and a transfer function (`trans`)
+/// for each block individually. The entry set for all other basic blocks is
+/// initialized to `Self::BOTTOM_VALUE`. The dataflow analysis then
+/// iteratively modifies the various entry sets (but leaves the transfer
+/// function unchanged).
+pub trait BitDenotation<'tcx>: BottomValue {
     /// Specifies what index type is used to access the bitvector.
     type Idx: Idx;
 
-    /// Some analyses want to accumulate knowledge within a block when
-    /// analyzing its statements for building the gen/kill sets. Override
-    /// this method to return true in such cases.
-    ///
-    /// When this returns true, the statement-effect (re)construction
-    /// will clone the `on_entry` state and pass along a reference via
-    /// `sets.on_entry` to that local clone into `statement_effect` and
-    /// `terminator_effect`).
-    ///
-    /// When it's false, no local clone is constructed; instead a
-    /// reference directly into `on_entry` is passed along via
-    /// `sets.on_entry` instead, which represents the flow state at
-    /// the block's start, not necessarily the state immediately prior
-    /// to the statement/terminator under analysis.
-    ///
-    /// In either case, the passed reference is mutable, but this is a
-    /// wart from using the `BlockSets` type in the API; the intention
-    /// is that the `statement_effect` and `terminator_effect` methods
-    /// mutate only the gen/kill sets.
-    //
-    // FIXME: we should consider enforcing the intention described in
-    // the previous paragraph by passing the three sets in separate
-    // parameters to encode their distinct mutabilities.
-    fn accumulates_intrablock_state() -> bool { false }
-
     /// A name describing the dataflow analysis that this
     /// `BitDenotation` is supporting. The name should be something
     /// suitable for plugging in as part of a filename (i.e., avoid
@@ -640,7 +620,7 @@
     /// applied, in that order, before moving for the next
     /// statement.
     fn before_statement_effect(&self,
-                               _sets: &mut BlockSets<'_, Self::Idx>,
+                               _trans: &mut GenKillSet<Self::Idx>,
                                _location: Location) {}
 
     /// Mutates the block-sets (the flow sets for the given
@@ -654,7 +634,7 @@
     /// `bb_data` is the sequence of statements identified by `bb` in
     /// the MIR.
     fn statement_effect(&self,
-                        sets: &mut BlockSets<'_, Self::Idx>,
+                        trans: &mut GenKillSet<Self::Idx>,
                         location: Location);
 
     /// Similar to `terminator_effect`, except it applies
@@ -669,7 +649,7 @@
     /// applied, in that order, before moving for the next
     /// terminator.
     fn before_terminator_effect(&self,
-                                _sets: &mut BlockSets<'_, Self::Idx>,
+                                _trans: &mut GenKillSet<Self::Idx>,
                                 _location: Location) {}
 
     /// Mutates the block-sets (the flow sets for the given
@@ -683,7 +663,7 @@
     /// The effects applied here cannot depend on which branch the
     /// terminator took.
     fn terminator_effect(&self,
-                         sets: &mut BlockSets<'_, Self::Idx>,
+                         trans: &mut GenKillSet<Self::Idx>,
                          location: Location);
 
     /// Mutates the block-sets according to the (flow-dependent)
@@ -718,17 +698,16 @@
 {
     pub fn new(body: &'a Body<'tcx>,
                dead_unwinds: &'a BitSet<mir::BasicBlock>,
-               denotation: D) -> Self where D: InitialFlow {
+               denotation: D) -> Self {
         let bits_per_block = denotation.bits_per_block();
         let num_blocks = body.basic_blocks().len();
 
-        let on_entry_sets = if D::bottom_value() {
+        let on_entry = if D::BOTTOM_VALUE {
             vec![BitSet::new_filled(bits_per_block); num_blocks]
         } else {
             vec![BitSet::new_empty(bits_per_block); num_blocks]
         };
-        let gen_sets = vec![HybridBitSet::new_empty(bits_per_block); num_blocks];
-        let kill_sets = gen_sets.clone();
+        let nop = GenKillSet::from_elem(HybridBitSet::new_empty(bits_per_block));
 
         DataflowAnalysis {
             body,
@@ -736,9 +715,8 @@
             flow_state: DataflowState {
                 sets: AllSets {
                     bits_per_block,
-                    on_entry_sets,
-                    gen_sets,
-                    kill_sets,
+                    on_entry,
+                    trans: vec![nop; num_blocks],
                 },
                 operator: denotation,
             }
@@ -836,7 +814,7 @@
                                          in_out: &BitSet<D::Idx>,
                                          bb: mir::BasicBlock,
                                          dirty_queue: &mut WorkQueue<mir::BasicBlock>) {
-        let entry_set = &mut self.flow_state.sets.for_block(bb.index()).on_entry;
+        let entry_set = self.flow_state.sets.entry_set_mut_for(bb.index());
         let set_changed = self.flow_state.operator.join(entry_set, &in_out);
         if set_changed {
             dirty_queue.insert(bb);
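
`BOTTOM_VALUE` fixes both the initial entry sets and the `join` operator in one place. A minimal, self-contained sketch of that contract (with `Vec<bool>` standing in for rustc's `BitSet`, and a hypothetical `MaybeInit` analysis; an illustration of the idea, not the rustc code itself):

    // `BOTTOM_VALUE = false` pairs with union (start from all-zeroes);
    // `BOTTOM_VALUE = true` pairs with intersect (start from all-ones).
    trait BottomValue {
        const BOTTOM_VALUE: bool;

        /// Merges `in_set` into `inout_set`, returning `true` on change.
        fn join(inout_set: &mut [bool], in_set: &[bool]) -> bool {
            let mut changed = false;
            for (d, &s) in inout_set.iter_mut().zip(in_set) {
                let merged = if Self::BOTTOM_VALUE { *d && s } else { *d || s };
                changed |= merged != *d;
                *d = merged;
            }
            changed
        }
    }

    struct MaybeInit; // hypothetical "may be initialized"-style analysis
    impl BottomValue for MaybeInit {
        const BOTTOM_VALUE: bool = false; // start empty, merge with union
    }

    fn main() {
        let mut entry = vec![false, true, false];
        assert!(MaybeInit::join(&mut entry, &[true, true, false]));
        assert_eq!(entry, vec![true, true, false]);
    }

Starting at bottom and joining monotonically is what guarantees the fixed-point iteration terminates.
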
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index fbba8d1..c6e762b 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -442,7 +442,7 @@
                 Ok(Some((size.align_to(align), align)))
             }
             ty::Dynamic(..) => {
-                let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?;
+                let vtable = metadata.expect("dyn trait fat ptr must have vtable");
                 // the second entry in the vtable is the dynamic size of the object.
                 Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
             }
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index de035ed..c3eec67 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -19,8 +19,7 @@
 use super::{
     Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
     InterpResult, Scalar, InterpError, GlobalAlloc, PointerArithmetic,
-    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg, InboundsCheck,
-    InterpError::ValidationFailure,
+    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg,
 };
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone, Hash)]
@@ -44,6 +43,17 @@
     }
 }
 
+/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
+#[derive(Debug, Copy, Clone)]
+pub enum AllocCheck {
+    /// Allocation must be live and not a function pointer.
+    Dereferencable,
+    /// Allocation needs to be live, but may be a function pointer.
+    Live,
+    /// Allocation may be dead.
+    MaybeDead,
+}
+
 // `Memory` has to depend on the `Machine` because some of its operations
 // (e.g., `get`) call a `Machine` hook.
 pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
@@ -248,63 +258,93 @@
         Ok(())
     }
 
-    /// Checks that the pointer is aligned AND non-NULL. This supports ZSTs in two ways:
-    /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated.
-    pub fn check_align(
+    /// Checks if the given scalar is allowed to do a memory access of given `size`
+    /// and `align`. On success, returns `None` for zero-sized accesses (where
+    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
+    /// Crucially, if the input is a `Pointer`, we will test it for liveness
+    /// *even if* the size is 0.
+    ///
+    /// Everyone accessing memory based on a `Scalar` should use this method to get the
+    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
+    /// to make sure it is sufficiently aligned and not dangling.  Not doing that may
+    /// cause ICEs.
+    pub fn check_ptr_access(
         &self,
-        ptr: Scalar<M::PointerTag>,
-        required_align: Align
-    ) -> InterpResult<'tcx> {
-        // Check non-NULL/Undef, extract offset
-        let (offset, alloc_align) = match ptr.to_bits_or_ptr(self.pointer_size(), self) {
-            Err(ptr) => {
-                // check this is not NULL -- which we can ensure only if this is in-bounds
-                // of some (potentially dead) allocation.
-                let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
-                                                  CheckInAllocMsg::NullPointerTest)?;
-                (ptr.offset.bytes(), align)
+        sptr: Scalar<M::PointerTag>,
+        size: Size,
+        align: Align,
+    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
+            if offset % align.bytes() == 0 {
+                Ok(())
+            } else {
+                // The biggest power of two through which `offset` is divisible.
+                let offset_pow2 = 1 << offset.trailing_zeros();
+                err!(AlignmentCheckFailed {
+                    has: Align::from_bytes(offset_pow2).unwrap(),
+                    required: align,
+                })
             }
-            Ok(data) => {
-                // check this is not NULL
-                if data == 0 {
+        }
+
+        // Normalize to a `Pointer` if we definitely need one.
+        let normalized = if size.bytes() == 0 {
+            // Can be an integer, just take what we got.  We do NOT `force_bits` here;
+            // if this is already a `Pointer` we want to do the bounds checks!
+            sptr
+        } else {
+            // A "real" access, we must get a pointer.
+            Scalar::Ptr(self.force_ptr(sptr)?)
+        };
+        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
+            Ok(bits) => {
+                let bits = bits as u64; // it's ptr-sized
+                assert!(size.bytes() == 0);
+                // Must be non-NULL and aligned.
+                if bits == 0 {
                     return err!(InvalidNullPointerUsage);
                 }
-                // the "base address" is 0 and hence always aligned
-                (data as u64, required_align)
+                check_offset_align(bits, align)?;
+                None
             }
-        };
-        // Check alignment
-        if alloc_align.bytes() < required_align.bytes() {
-            return err!(AlignmentCheckFailed {
-                has: alloc_align,
-                required: required_align,
-            });
-        }
-        if offset % required_align.bytes() == 0 {
-            Ok(())
-        } else {
-            let has = offset % required_align.bytes();
-            err!(AlignmentCheckFailed {
-                has: Align::from_bytes(has).unwrap(),
-                required: required_align,
-            })
-        }
+            Err(ptr) => {
+                let (allocation_size, alloc_align) =
+                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferencable)?;
+                // Test bounds. This also ensures non-NULL.
+                // It is sufficient to check this for the end pointer. The addition
+                // checks for overflow.
+                let end_ptr = ptr.offset(size, self)?;
+                end_ptr.check_in_alloc(allocation_size, CheckInAllocMsg::MemoryAccessTest)?;
+                // Test align. Check this last; if both bounds and alignment are violated
+                // we want the error to be about the bounds.
+                if alloc_align.bytes() < align.bytes() {
+                    // The allocation itself is not aligned enough.
+                    // FIXME: Alignment check is too strict, depending on the base address that
+                    // got picked we might be aligned even if this check fails.
+                    // We instead have to fall back to converting to an integer and checking
+                    // the "real" alignment.
+                    return err!(AlignmentCheckFailed {
+                        has: alloc_align,
+                        required: align,
+                    });
+                }
+                check_offset_align(ptr.offset.bytes(), align)?;
+
+                // We can still be zero-sized in this branch, in which case we have to
+                // return `None`.
+                if size.bytes() == 0 { None } else { Some(ptr) }
+            }
+        })
     }
 
-    /// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end
-    /// of an allocation (i.e., at the first *inaccessible* location) *is* considered
-    /// in-bounds!  This follows C's/LLVM's rules.
-    /// If you want to check bounds before doing a memory access, better first obtain
-    /// an `Allocation` and call `check_bounds`.
-    pub fn check_bounds_ptr(
+    /// Tests whether the pointer might be NULL.
+    pub fn ptr_may_be_null(
         &self,
         ptr: Pointer<M::PointerTag>,
-        liveness: InboundsCheck,
-        msg: CheckInAllocMsg,
-    ) -> InterpResult<'tcx, Align> {
-        let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?;
-        ptr.check_in_alloc(allocation_size, msg)?;
-        Ok(align)
+    ) -> bool {
+        let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+            .expect("alloc info with MaybeDead cannot fail");
+        ptr.check_in_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
     }
 }
 
@@ -443,13 +483,14 @@
         }
     }
 
-    /// Obtain the size and alignment of an allocation, even if that allocation has been deallocated
+    /// Obtain the size and alignment of an allocation, even if that allocation has
+    /// been deallocated.
     ///
-    /// If `liveness` is `InboundsCheck::MaybeDead`, this function always returns `Ok`
+    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
     pub fn get_size_and_align(
         &self,
         id: AllocId,
-        liveness: InboundsCheck,
+        liveness: AllocCheck,
     ) -> InterpResult<'static, (Size, Align)> {
         if let Ok(alloc) = self.get(id) {
             return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
@@ -459,7 +500,14 @@
         let alloc = self.tcx.alloc_map.lock().get(id);
         // Could also be a fn ptr or extern static
         match alloc {
-            Some(GlobalAlloc::Function(..)) => Ok((Size::ZERO, Align::from_bytes(1).unwrap())),
+            Some(GlobalAlloc::Function(..)) => {
+                if let AllocCheck::Dereferencable = liveness {
+                    // The caller requested no function pointers.
+                    err!(DerefFunctionPointer)
+                } else {
+                    Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
+                }
+            }
             // `self.get` would also work, but can cause cycles if a static refers to itself
             Some(GlobalAlloc::Static(did)) => {
                 // The only way `get` couldn't have worked here is if this is an extern static
@@ -471,17 +519,15 @@
             }
             _ => {
                 if let Ok(alloc) = self.get(id) {
-                    return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align));
+                    Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align))
                 }
-                match liveness {
-                    InboundsCheck::MaybeDead => {
-                        // Must be a deallocated pointer
-                        self.dead_alloc_map.get(&id).cloned().ok_or_else(||
-                            ValidationFailure("allocation missing in dead_alloc_map".to_string())
-                                .into()
-                        )
-                    },
-                    InboundsCheck::Live => err!(DanglingPointerDeref),
+                else if let AllocCheck::MaybeDead = liveness {
+                    // Deallocated pointers are allowed, we should be able to find
+                    // them in the map.
+                    Ok(*self.dead_alloc_map.get(&id)
+                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
+                } else {
+                    err!(DanglingPointerDeref)
                 }
             },
         }
@@ -629,24 +675,22 @@
     }
 }
 
-/// Byte Accessors
+/// Reading and writing.
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Performs appropriate bounds checks.
     pub fn read_bytes(
         &self,
         ptr: Scalar<M::PointerTag>,
         size: Size,
     ) -> InterpResult<'tcx, &[u8]> {
-        if size.bytes() == 0 {
-            Ok(&[])
-        } else {
-            let ptr = self.force_ptr(ptr)?;
-            self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
-        }
+        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+            Some(ptr) => ptr,
+            None => return Ok(&[]), // zero-sized access
+        };
+        self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
     }
-}
 
-/// Reading and writing.
-impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+    /// Performs appropriate bounds checks.
     pub fn copy(
         &mut self,
         src: Scalar<M::PointerTag>,
@@ -659,6 +703,7 @@
         self.copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
     }
 
+    /// Performs appropriate bounds checks.
     pub fn copy_repeatedly(
         &mut self,
         src: Scalar<M::PointerTag>,
@@ -669,15 +714,14 @@
         length: u64,
         nonoverlapping: bool,
     ) -> InterpResult<'tcx> {
-        self.check_align(src, src_align)?;
-        self.check_align(dest, dest_align)?;
-        if size.bytes() == 0 {
-            // Nothing to do for ZST, other than checking alignment and
-            // non-NULLness which already happened.
-            return Ok(());
-        }
-        let src = self.force_ptr(src)?;
-        let dest = self.force_ptr(dest)?;
+        // We need to check *both* before early-aborting due to the size being 0.
+        let (src, dest) = match (self.check_ptr_access(src, size, src_align)?,
+                self.check_ptr_access(dest, size * length, dest_align)?)
+        {
+            (Some(src), Some(dest)) => (src, dest),
+            // One of the two sizes is 0.
+            _ => return Ok(()),
+        };
 
         // first copy the relocations to a temporary buffer, because
         // `get_bytes_mut` will clear the relocations, which is correct,
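
One detail of `check_ptr_access` worth spelling out: when the `offset % align` test in `check_offset_align` fails, the error reports the alignment the pointer actually has, computed as `1 << offset.trailing_zeros()`, i.e. the largest power of two dividing the offset. A standalone sketch (the helper name is made up for illustration):

    // The error branch only runs for offsets that failed `offset % align == 0`,
    // so `offset` is nonzero there and `trailing_zeros` gives a valid
    // alignment exponent.
    fn largest_dividing_pow2(offset: u64) -> u64 {
        assert!(offset != 0);
        1 << offset.trailing_zeros()
    }

    fn main() {
        assert_eq!(largest_dividing_pow2(12), 4); // 12 = 0b1100
        assert_eq!(largest_dividing_pow2(8), 8);
        // An offset of 12 fails a required alignment of 8 and reports "has 4".
        assert!(12 % 8 != 0);
    }
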
diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs
index 0293a83..259bd6a 100644
--- a/src/librustc_mir/interpret/mod.rs
+++ b/src/librustc_mir/interpret/mod.rs
@@ -24,7 +24,7 @@
 
 pub use self::place::{Place, PlaceTy, MemPlace, MPlaceTy};
 
-pub use self::memory::{Memory, MemoryKind};
+pub use self::memory::{Memory, MemoryKind, AllocCheck};
 
 pub use self::machine::{Machine, AllocMap, MayLeak};
 
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index ec391b7..c72078f 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -9,9 +9,9 @@
 };
 
 use rustc::mir::interpret::{
-    GlobalId, AllocId, CheckInAllocMsg,
+    GlobalId, AllocId,
     ConstValue, Pointer, Scalar,
-    InterpResult, InterpError, InboundsCheck,
+    InterpResult, InterpError,
     sign_extend, truncate,
 };
 use super::{
@@ -226,19 +226,14 @@
         }
         let (ptr, ptr_align) = mplace.to_scalar_ptr_align();
 
-        if mplace.layout.is_zst() {
-            // Not all ZSTs have a layout we would handle below, so just short-circuit them
-            // all here.
-            self.memory.check_align(ptr, ptr_align)?;
-            return Ok(Some(ImmTy {
+        let ptr = match self.memory.check_ptr_access(ptr, mplace.layout.size, ptr_align)? {
+            Some(ptr) => ptr,
+            None => return Ok(Some(ImmTy { // zero-sized type
                 imm: Immediate::Scalar(Scalar::zst().into()),
                 layout: mplace.layout,
-            }));
-        }
+            })),
+        };
 
-        // check for integer pointers before alignment to report better errors
-        let ptr = self.force_ptr(ptr)?;
-        self.memory.check_align(ptr.into(), ptr_align)?;
         match mplace.layout.abi {
             layout::Abi::Scalar(..) => {
                 let scalar = self.memory
@@ -250,17 +245,18 @@
                 }))
             }
             layout::Abi::ScalarPair(ref a, ref b) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                 let (a, b) = (&a.value, &b.value);
                 let (a_size, b_size) = (a.size(self), b.size(self));
                 let a_ptr = ptr;
                 let b_offset = a_size.align_to(b.align(self).abi);
-                assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
+                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
                 let b_ptr = ptr.offset(b_offset, self)?;
                 let a_val = self.memory
                     .get(ptr.alloc_id)?
                     .read_scalar(self, a_ptr, a_size)?;
-                let b_align = ptr_align.restrict_for_offset(b_offset);
-                self.memory.check_align(b_ptr.into(), b_align)?;
                 let b_val = self.memory
                     .get(ptr.alloc_id)?
                     .read_scalar(self, b_ptr, b_size)?;
@@ -639,8 +635,7 @@
                     Err(ptr) => {
                         // The niche must be just 0 (which an inbounds pointer value never is)
                         let ptr_valid = niche_start == 0 && variants_start == variants_end &&
-                            self.memory.check_bounds_ptr(ptr, InboundsCheck::MaybeDead,
-                                                         CheckInAllocMsg::NullPointerTest).is_ok();
+                            !self.memory.ptr_may_be_null(ptr);
                         if !ptr_valid {
                             return err!(InvalidDiscriminant(raw_discr.erase_tag().into()));
                         }
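
The `ScalarPair` path above (and the matching write path in place.rs below) computes the second component's offset as the first component's size rounded up to the second's alignment, via `a_size.align_to(b.align(self).abi)`. A self-contained sketch of that rounding, assuming power-of-two alignments in bytes:

    // Rounds `size` up to the next multiple of `align`; the standard bit
    // trick for power-of-two alignments, and what `Size::align_to` computes.
    fn align_to(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // E.g. a pair laid out like (u8, u32): the second field sits at offset 4.
        assert_eq!(align_to(1, 4), 4);
        // The `assert!(b_offset.bytes() > 0)` in the diff holds because scalar
        // components are never zero-sized, so the rounded-up offset is nonzero.
        assert!(align_to(1, 1) > 0);
    }
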
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index 1285549..1351b5b 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -222,9 +222,9 @@
     }
 
     #[inline]
-    pub(super) fn vtable(self) -> InterpResult<'tcx, Pointer<Tag>> {
+    pub(super) fn vtable(self) -> Scalar<Tag> {
         match self.layout.ty.sty {
-            ty::Dynamic(..) => self.mplace.meta.unwrap().to_ptr(),
+            ty::Dynamic(..) => self.mplace.meta.unwrap(),
             _ => bug!("vtable not supported on type {:?}", self.layout.ty),
         }
     }
@@ -746,15 +746,13 @@
         // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
         // wrong type.
+        assert!(!dest.layout.is_unsized());
 
-        // Nothing to do for ZSTs, other than checking alignment
-        if dest.layout.is_zst() {
-            return self.memory.check_align(ptr, ptr_align);
-        }
+        let ptr = match self.memory.check_ptr_access(ptr, dest.layout.size, ptr_align)? {
+            Some(ptr) => ptr,
+            None => return Ok(()), // zero-sized access
+        };
 
-        // check for integer pointers before alignment to report better errors
-        let ptr = self.force_ptr(ptr)?;
-        self.memory.check_align(ptr.into(), ptr_align)?;
         let tcx = &*self.tcx;
         // FIXME: We should check that there are dest.layout.size many bytes available in
         // memory.  The code below is not sufficient, with enough padding it might not
@@ -771,6 +769,9 @@
                 )
             }
             Immediate::ScalarPair(a_val, b_val) => {
+                // We checked `ptr_align` above, so all fields will have the alignment they need.
+                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                 let (a, b) = match dest.layout.abi {
                     layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
                     _ => bug!("write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
@@ -778,11 +779,8 @@
                 };
                 let (a_size, b_size) = (a.size(self), b.size(self));
                 let b_offset = a_size.align_to(b.align(self).abi);
-                let b_align = ptr_align.restrict_for_offset(b_offset);
                 let b_ptr = ptr.offset(b_offset, self)?;
 
-                self.memory.check_align(b_ptr.into(), b_align)?;
-
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
                 // fields do not match the `ScalarPair` components.
@@ -1053,7 +1051,7 @@
     /// Also return some more information so drop doesn't have to run the same code twice.
     pub(super) fn unpack_dyn_trait(&self, mplace: MPlaceTy<'tcx, M::PointerTag>)
     -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
-        let vtable = mplace.vtable()?; // also sanity checks the type
+        let vtable = mplace.vtable(); // also sanity checks the type
         let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
         let layout = self.layout_of(ty)?;
 
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index 190f781..af061f9 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -425,12 +425,15 @@
                     }
                 };
                 // Find and consult vtable
-                let vtable = receiver_place.vtable()?;
-                self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
-                let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized(
-                    self,
-                    vtable.offset(ptr_size * (idx as u64 + 3), self)?,
-                )?.to_ptr()?;
+                let vtable = receiver_place.vtable();
+                let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
+                let vtable_slot = self.memory.check_ptr_access(
+                    vtable_slot,
+                    ptr_size,
+                    self.tcx.data_layout.pointer_align.abi,
+                )?.expect("cannot be a ZST");
+                let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
+                    .read_ptr_sized(self, vtable_slot)?.to_ptr()?;
                 let instance = self.memory.get_fn(fn_ptr)?;
 
                 // `*mut receiver_place.layout.ty` is almost the layout that we
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index 4ae0ee5..5d2f268 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -101,10 +101,14 @@
     /// Returns the drop fn instance as well as the actual dynamic type
     pub fn read_drop_type_from_vtable(
         &self,
-        vtable: Pointer<M::PointerTag>,
+        vtable: Scalar<M::PointerTag>,
     ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
         // we don't care about the pointee type, we just want a pointer
-        self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
+        let vtable = self.memory.check_ptr_access(
+            vtable,
+            self.tcx.data_layout.pointer_size,
+            self.tcx.data_layout.pointer_align.abi,
+        )?.expect("cannot be a ZST");
         let drop_fn = self.memory
             .get(vtable.alloc_id)?
             .read_ptr_sized(self, vtable)?
@@ -113,20 +117,28 @@
         trace!("Found drop fn: {:?}", drop_instance);
         let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
         let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
-        // the drop function takes *mut T where T is the type being dropped, so get that
+        // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
         let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
         Ok((drop_instance, ty))
     }
 
     pub fn read_size_and_align_from_vtable(
         &self,
-        vtable: Pointer<M::PointerTag>,
+        vtable: Scalar<M::PointerTag>,
     ) -> InterpResult<'tcx, (Size, Align)> {
         let pointer_size = self.pointer_size();
-        self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
+        // We check for a size of 3 * ptr_size, which covers the drop fn (unused here),
+        // the size, and the align (which we read below).
+        let vtable = self.memory.check_ptr_access(
+            vtable,
+            3 * pointer_size,
+            self.tcx.data_layout.pointer_align.abi,
+        )?.expect("cannot be a ZST");
         let alloc = self.memory.get(vtable.alloc_id)?;
-        let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?
-            .to_bits(pointer_size)? as u64;
+        let size = alloc.read_ptr_sized(
+            self,
+            vtable.offset(pointer_size, self)?
+        )?.to_bits(pointer_size)? as u64;
         let align = alloc.read_ptr_sized(
             self,
             vtable.offset(pointer_size * 2, self)?,
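
Both the method lookup in terminator.rs (`ptr_size * (idx as u64 + 3)`) and the reads above lean on the same vtable layout: pointer-sized slots holding the drop fn, then the size, then the align, with method entries starting at slot 3. That is why the bounds check covers `3 * pointer_size` bytes. A sketch of the slot arithmetic (the constant is illustrative):

    const FIRST_METHOD_SLOT: u64 = 3; // slots 0..3: drop fn, size, align

    fn vtable_offset(slot: u64, ptr_size: u64) -> u64 {
        slot * ptr_size
    }

    fn main() {
        let ptr_size = 8; // assuming a 64-bit target
        // The header bounds check covers exactly the three leading slots.
        assert_eq!(vtable_offset(FIRST_METHOD_SLOT, ptr_size), 3 * ptr_size);
        // Method `idx` lives at `(idx + 3) * ptr_size`, matching terminator.rs.
        let idx = 2;
        assert_eq!(vtable_offset(idx + FIRST_METHOD_SLOT, ptr_size), 40);
    }
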
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs
index e42c667..b2a159f 100644
--- a/src/librustc_mir/interpret/validity.rs
+++ b/src/librustc_mir/interpret/validity.rs
@@ -3,11 +3,11 @@
 
 use syntax_pos::symbol::{sym, Symbol};
 use rustc::hir;
-use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx};
+use rustc::ty::layout::{self, TyLayout, LayoutOf, VariantIdx};
 use rustc::ty;
 use rustc_data_structures::fx::FxHashSet;
 use rustc::mir::interpret::{
-    Scalar, GlobalAlloc, InterpResult, InterpError, CheckInAllocMsg,
+    GlobalAlloc, InterpResult, InterpError,
 };
 
 use std::hash::Hash;
@@ -365,8 +365,16 @@
                     let tail = self.ecx.tcx.struct_tail(layout.ty);
                     match tail.sty {
                         ty::Dynamic(..) => {
-                            let vtable = try_validation!(meta.unwrap().to_ptr(),
-                                "non-pointer vtable in fat pointer", self.path);
+                            let vtable = meta.unwrap();
+                            try_validation!(
+                                self.ecx.memory.check_ptr_access(
+                                    vtable,
+                                    3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
+                                    self.ecx.tcx.data_layout.pointer_align.abi,
+                                ),
+                                "dangling or unaligned vtable pointer or too small vtable",
+                                self.path
+                            );
                             try_validation!(self.ecx.read_drop_type_from_vtable(vtable),
                                 "invalid drop fn in vtable", self.path);
                             try_validation!(self.ecx.read_size_and_align_from_vtable(vtable),
@@ -384,16 +392,19 @@
                             bug!("Unexpected unsized type tail: {:?}", tail),
                     }
                 }
-                // Make sure this is non-NULL and aligned
+                // Make sure this is dereferenceable and sufficiently aligned.
                 let (size, align) = self.ecx.size_and_align_of(meta, layout)?
                     // for the purpose of validity, consider foreign types to have
                     // alignment and size determined by the layout (size will be 0,
                     // alignment should take attributes into account).
                     .unwrap_or_else(|| (layout.size, layout.align.abi));
-                match self.ecx.memory.check_align(ptr, align) {
-                    Ok(_) => {},
+                let ptr: Option<_> = match self.ecx.memory.check_ptr_access(ptr, size, align) {
+                    Ok(ptr) => ptr,
                     Err(err) => {
-                        info!("{:?} is not aligned to {:?}", ptr, align);
+                        info!(
+                            "{:?} did not pass access check for size {:?}, align {:?}",
+                            ptr, size, align
+                        );
                         match err.kind {
                             InterpError::InvalidNullPointerUsage =>
                                 return validation_failure!("NULL reference", self.path),
@@ -401,23 +412,23 @@
                                 return validation_failure!(format!("unaligned reference \
                                     (required {} byte alignment but found {})",
                                     required.bytes(), has.bytes()), self.path),
+                            InterpError::ReadBytesAsPointer =>
+                                return validation_failure!(
+                                    "integer pointer in non-ZST reference",
+                                    self.path
+                                ),
                             _ =>
                                 return validation_failure!(
-                                    "dangling (out-of-bounds) reference (might be NULL at \
-                                        run-time)",
+                                    "dangling (not entirely in bounds) reference",
                                     self.path
                                 ),
                         }
                     }
-                }
+                };
                 // Recursive checking
                 if let Some(ref mut ref_tracking) = self.ref_tracking_for_consts {
                     let place = self.ecx.ref_to_mplace(value)?;
-                    // FIXME(RalfJ): check ZST for inbound pointers
-                    if size != Size::ZERO {
-                        // Non-ZST also have to be dereferencable
-                        let ptr = try_validation!(place.ptr.to_ptr(),
-                            "integer pointer in non-ZST reference", self.path);
+                    if let Some(ptr) = ptr { // not a ZST
                         // Skip validation entirely for some external statics
                         let alloc_kind = self.ecx.tcx.alloc_map.lock().get(ptr.alloc_id);
                         if let Some(GlobalAlloc::Static(did)) = alloc_kind {
@@ -429,18 +440,10 @@
                                 return Ok(());
                             }
                         }
-                        // Maintain the invariant that the place we are checking is
-                        // already verified to be in-bounds.
-                        try_validation!(
-                            self.ecx.memory
-                                .get(ptr.alloc_id)?
-                                .check_bounds(self.ecx, ptr, size, CheckInAllocMsg::InboundsTest),
-                            "dangling (not entirely in bounds) reference", self.path);
                     }
                     // Check if we have encountered this pointer+layout combination
-                    // before.  Proceed recursively even for integer pointers, no
-                    // reason to skip them! They are (recursively) valid for some ZST,
-                    // but not for others (e.g., `!` is a ZST).
+                    // before.  Proceed recursively even for ZSTs; there is no
+                    // reason to skip them! E.g., `!` is a ZST and we want to validate it.
                     let path = &self.path;
                     ref_tracking.track(place, || {
                         // We need to clone the path anyway, make sure it gets created
@@ -499,14 +502,8 @@
         let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
             Err(ptr) => {
                 if lo == 1 && hi == max_hi {
-                    // only NULL is not allowed.
-                    // We can call `check_align` to check non-NULL-ness, but have to also look
-                    // for function pointers.
-                    let non_null =
-                        self.ecx.memory.check_align(
-                            Scalar::Ptr(ptr), Align::from_bytes(1).unwrap()
-                        ).is_ok();
-                    if !non_null {
+                    // Only NULL is the niche.  So make sure the ptr is NOT NULL.
+                    if self.ecx.memory.ptr_may_be_null(ptr) {
                         // These conditions are just here to improve the diagnostics so we can
                         // differentiate between null pointers and dangling pointers
                         if self.ref_tracking_for_consts.is_some() &&
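
The `lo == 1 && hi == max_hi` condition above means the valid scalar range excludes exactly the value 0. For a pointer whose concrete address is unknown, the range check therefore degenerates to the null check that `ptr_may_be_null` answers. A sketch of that reduction (hypothetical helper):

    // A valid range [1, max] rules out only the value 0, so for an abstract
    // pointer the single decidable question is "might this be null?".
    fn range_excludes_only_zero(lo: u128, hi: u128, max: u128) -> bool {
        lo == 1 && hi == max
    }

    fn main() {
        // E.g. the scalar range of a `NonNull<T>`-like layout on a 64-bit target:
        let max = u64::MAX as u128;
        assert!(range_excludes_only_zero(1, max, max));
        // A range like [1, 100] excludes more than 0; an abstract pointer
        // cannot be shown to be in range, so validation must fail.
        assert!(!range_excludes_only_zero(1, 100, max));
    }
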
diff --git a/src/librustc_mir/transform/elaborate_drops.rs b/src/librustc_mir/transform/elaborate_drops.rs
index d805c66..b2b489b 100644
--- a/src/librustc_mir/transform/elaborate_drops.rs
+++ b/src/librustc_mir/transform/elaborate_drops.rs
@@ -95,7 +95,7 @@
         };
 
         let mut init_data = InitializationData {
-            live: flow_inits.sets().on_entry_set_for(bb.index()).to_owned(),
+            live: flow_inits.sets().entry_set_for(bb.index()).to_owned(),
             dead: BitSet::new_empty(env.move_data.move_paths.len()),
         };
         debug!("find_dead_unwinds @ {:?}: {:?}; init_data={:?}",
@@ -304,9 +304,9 @@
 
     fn initialization_data_at(&self, loc: Location) -> InitializationData {
         let mut data = InitializationData {
-            live: self.flow_inits.sets().on_entry_set_for(loc.block.index())
+            live: self.flow_inits.sets().entry_set_for(loc.block.index())
                 .to_owned(),
-            dead: self.flow_uninits.sets().on_entry_set_for(loc.block.index())
+            dead: self.flow_uninits.sets().entry_set_for(loc.block.index())
                 .to_owned(),
         };
         for stmt in 0..loc.statement_index {
diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs
index c4601229..f12309c 100644
--- a/src/librustc_mir/transform/rustc_peek.rs
+++ b/src/librustc_mir/transform/rustc_peek.rs
@@ -18,7 +18,6 @@
 };
 use crate::dataflow::move_paths::{MovePathIndex, LookupResult};
 use crate::dataflow::move_paths::{HasMoveData, MoveData};
-use crate::dataflow;
 
 use crate::dataflow::has_rustc_mir_with;
 
@@ -133,9 +132,8 @@
         }
     };
 
-    let mut on_entry = results.0.sets.on_entry_set_for(bb.index()).to_owned();
-    let mut gen_set = results.0.sets.gen_set_for(bb.index()).clone();
-    let mut kill_set = results.0.sets.kill_set_for(bb.index()).clone();
+    let mut on_entry = results.0.sets.entry_set_for(bb.index()).to_owned();
+    let mut trans = results.0.sets.trans_for(bb.index()).clone();
 
     // Emulate effect of all statements in the block up to (but not
     // including) the borrow within `peek_arg_place`. Do *not* include
@@ -143,10 +141,6 @@
     // of the argument at time immediate preceding Call to
     // `rustc_peek`).
 
-    let mut sets = dataflow::BlockSets { on_entry: &mut on_entry,
-                                         gen_set: &mut gen_set,
-                                         kill_set: &mut kill_set };
-
     for (j, stmt) in statements.iter().enumerate() {
         debug!("rustc_peek: ({:?},{}) {:?}", bb, j, stmt);
         let (place, rvalue) = match stmt.kind {
@@ -170,7 +164,7 @@
                 // Okay, our search is over.
                 match move_data.rev_lookup.find(peeking_at_place) {
                     LookupResult::Exact(peek_mpi) => {
-                        let bit_state = sets.on_entry.contains(peek_mpi);
+                        let bit_state = on_entry.contains(peek_mpi);
                         debug!("rustc_peek({:?} = &{:?}) bit_state: {}",
                                place, peeking_at_place, bit_state);
                         if !bit_state {
@@ -197,18 +191,18 @@
         debug!("rustc_peek: computing effect on place: {:?} ({:?}) in stmt: {:?}",
                place, lhs_mpi, stmt);
         // reset GEN and KILL sets before emulating their effect.
-        sets.gen_set.clear();
-        sets.kill_set.clear();
+        trans.clear();
         results.0.operator.before_statement_effect(
-            &mut sets, Location { block: bb, statement_index: j });
+            &mut trans,
+            Location { block: bb, statement_index: j });
         results.0.operator.statement_effect(
-            &mut sets, Location { block: bb, statement_index: j });
-        sets.on_entry.union(sets.gen_set);
-        sets.on_entry.subtract(sets.kill_set);
+            &mut trans,
+            Location { block: bb, statement_index: j });
+        trans.apply(&mut on_entry);
     }
 
     results.0.operator.before_terminator_effect(
-        &mut sets,
+        &mut trans,
         Location { block: bb, statement_index: statements.len() });
 
     tcx.sess.span_err(span, &format!("rustc_peek: MIR did not match \
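
The rewritten peek loop relies on two `GenKillSet` operations: `clear`, which empties both sets before a statement's effect is recomputed, and `apply`, which replaces the old `union`/`subtract` pair by folding the transfer function into the entry set as entry := (entry ∪ gen) \ kill. A toy stand-in (not the rustc type) showing both:

    #[derive(Clone, Default)]
    struct ToyGenKill {
        gen_set: Vec<bool>,
        kill_set: Vec<bool>,
    }

    impl ToyGenKill {
        fn clear(&mut self) {
            for b in self.gen_set.iter_mut().chain(self.kill_set.iter_mut()) {
                *b = false;
            }
        }

        /// entry := (entry ∪ gen) \ kill, bit by bit.
        fn apply(&self, entry: &mut [bool]) {
            for ((e, &g), &k) in entry.iter_mut().zip(&self.gen_set).zip(&self.kill_set) {
                *e = (*e || g) && !k;
            }
        }
    }

    fn main() {
        let mut on_entry = vec![true, false];
        let mut trans = ToyGenKill {
            gen_set: vec![false, true],  // the statement gens bit 1 ...
            kill_set: vec![true, false], // ... and kills bit 0
        };
        trans.apply(&mut on_entry);
        assert_eq!(on_entry, vec![false, true]);
        trans.clear();
        trans.apply(&mut on_entry); // a cleared transfer function is a no-op
        assert_eq!(on_entry, vec![false, true]);
    }
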
diff --git a/src/test/ui/consts/const-eval/union-ub-fat-ptr.stderr b/src/test/ui/consts/const-eval/union-ub-fat-ptr.stderr
index 5048a97..19db90c 100644
--- a/src/test/ui/consts/const-eval/union-ub-fat-ptr.stderr
+++ b/src/test/ui/consts/const-eval/union-ub-fat-ptr.stderr
@@ -42,7 +42,7 @@
   --> $DIR/union-ub-fat-ptr.rs:97:1
    |
 LL | const D: &dyn Trait = unsafe { DynTransmute { repr: DynRepr { ptr: &92, vtable: &3 } }.rust};
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered invalid drop fn in vtable
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered dangling or unaligned vtable pointer or too small vtable
    |
    = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
 
@@ -50,7 +50,7 @@
   --> $DIR/union-ub-fat-ptr.rs:100:1
    |
 LL | const E: &dyn Trait = unsafe { DynTransmute { repr2: DynRepr2 { ptr: &92, vtable: &3 } }.rust};
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered invalid drop fn in vtable
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered dangling or unaligned vtable pointer or too small vtable
    |
    = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior
 
@@ -58,7 +58,7 @@
   --> $DIR/union-ub-fat-ptr.rs:103:1
    |
 LL | const F: &dyn Trait = unsafe { DynTransmute { bad: BadDynRepr { ptr: &92, vtable: 3 } }.rust};
-   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered non-pointer vtable in fat pointer
+   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ type validation failed: encountered dangling or unaligned vtable pointer or too small vtable
    |
    = note: The rules on what exactly is undefined behavior aren't clear, so this check might be overzealous. Please open an issue on the rust compiler repository if you believe it should not be considered undefined behavior