Auto merge of #47209 - eddyb:ccx, r=nikomatsakis

rustc_trans: reorganize CrateContext and rename context types.

Firstly, the `{Shared,Local}CrateContext` split hasn't been meaningful for a while now, so this PR removes it by moving all of their fields into `CrateContext` and deleting the redundant accessor methods (a simplified sketch follows).
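
A rough before/after sketch of the consolidation, using hypothetical, heavily simplified stand-in fields (the real `CodegenCx` carries far more state, plus lifetimes):

```rust
use std::cell::RefCell;

struct Stats {
    n_llvm_insns: usize,
}

// Before this PR, crate-wide state lived in `SharedCrateContext`,
// per-codegen-unit state lived in `LocalCrateContext`, and `CrateContext`
// forwarded to both through accessor methods such as `ccx.stats()`.
// After the merge, everything sits on one struct and is reached through
// plain field access, as seen throughout the diff below.
struct CodegenCx {
    crate_name: String,    // stand-in for the former shared state
    stats: RefCell<Stats>, // stand-in for the former local state
}

fn record_insn(cx: &CodegenCx) {
    // Direct field access (`cx.stats`) replaces the old accessor call.
    cx.stats.borrow_mut().n_llvm_insns += 1;
}

fn main() {
    let cx = CodegenCx {
        crate_name: String::from("demo"),
        stats: RefCell::new(Stats { n_llvm_insns: 0 }),
    };
    record_insn(&cx);
    assert_eq!(cx.stats.borrow().n_llvm_insns, 1);
    let _ = &cx.crate_name;
}
```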

Secondly, this PR contains the following mass-renames (sketched after the list):
* `ccx: CrateContext` -> `cx: CodegenCx`
* `mircx: MirContext` -> `fx: FunctionCx`
* `bcx: Builder` -> `bx: Builder`
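
A minimal sketch of the new convention, using hypothetical stub types in place of the real ones:

```rust
// Hypothetical stubs; the real types live in rustc_trans and carry state.
struct CodegenCx;  // type was `CrateContext`; binding `ccx` becomes `cx`
struct FunctionCx; // type was `MirContext`; binding `mircx` becomes `fx`
struct Builder;    // type name unchanged; binding `bcx` becomes `bx`

// Post-rename signatures across the crate read like this; before the PR
// this would have been `ccx: &CrateContext, mircx: &MirContext, bcx: &Builder`.
fn codegen_demo(cx: &CodegenCx, fx: &FunctionCx, bx: &Builder) {
    let _ = (cx, fx, bx);
}

fn main() {
    codegen_demo(&CodegenCx, &FunctionCx, &Builder);
}
```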

r? @nikomatsakis
diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs
index 2cfb151..b97e37f 100644
--- a/src/librustc_llvm/ffi.rs
+++ b/src/librustc_llvm/ffi.rs
@@ -1661,7 +1661,6 @@
     pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef);
 
     pub fn LLVMRustSetDataLayoutFromTargetMachine(M: ModuleRef, TM: TargetMachineRef);
-    pub fn LLVMRustGetModuleDataLayout(M: ModuleRef) -> TargetDataRef;
 
     pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
                                          Inputs: *const ValueRef,
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 32dc106..07f9b8f 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -12,7 +12,7 @@
 use base;
 use builder::Builder;
 use common::{ty_fn_sig, C_usize};
-use context::CrateContext;
+use context::CodegenCx;
 use cabi_x86;
 use cabi_x86_64;
 use cabi_x86_win64;
@@ -209,8 +209,8 @@
 }
 
 impl Reg {
-    pub fn align(&self, ccx: &CrateContext) -> Align {
-        let dl = ccx.data_layout();
+    pub fn align(&self, cx: &CodegenCx) -> Align {
+        let dl = cx.data_layout();
         match self.kind {
             RegKind::Integer => {
                 match self.size.bits() {
@@ -234,18 +234,18 @@
         }
     }
 
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
         match self.kind {
-            RegKind::Integer => Type::ix(ccx, self.size.bits()),
+            RegKind::Integer => Type::ix(cx, self.size.bits()),
             RegKind::Float => {
                 match self.size.bits() {
-                    32 => Type::f32(ccx),
-                    64 => Type::f64(ccx),
+                    32 => Type::f32(cx),
+                    64 => Type::f64(cx),
                     _ => bug!("unsupported float: {:?}", self)
                 }
             }
             RegKind::Vector => {
-                Type::vector(&Type::i8(ccx), self.size.bytes())
+                Type::vector(&Type::i8(cx), self.size.bytes())
             }
         }
     }
@@ -276,12 +276,12 @@
 }
 
 impl Uniform {
-    pub fn align(&self, ccx: &CrateContext) -> Align {
-        self.unit.align(ccx)
+    pub fn align(&self, cx: &CodegenCx) -> Align {
+        self.unit.align(cx)
     }
 
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
-        let llunit = self.unit.llvm_type(ccx);
+    pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
+        let llunit = self.unit.llvm_type(cx);
 
         if self.total <= self.unit.size {
             return llunit;
@@ -298,16 +298,16 @@
         assert_eq!(self.unit.kind, RegKind::Integer);
 
         let args: Vec<_> = (0..count).map(|_| llunit)
-            .chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
+            .chain(iter::once(Type::ix(cx, rem_bytes * 8)))
             .collect();
 
-        Type::struct_(ccx, &args, false)
+        Type::struct_(cx, &args, false)
     }
 }
 
 pub trait LayoutExt<'tcx> {
     fn is_aggregate(&self) -> bool;
-    fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
+    fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg>;
 }
 
 impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
@@ -321,7 +321,7 @@
         }
     }
 
-    fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
+    fn homogeneous_aggregate<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<Reg> {
         match self.abi {
             layout::Abi::Uninhabited => None,
 
@@ -354,7 +354,7 @@
                 let is_union = match self.fields {
                     layout::FieldPlacement::Array { count, .. } => {
                         if count > 0 {
-                            return self.field(ccx, 0).homogeneous_aggregate(ccx);
+                            return self.field(cx, 0).homogeneous_aggregate(cx);
                         } else {
                             return None;
                         }
@@ -368,8 +368,8 @@
                         return None;
                     }
 
-                    let field = self.field(ccx, i);
-                    match (result, field.homogeneous_aggregate(ccx)) {
+                    let field = self.field(cx, i);
+                    match (result, field.homogeneous_aggregate(cx)) {
                         // The field itself must be a homogeneous aggregate.
                         (_, None) => return None,
                         // If this is the first field, record the unit.
@@ -423,34 +423,34 @@
 }
 
 impl CastTarget {
-    pub fn size(&self, ccx: &CrateContext) -> Size {
+    pub fn size(&self, cx: &CodegenCx) -> Size {
         match *self {
             CastTarget::Uniform(u) => u.total,
             CastTarget::Pair(a, b) => {
-                (a.size.abi_align(a.align(ccx)) + b.size)
-                    .abi_align(self.align(ccx))
+                (a.size.abi_align(a.align(cx)) + b.size)
+                    .abi_align(self.align(cx))
             }
         }
     }
 
-    pub fn align(&self, ccx: &CrateContext) -> Align {
+    pub fn align(&self, cx: &CodegenCx) -> Align {
         match *self {
-            CastTarget::Uniform(u) => u.align(ccx),
+            CastTarget::Uniform(u) => u.align(cx),
             CastTarget::Pair(a, b) => {
-                ccx.data_layout().aggregate_align
-                    .max(a.align(ccx))
-                    .max(b.align(ccx))
+                cx.data_layout().aggregate_align
+                    .max(a.align(cx))
+                    .max(b.align(cx))
             }
         }
     }
 
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    pub fn llvm_type(&self, cx: &CodegenCx) -> Type {
         match *self {
-            CastTarget::Uniform(u) => u.llvm_type(ccx),
+            CastTarget::Uniform(u) => u.llvm_type(cx),
             CastTarget::Pair(a, b) => {
-                Type::struct_(ccx, &[
-                    a.llvm_type(ccx),
-                    b.llvm_type(ccx)
+                Type::struct_(cx, &[
+                    a.llvm_type(cx),
+                    b.llvm_type(cx)
                 ], false)
             }
         }
@@ -547,28 +547,28 @@
 
     /// Get the LLVM type for a place of the original Rust type of
     /// this argument/return, i.e. the result of `type_of::type_of`.
-    pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        self.layout.llvm_type(ccx)
+    pub fn memory_ty(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
+        self.layout.llvm_type(cx)
     }
 
     /// Store a direct/indirect value described by this ArgType into a
     /// place for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
+    pub fn store(&self, bx: &Builder<'a, 'tcx>, val: ValueRef, dst: PlaceRef<'tcx>) {
         if self.is_ignore() {
             return;
         }
-        let ccx = bcx.ccx;
+        let cx = bx.cx;
         if self.is_indirect() {
-            OperandValue::Ref(val, self.layout.align).store(bcx, dst)
+            OperandValue::Ref(val, self.layout.align).store(bx, dst)
         } else if let PassMode::Cast(cast) = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to());
-                bcx.store(val, cast_dst, self.layout.align);
+                let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to());
+                bx.store(val, cast_dst, self.layout.align);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
@@ -585,44 +585,44 @@
                 //   bitcasting to the struct type yields invalid cast errors.
 
                 // We instead thus allocate some scratch space...
-                let scratch_size = cast.size(ccx);
-                let scratch_align = cast.align(ccx);
-                let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", scratch_align);
-                bcx.lifetime_start(llscratch, scratch_size);
+                let scratch_size = cast.size(cx);
+                let scratch_align = cast.align(cx);
+                let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
 
                 // ...where we first store the value...
-                bcx.store(val, llscratch, scratch_align);
+                bx.store(val, llscratch, scratch_align);
 
                 // ...and then memcpy it to the intended destination.
-                base::call_memcpy(bcx,
-                                  bcx.pointercast(dst.llval, Type::i8p(ccx)),
-                                  bcx.pointercast(llscratch, Type::i8p(ccx)),
-                                  C_usize(ccx, self.layout.size.bytes()),
+                base::call_memcpy(bx,
+                                  bx.pointercast(dst.llval, Type::i8p(cx)),
+                                  bx.pointercast(llscratch, Type::i8p(cx)),
+                                  C_usize(cx, self.layout.size.bytes()),
                                   self.layout.align.min(scratch_align));
 
-                bcx.lifetime_end(llscratch, scratch_size);
+                bx.lifetime_end(llscratch, scratch_size);
             }
         } else {
-            OperandValue::Immediate(val).store(bcx, dst);
+            OperandValue::Immediate(val).store(bx, dst);
         }
     }
 
-    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
+    pub fn store_fn_arg(&self, bx: &Builder<'a, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx>) {
         if self.pad.is_some() {
             *idx += 1;
         }
         let mut next = || {
-            let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
+            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
             *idx += 1;
             val
         };
         match self.mode {
             PassMode::Ignore => {},
             PassMode::Pair(..) => {
-                OperandValue::Pair(next(), next()).store(bcx, dst);
+                OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
-                self.store(bcx, next(), dst);
+                self.store(bx, next(), dst);
             }
         }
     }
@@ -647,26 +647,26 @@
 }
 
 impl<'a, 'tcx> FnType<'tcx> {
-    pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>)
+    pub fn of_instance(cx: &CodegenCx<'a, 'tcx>, instance: &ty::Instance<'tcx>)
                        -> Self {
-        let fn_ty = instance.ty(ccx.tcx());
-        let sig = ty_fn_sig(ccx, fn_ty);
-        let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
-        FnType::new(ccx, sig, &[])
+        let fn_ty = instance.ty(cx.tcx);
+        let sig = ty_fn_sig(cx, fn_ty);
+        let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
+        FnType::new(cx, sig, &[])
     }
 
-    pub fn new(ccx: &CrateContext<'a, 'tcx>,
+    pub fn new(cx: &CodegenCx<'a, 'tcx>,
                sig: ty::FnSig<'tcx>,
                extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
-        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
-        fn_ty.adjust_for_abi(ccx, sig.abi);
+        let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
+        fn_ty.adjust_for_abi(cx, sig.abi);
         fn_ty
     }
 
-    pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
+    pub fn new_vtable(cx: &CodegenCx<'a, 'tcx>,
                       sig: ty::FnSig<'tcx>,
                       extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
-        let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
+        let mut fn_ty = FnType::unadjusted(cx, sig, extra_args);
         // Don't pass the vtable, it's not an argument of the virtual fn.
         {
             let self_arg = &mut fn_ty.args[0];
@@ -681,20 +681,20 @@
                 .unwrap_or_else(|| {
                     bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
                 }).ty;
-            let fat_ptr_ty = ccx.tcx().mk_mut_ptr(pointee);
-            self_arg.layout = ccx.layout_of(fat_ptr_ty).field(ccx, 0);
+            let fat_ptr_ty = cx.tcx.mk_mut_ptr(pointee);
+            self_arg.layout = cx.layout_of(fat_ptr_ty).field(cx, 0);
         }
-        fn_ty.adjust_for_abi(ccx, sig.abi);
+        fn_ty.adjust_for_abi(cx, sig.abi);
         fn_ty
     }
 
-    pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
+    pub fn unadjusted(cx: &CodegenCx<'a, 'tcx>,
                       sig: ty::FnSig<'tcx>,
                       extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         debug!("FnType::unadjusted({:?}, {:?})", sig, extra_args);
 
         use self::Abi::*;
-        let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
+        let cconv = match cx.sess().target.target.adjust_abi(sig.abi) {
             RustIntrinsic | PlatformIntrinsic |
             Rust | RustCall => llvm::CCallConv,
 
@@ -737,7 +737,7 @@
             extra_args
         };
 
-        let target = &ccx.sess().target.target;
+        let target = &cx.sess().target.target;
         let win_x64_gnu = target.target_os == "windows"
                        && target.arch == "x86_64"
                        && target.target_env == "gnu";
@@ -772,7 +772,7 @@
                 }
             }
 
-            if let Some(pointee) = layout.pointee_info_at(ccx, offset) {
+            if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                 if let Some(kind) = pointee.safe {
                     attrs.pointee_size = pointee.size;
                     attrs.pointee_align = Some(pointee.align);
@@ -809,7 +809,7 @@
         };
 
         let arg_of = |ty: Ty<'tcx>, is_return: bool| {
-            let mut arg = ArgType::new(ccx.layout_of(ty));
+            let mut arg = ArgType::new(cx.layout_of(ty));
             if arg.layout.is_zst() {
                 // For some forsaken reason, x86_64-pc-windows-gnu
                 // doesn't ignore zero-sized struct arguments.
@@ -832,7 +832,7 @@
                     adjust_for_rust_scalar(&mut b_attrs,
                                            b,
                                            arg.layout,
-                                           a.value.size(ccx).abi_align(b.value.align(ccx)),
+                                           a.value.size(cx).abi_align(b.value.align(cx)),
                                            false);
                     arg.mode = PassMode::Pair(a_attrs, b_attrs);
                     return arg;
@@ -863,7 +863,7 @@
     }
 
     fn adjust_for_abi(&mut self,
-                      ccx: &CrateContext<'a, 'tcx>,
+                      cx: &CodegenCx<'a, 'tcx>,
                       abi: Abi) {
         if abi == Abi::Unadjusted { return }
 
@@ -878,7 +878,7 @@
                 }
 
                 let size = arg.layout.size;
-                if size > layout::Pointer.size(ccx) {
+                if size > layout::Pointer.size(cx) {
                     arg.make_indirect();
                 } else {
                     // We want to pass small aggregates as immediates, but using
@@ -900,38 +900,38 @@
             return;
         }
 
-        match &ccx.sess().target.target.arch[..] {
+        match &cx.sess().target.target.arch[..] {
             "x86" => {
                 let flavor = if abi == Abi::Fastcall {
                     cabi_x86::Flavor::Fastcall
                 } else {
                     cabi_x86::Flavor::General
                 };
-                cabi_x86::compute_abi_info(ccx, self, flavor);
+                cabi_x86::compute_abi_info(cx, self, flavor);
             },
             "x86_64" => if abi == Abi::SysV64 {
-                cabi_x86_64::compute_abi_info(ccx, self);
-            } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
+                cabi_x86_64::compute_abi_info(cx, self);
+            } else if abi == Abi::Win64 || cx.sess().target.target.options.is_like_windows {
                 cabi_x86_win64::compute_abi_info(self);
             } else {
-                cabi_x86_64::compute_abi_info(ccx, self);
+                cabi_x86_64::compute_abi_info(cx, self);
             },
-            "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
-            "arm" => cabi_arm::compute_abi_info(ccx, self),
-            "mips" => cabi_mips::compute_abi_info(ccx, self),
-            "mips64" => cabi_mips64::compute_abi_info(ccx, self),
-            "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
-            "powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
-            "s390x" => cabi_s390x::compute_abi_info(ccx, self),
-            "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
-            "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
+            "aarch64" => cabi_aarch64::compute_abi_info(cx, self),
+            "arm" => cabi_arm::compute_abi_info(cx, self),
+            "mips" => cabi_mips::compute_abi_info(cx, self),
+            "mips64" => cabi_mips64::compute_abi_info(cx, self),
+            "powerpc" => cabi_powerpc::compute_abi_info(cx, self),
+            "powerpc64" => cabi_powerpc64::compute_abi_info(cx, self),
+            "s390x" => cabi_s390x::compute_abi_info(cx, self),
+            "asmjs" => cabi_asmjs::compute_abi_info(cx, self),
+            "wasm32" => cabi_asmjs::compute_abi_info(cx, self),
             "msp430" => cabi_msp430::compute_abi_info(self),
-            "sparc" => cabi_sparc::compute_abi_info(ccx, self),
-            "sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
+            "sparc" => cabi_sparc::compute_abi_info(cx, self),
+            "sparc64" => cabi_sparc64::compute_abi_info(cx, self),
             "nvptx" => cabi_nvptx::compute_abi_info(self),
             "nvptx64" => cabi_nvptx64::compute_abi_info(self),
             "hexagon" => cabi_hexagon::compute_abi_info(self),
-            a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
+            a => cx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
         }
 
         if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
@@ -939,37 +939,37 @@
         }
     }
 
-    pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
+    pub fn llvm_type(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
         let mut llargument_tys = Vec::new();
 
         let llreturn_ty = match self.ret.mode {
-            PassMode::Ignore => Type::void(ccx),
+            PassMode::Ignore => Type::void(cx),
             PassMode::Direct(_) | PassMode::Pair(..) => {
-                self.ret.layout.immediate_llvm_type(ccx)
+                self.ret.layout.immediate_llvm_type(cx)
             }
-            PassMode::Cast(cast) => cast.llvm_type(ccx),
+            PassMode::Cast(cast) => cast.llvm_type(cx),
             PassMode::Indirect(_) => {
-                llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
-                Type::void(ccx)
+                llargument_tys.push(self.ret.memory_ty(cx).ptr_to());
+                Type::void(cx)
             }
         };
 
         for arg in &self.args {
             // add padding
             if let Some(ty) = arg.pad {
-                llargument_tys.push(ty.llvm_type(ccx));
+                llargument_tys.push(ty.llvm_type(cx));
             }
 
             let llarg_ty = match arg.mode {
                 PassMode::Ignore => continue,
-                PassMode::Direct(_) => arg.layout.immediate_llvm_type(ccx),
+                PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
                 PassMode::Pair(..) => {
-                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 0));
-                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 1));
+                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0));
+                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1));
                     continue;
                 }
-                PassMode::Cast(cast) => cast.llvm_type(ccx),
-                PassMode::Indirect(_) => arg.memory_ty(ccx).ptr_to(),
+                PassMode::Cast(cast) => cast.llvm_type(cx),
+                PassMode::Indirect(_) => arg.memory_ty(cx).ptr_to(),
             };
             llargument_tys.push(llarg_ty);
         }
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
index ef76fec..c7be0c4 100644
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -27,7 +27,7 @@
 
 // Take an inline assembly expression and splat it out via LLVM
 pub fn trans_inline_asm<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     ia: &hir::InlineAsm,
     outputs: Vec<PlaceRef<'tcx>>,
     mut inputs: Vec<ValueRef>
@@ -39,13 +39,13 @@
     let mut indirect_outputs = vec![];
     for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() {
         if out.is_rw {
-            inputs.push(place.load(bcx).immediate());
+            inputs.push(place.load(bx).immediate());
             ext_constraints.push(i.to_string());
         }
         if out.is_indirect {
-            indirect_outputs.push(place.load(bcx).immediate());
+            indirect_outputs.push(place.load(bx).immediate());
         } else {
-            output_types.push(place.layout.llvm_type(bcx.ccx));
+            output_types.push(place.layout.llvm_type(bx.cx));
         }
     }
     if !indirect_outputs.is_empty() {
@@ -58,7 +58,7 @@
 
     // Default per-arch clobbers
     // Basically what clang does
-    let arch_clobbers = match &bcx.sess().target.target.arch[..] {
+    let arch_clobbers = match &bx.sess().target.target.arch[..] {
         "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
         _                => Vec::new()
     };
@@ -76,9 +76,9 @@
     // Depending on how many outputs we have, the return type is different
     let num_outputs = output_types.len();
     let output_type = match num_outputs {
-        0 => Type::void(bcx.ccx),
+        0 => Type::void(bx.cx),
         1 => output_types[0],
-        _ => Type::struct_(bcx.ccx, &output_types, false)
+        _ => Type::struct_(bx.cx, &output_types, false)
     };
 
     let dialect = match ia.dialect {
@@ -88,7 +88,7 @@
 
     let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
     let constraint_cstr = CString::new(all_constraints).unwrap();
-    let r = bcx.inline_asm_call(
+    let r = bx.inline_asm_call(
         asm.as_ptr(),
         constraint_cstr.as_ptr(),
         &inputs,
@@ -101,28 +101,28 @@
     // Again, based on how many outputs we have
     let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
     for (i, (_, &place)) in outputs.enumerate() {
-        let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
-        OperandValue::Immediate(v).store(bcx, place);
+        let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) };
+        OperandValue::Immediate(v).store(bx, place);
     }
 
     // Store mark in a metadata node so we can map LLVM errors
     // back to source locations.  See #17552.
     unsafe {
         let key = "srcloc";
-        let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx.llcx(),
+        let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
             key.as_ptr() as *const c_char, key.len() as c_uint);
 
-        let val: llvm::ValueRef = C_i32(bcx.ccx, ia.ctxt.outer().as_u32() as i32);
+        let val: llvm::ValueRef = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
 
         llvm::LLVMSetMetadata(r, kind,
-            llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1));
+            llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));
     }
 }
 
-pub fn trans_global_asm<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn trans_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                   ga: &hir::GlobalAsm) {
     let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
     unsafe {
-        llvm::LLVMRustAppendModuleInlineAsm(ccx.llmod(), asm.as_ptr());
+        llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr());
     }
 }
diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs
index f3105e0..6c80883 100644
--- a/src/librustc_trans/attributes.rs
+++ b/src/librustc_trans/attributes.rs
@@ -24,7 +24,7 @@
 use llvm_util;
 pub use syntax::attr::{self, InlineAttr};
 use syntax::ast;
-use context::CrateContext;
+use context::CodegenCx;
 
 /// Mark LLVM function to use provided inline heuristic.
 #[inline]
@@ -67,27 +67,27 @@
     Attribute::Naked.toggle_llfn(Function, val, is_naked);
 }
 
-pub fn set_frame_pointer_elimination(ccx: &CrateContext, llfn: ValueRef) {
+pub fn set_frame_pointer_elimination(cx: &CodegenCx, llfn: ValueRef) {
     // FIXME: #11906: Omitting frame pointers breaks retrieving the value of a
     // parameter.
-    if ccx.sess().must_not_eliminate_frame_pointers() {
+    if cx.sess().must_not_eliminate_frame_pointers() {
         llvm::AddFunctionAttrStringValue(
             llfn, llvm::AttributePlace::Function,
             cstr("no-frame-pointer-elim\0"), cstr("true\0"));
     }
 }
 
-pub fn set_probestack(ccx: &CrateContext, llfn: ValueRef) {
+pub fn set_probestack(cx: &CodegenCx, llfn: ValueRef) {
     // Only use stack probes if the target specification indicates that we
     // should be using stack probes
-    if !ccx.sess().target.target.options.stack_probes {
+    if !cx.sess().target.target.options.stack_probes {
         return
     }
 
     // Currently stack probes seem somewhat incompatible with the address
     // sanitizer. With asan we're already protected from stack overflow anyway
     // so we don't really need stack probes regardless.
-    match ccx.sess().opts.debugging_opts.sanitizer {
+    match cx.sess().opts.debugging_opts.sanitizer {
         Some(Sanitizer::Address) => return,
         _ => {}
     }
@@ -101,13 +101,13 @@
 
 /// Composite function which sets LLVM attributes for function depending on its AST (#[attribute])
 /// attributes.
-pub fn from_fn_attrs(ccx: &CrateContext, llfn: ValueRef, id: DefId) {
+pub fn from_fn_attrs(cx: &CodegenCx, llfn: ValueRef, id: DefId) {
     use syntax::attr::*;
-    let attrs = ccx.tcx().get_attrs(id);
-    inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), &attrs));
+    let attrs = cx.tcx.get_attrs(id);
+    inline(llfn, find_inline_attr(Some(cx.sess().diagnostic()), &attrs));
 
-    set_frame_pointer_elimination(ccx, llfn);
-    set_probestack(ccx, llfn);
+    set_frame_pointer_elimination(cx, llfn);
+    set_probestack(cx, llfn);
 
     for attr in attrs.iter() {
         if attr.check_name("cold") {
@@ -124,7 +124,7 @@
         }
     }
 
-    let target_features = ccx.tcx().target_features_enabled(id);
+    let target_features = cx.tcx.target_features_enabled(id);
     if !target_features.is_empty() {
         let val = CString::new(target_features.join(",")).unwrap();
         llvm::AddFunctionAttrStringValue(
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 7bb81e9..633ed9b 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -57,9 +57,9 @@
 use callee;
 use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
 use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode};
-use common::{self, C_struct_in_context, C_array, CrateContext, val_ty};
+use common::{self, C_struct_in_context, C_array, val_ty};
 use consts;
-use context::{self, LocalCrateContext, SharedCrateContext};
+use context::{self, CodegenCx};
 use debuginfo;
 use declare;
 use meth;
@@ -94,16 +94,16 @@
 pub use rustc_mir::monomorphize::item::linkage_by_name;
 
 pub struct StatRecorder<'a, 'tcx: 'a> {
-    ccx: &'a CrateContext<'a, 'tcx>,
+    cx: &'a CodegenCx<'a, 'tcx>,
     name: Option<String>,
     istart: usize,
 }
 
 impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
-    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
-        let istart = ccx.stats().borrow().n_llvm_insns;
+    pub fn new(cx: &'a CodegenCx<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
+        let istart = cx.stats.borrow().n_llvm_insns;
         StatRecorder {
-            ccx,
+            cx,
             name: Some(name),
             istart,
         }
@@ -112,8 +112,8 @@
 
 impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
     fn drop(&mut self) {
-        if self.ccx.sess().trans_stats() {
-            let mut stats = self.ccx.stats().borrow_mut();
+        if self.cx.sess().trans_stats() {
+            let mut stats = self.cx.stats.borrow_mut();
             let iend = stats.n_llvm_insns;
             stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
             stats.n_fns += 1;
@@ -158,7 +158,7 @@
 }
 
 pub fn compare_simd_types<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef,
     t: Ty<'tcx>,
@@ -168,7 +168,7 @@
     let signed = match t.sty {
         ty::TyFloat(_) => {
             let cmp = bin_op_to_fcmp_predicate(op);
-            return bcx.sext(bcx.fcmp(cmp, lhs, rhs), ret_ty);
+            return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
         },
         ty::TyUint(_) => false,
         ty::TyInt(_) => true,
@@ -180,7 +180,7 @@
     // to get the correctly sized type. This will compile to a single instruction
     // once the IR is converted to assembly if the SIMD instruction is supported
     // by the target architecture.
-    bcx.sext(bcx.icmp(cmp, lhs, rhs), ret_ty)
+    bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
 }
 
 /// Retrieve the information we are losing (making dynamic) in an unsizing
@@ -189,15 +189,15 @@
 /// The `old_info` argument is a bit funny. It is intended for use
 /// in an upcast, where the new vtable for an object will be derived
 /// from the old one.
-pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
+pub fn unsized_info<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
                                 source: Ty<'tcx>,
                                 target: Ty<'tcx>,
                                 old_info: Option<ValueRef>)
                                 -> ValueRef {
-    let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
+    let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
     match (&source.sty, &target.sty) {
         (&ty::TyArray(_, len), &ty::TySlice(_)) => {
-            C_usize(ccx, len.val.to_const_int().unwrap().to_u64().unwrap())
+            C_usize(cx, len.val.to_const_int().unwrap().to_u64().unwrap())
         }
         (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
             // For now, upcasts are limited to changes in marker
@@ -206,10 +206,10 @@
             old_info.expect("unsized_info: missing old info for trait upcast")
         }
         (_, &ty::TyDynamic(ref data, ..)) => {
-            let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target))
-                .field(ccx, abi::FAT_PTR_EXTRA);
-            consts::ptrcast(meth::get_vtable(ccx, source, data.principal()),
-                            vtable_ptr.llvm_type(ccx))
+            let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target))
+                .field(cx, abi::FAT_PTR_EXTRA);
+            consts::ptrcast(meth::get_vtable(cx, source, data.principal()),
+                            vtable_ptr.llvm_type(cx))
         }
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                                      source,
@@ -219,7 +219,7 @@
 
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
 pub fn unsize_thin_ptr<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     src: ValueRef,
     src_ty: Ty<'tcx>,
     dst_ty: Ty<'tcx>
@@ -232,24 +232,24 @@
          &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
         (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
-            assert!(bcx.ccx.shared().type_is_sized(a));
-            let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to();
-            (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None))
+            assert!(bx.cx.type_is_sized(a));
+            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
         }
         (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
-            assert!(bcx.ccx.shared().type_is_sized(a));
-            let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to();
-            (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None))
+            assert!(bx.cx.type_is_sized(a));
+            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
+            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
         }
         (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
             assert_eq!(def_a, def_b);
 
-            let src_layout = bcx.ccx.layout_of(src_ty);
-            let dst_layout = bcx.ccx.layout_of(dst_ty);
+            let src_layout = bx.cx.layout_of(src_ty);
+            let dst_layout = bx.cx.layout_of(dst_ty);
             let mut result = None;
             for i in 0..src_layout.fields.count() {
-                let src_f = src_layout.field(bcx.ccx, i);
+                let src_f = src_layout.field(bx.cx, i);
                 assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                 assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                 if src_f.is_zst() {
@@ -257,15 +257,15 @@
                 }
                 assert_eq!(src_layout.size, src_f.size);
 
-                let dst_f = dst_layout.field(bcx.ccx, i);
+                let dst_f = dst_layout.field(bx.cx, i);
                 assert_ne!(src_f.ty, dst_f.ty);
                 assert_eq!(result, None);
-                result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty));
+                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
             }
             let (lldata, llextra) = result.unwrap();
             // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 0)),
-             bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 1)))
+            (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0)),
+             bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1)))
         }
         _ => bug!("unsize_thin_ptr: called on bad types"),
     }
@@ -273,27 +273,27 @@
 
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn coerce_unsized_into<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                      src: PlaceRef<'tcx>,
                                      dst: PlaceRef<'tcx>) {
     let src_ty = src.layout.ty;
     let dst_ty = dst.layout.ty;
     let coerce_ptr = || {
-        let (base, info) = match src.load(bcx).val {
+        let (base, info) = match src.load(bx).val {
             OperandValue::Pair(base, info) => {
                 // fat-ptr to fat-ptr unsize preserves the vtable
                 // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                 // So we need to pointercast the base to ensure
                 // the types match up.
-                let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR);
-                (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info)
+                let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR);
+                (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info)
             }
             OperandValue::Immediate(base) => {
-                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
+                unsize_thin_ptr(bx, base, src_ty, dst_ty)
             }
             OperandValue::Ref(..) => bug!()
         };
-        OperandValue::Pair(base, info).store(bcx, dst);
+        OperandValue::Pair(base, info).store(bx, dst);
     };
     match (&src_ty.sty, &dst_ty.sty) {
         (&ty::TyRef(..), &ty::TyRef(..)) |
@@ -309,18 +309,18 @@
             assert_eq!(def_a, def_b);
 
             for i in 0..def_a.variants[0].fields.len() {
-                let src_f = src.project_field(bcx, i);
-                let dst_f = dst.project_field(bcx, i);
+                let src_f = src.project_field(bx, i);
+                let dst_f = dst.project_field(bx, i);
 
                 if dst_f.layout.is_zst() {
                     continue;
                 }
 
                 if src_f.layout.ty == dst_f.layout.ty {
-                    memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
+                    memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout,
                         src_f.align.min(dst_f.align));
                 } else {
-                    coerce_unsized_into(bcx, src_f, dst_f);
+                    coerce_unsized_into(bx, src_f, dst_f);
                 }
             }
         }
@@ -388,47 +388,47 @@
     sess.target.target.options.is_like_msvc
 }
 
-pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
-    let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume");
-    b.call(assume_intrinsic, &[val], None);
+pub fn call_assume<'a, 'tcx>(bx: &Builder<'a, 'tcx>, val: ValueRef) {
+    let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
+    bx.call(assume_intrinsic, &[val], None);
 }
 
-pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
-    if val_ty(val) == Type::i1(bcx.ccx) {
-        bcx.zext(val, Type::i8(bcx.ccx))
+pub fn from_immediate(bx: &Builder, val: ValueRef) -> ValueRef {
+    if val_ty(val) == Type::i1(bx.cx) {
+        bx.zext(val, Type::i8(bx.cx))
     } else {
         val
     }
 }
 
-pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
+pub fn to_immediate(bx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
     if let layout::Abi::Scalar(ref scalar) = layout.abi {
         if scalar.is_bool() {
-            return bcx.trunc(val, Type::i1(bcx.ccx));
+            return bx.trunc(val, Type::i1(bx.cx));
         }
     }
     val
 }
 
-pub fn call_memcpy(b: &Builder,
+pub fn call_memcpy(bx: &Builder,
                    dst: ValueRef,
                    src: ValueRef,
                    n_bytes: ValueRef,
                    align: Align) {
-    let ccx = b.ccx;
-    let ptr_width = &ccx.sess().target.target.target_pointer_width;
+    let cx = bx.cx;
+    let ptr_width = &cx.sess().target.target.target_pointer_width;
     let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
-    let memcpy = ccx.get_intrinsic(&key);
-    let src_ptr = b.pointercast(src, Type::i8p(ccx));
-    let dst_ptr = b.pointercast(dst, Type::i8p(ccx));
-    let size = b.intcast(n_bytes, ccx.isize_ty(), false);
-    let align = C_i32(ccx, align.abi() as i32);
-    let volatile = C_bool(ccx, false);
-    b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
+    let memcpy = cx.get_intrinsic(&key);
+    let src_ptr = bx.pointercast(src, Type::i8p(cx));
+    let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
+    let size = bx.intcast(n_bytes, cx.isize_ty, false);
+    let align = C_i32(cx, align.abi() as i32);
+    let volatile = C_bool(cx, false);
+    bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }
 
 pub fn memcpy_ty<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     dst: ValueRef,
     src: ValueRef,
     layout: TyLayout<'tcx>,
@@ -439,28 +439,28 @@
         return;
     }
 
-    call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align);
+    call_memcpy(bx, dst, src, C_usize(bx.cx, size), align);
 }
 
-pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
+pub fn call_memset<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                              ptr: ValueRef,
                              fill_byte: ValueRef,
                              size: ValueRef,
                              align: ValueRef,
                              volatile: bool) -> ValueRef {
-    let ptr_width = &b.ccx.sess().target.target.target_pointer_width;
+    let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
     let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-    let llintrinsicfn = b.ccx.get_intrinsic(&intrinsic_key);
-    let volatile = C_bool(b.ccx, volatile);
-    b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
+    let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
+    let volatile = C_bool(bx.cx, volatile);
+    bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }
 
-pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
-    let _s = if ccx.sess().trans_stats() {
+pub fn trans_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
+    let _s = if cx.sess().trans_stats() {
         let mut instance_name = String::new();
-        DefPathBasedNames::new(ccx.tcx(), true, true)
+        DefPathBasedNames::new(cx.tcx, true, true)
             .push_def_path(instance.def_id(), &mut instance_name);
-        Some(StatRecorder::new(ccx, instance_name))
+        Some(StatRecorder::new(cx, instance_name))
     } else {
         None
     };
@@ -470,16 +470,16 @@
     // release builds.
     info!("trans_instance({})", instance);
 
-    let fn_ty = instance.ty(ccx.tcx());
-    let sig = common::ty_fn_sig(ccx, fn_ty);
-    let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
+    let fn_ty = instance.ty(cx.tcx);
+    let sig = common::ty_fn_sig(cx, fn_ty);
+    let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
 
-    let lldecl = match ccx.instances().borrow().get(&instance) {
+    let lldecl = match cx.instances.borrow().get(&instance) {
         Some(&val) => val,
         None => bug!("Instance `{:?}` not already declared", instance)
     };
 
-    ccx.stats().borrow_mut().n_closures += 1;
+    cx.stats.borrow_mut().n_closures += 1;
 
     // The `uwtable` attribute according to LLVM is:
     //
@@ -497,21 +497,21 @@
     //
     // You can also find more info on why Windows is whitelisted here in:
     //      https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
-    if !ccx.sess().no_landing_pads() ||
-       ccx.sess().target.target.options.is_like_windows {
+    if !cx.sess().no_landing_pads() ||
+       cx.sess().target.target.options.is_like_windows {
         attributes::emit_uwtable(lldecl, true);
     }
 
-    let mir = ccx.tcx().instance_mir(instance.def);
-    mir::trans_mir(ccx, lldecl, &mir, instance, sig);
+    let mir = cx.tcx.instance_mir(instance.def);
+    mir::trans_mir(cx, lldecl, &mir, instance, sig);
 }
 
-pub fn set_link_section(ccx: &CrateContext,
+pub fn set_link_section(cx: &CodegenCx,
                         llval: ValueRef,
                         attrs: &[ast::Attribute]) {
     if let Some(sect) = attr::first_attr_value_str_by_name(attrs, "link_section") {
         if contains_null(&sect.as_str()) {
-            ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
+            cx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
         }
         unsafe {
             let buf = CString::new(sect.as_str().as_bytes()).unwrap();
@@ -522,39 +522,39 @@
 
 /// Create the `main` function which will initialize the Rust runtime and call
 /// the user's main function.
-fn maybe_create_entry_wrapper(ccx: &CrateContext) {
-    let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() {
+fn maybe_create_entry_wrapper(cx: &CodegenCx) {
+    let (main_def_id, span) = match *cx.sess().entry_fn.borrow() {
         Some((id, span)) => {
-            (ccx.tcx().hir.local_def_id(id), span)
+            (cx.tcx.hir.local_def_id(id), span)
         }
         None => return,
     };
 
-    let instance = Instance::mono(ccx.tcx(), main_def_id);
+    let instance = Instance::mono(cx.tcx, main_def_id);
 
-    if !ccx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+    if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) {
         // We want to create the wrapper in the same codegen unit as Rust's main
         // function.
         return;
     }
 
-    let main_llfn = callee::get_fn(ccx, instance);
+    let main_llfn = callee::get_fn(cx, instance);
 
-    let et = ccx.sess().entry_type.get().unwrap();
+    let et = cx.sess().entry_type.get().unwrap();
     match et {
-        config::EntryMain => create_entry_fn(ccx, span, main_llfn, main_def_id, true),
-        config::EntryStart => create_entry_fn(ccx, span, main_llfn, main_def_id, false),
+        config::EntryMain => create_entry_fn(cx, span, main_llfn, main_def_id, true),
+        config::EntryStart => create_entry_fn(cx, span, main_llfn, main_def_id, false),
         config::EntryNone => {}    // Do nothing.
     }
 
-    fn create_entry_fn<'ccx>(ccx: &'ccx CrateContext,
+    fn create_entry_fn<'cx>(cx: &'cx CodegenCx,
                        sp: Span,
                        rust_main: ValueRef,
                        rust_main_def_id: DefId,
                        use_start_lang_item: bool) {
-        let llfty = Type::func(&[Type::c_int(ccx), Type::i8p(ccx).ptr_to()], &Type::c_int(ccx));
+        let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], &Type::c_int(cx));
 
-        let main_ret_ty = ccx.tcx().fn_sig(rust_main_def_id).output();
+        let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
         // Given that `main()` has no arguments,
         // then its return type cannot have
         // late-bound regions, since late-bound
@@ -562,42 +562,42 @@
         // listing.
         let main_ret_ty = main_ret_ty.no_late_bound_regions().unwrap();
 
-        if declare::get_defined_value(ccx, "main").is_some() {
+        if declare::get_defined_value(cx, "main").is_some() {
             // FIXME: We should be smart and show a better diagnostic here.
-            ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
+            cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
                       .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
                       .emit();
-            ccx.sess().abort_if_errors();
+            cx.sess().abort_if_errors();
             bug!();
         }
-        let llfn = declare::declare_cfn(ccx, "main", llfty);
+        let llfn = declare::declare_cfn(cx, "main", llfty);
 
         // `main` should respect same config for frame pointer elimination as rest of code
-        attributes::set_frame_pointer_elimination(ccx, llfn);
+        attributes::set_frame_pointer_elimination(cx, llfn);
 
-        let bld = Builder::new_block(ccx, llfn, "top");
+        let bx = Builder::new_block(cx, llfn, "top");
 
-        debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld);
+        debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx);
 
         // Params from native main() used as args for rust start function
         let param_argc = get_param(llfn, 0);
         let param_argv = get_param(llfn, 1);
-        let arg_argc = bld.intcast(param_argc, ccx.isize_ty(), true);
+        let arg_argc = bx.intcast(param_argc, cx.isize_ty, true);
         let arg_argv = param_argv;
 
         let (start_fn, args) = if use_start_lang_item {
-            let start_def_id = ccx.tcx().require_lang_item(StartFnLangItem);
-            let start_fn = callee::resolve_and_get_fn(ccx, start_def_id, ccx.tcx().mk_substs(
+            let start_def_id = cx.tcx.require_lang_item(StartFnLangItem);
+            let start_fn = callee::resolve_and_get_fn(cx, start_def_id, cx.tcx.mk_substs(
                 iter::once(Kind::from(main_ret_ty))));
-            (start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()),
+            (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()),
                             arg_argc, arg_argv])
         } else {
             debug!("using user-defined start fn");
             (rust_main, vec![arg_argc, arg_argv])
         };
 
-        let result = bld.call(start_fn, &args, None);
-        bld.ret(bld.intcast(result, Type::c_int(ccx), true));
+        let result = bx.call(start_fn, &args, None);
+        bx.ret(bx.intcast(result, Type::c_int(cx), true));
     }
 }
 
@@ -721,7 +721,6 @@
     let link_meta = link::build_link_meta(crate_hash);
     let exported_symbol_node_ids = find_exported_symbols(tcx);
 
-    let shared_ccx = SharedCrateContext::new(tcx);
     // Translate the metadata.
     let llmod_id = "metadata";
     let (metadata_llcx, metadata_llmod, metadata) =
@@ -770,7 +769,7 @@
     // Run the translation item collector and partition the collected items into
     // codegen units.
     let codegen_units =
-        shared_ccx.tcx().collect_and_partition_translation_items(LOCAL_CRATE).1;
+        tcx.collect_and_partition_translation_items(LOCAL_CRATE).1;
     let codegen_units = (*codegen_units).clone();
 
     // Force all codegen_unit queries so they are already either red or green
@@ -910,7 +909,7 @@
 
     symbol_names_test::report_symbol_names(tcx);
 
-    if shared_ccx.sess().trans_stats() {
+    if tcx.sess.trans_stats() {
         println!("--- trans stats ---");
         println!("n_glues_created: {}", all_stats.n_glues_created);
         println!("n_null_glues: {}", all_stats.n_null_glues);
@@ -926,7 +925,7 @@
         }
     }
 
-    if shared_ccx.sess().count_llvm_insns() {
+    if tcx.sess.count_llvm_insns() {
         for (k, v) in all_stats.llvm_insns.iter() {
             println!("{:7} {}", *v, *k);
         }
@@ -1204,27 +1203,25 @@
                                    .to_fingerprint().to_hex());
 
         // Instantiate translation items without filling out definitions yet...
-        let scx = SharedCrateContext::new(tcx);
-        let lcx = LocalCrateContext::new(&scx, cgu, &llmod_id);
+        let cx = CodegenCx::new(tcx, cgu, &llmod_id);
         let module = {
-            let ccx = CrateContext::new(&scx, &lcx);
-            let trans_items = ccx.codegen_unit()
-                                 .items_in_deterministic_order(ccx.tcx());
+            let trans_items = cx.codegen_unit
+                                 .items_in_deterministic_order(cx.tcx);
             for &(trans_item, (linkage, visibility)) in &trans_items {
-                trans_item.predefine(&ccx, linkage, visibility);
+                trans_item.predefine(&cx, linkage, visibility);
             }
 
             // ... and now that we have everything pre-defined, fill out those definitions.
             for &(trans_item, _) in &trans_items {
-                trans_item.define(&ccx);
+                trans_item.define(&cx);
             }
 
             // If this codegen unit contains the main function, also create the
             // wrapper here
-            maybe_create_entry_wrapper(&ccx);
+            maybe_create_entry_wrapper(&cx);
 
             // Run replace-all-uses-with for statics that need it
-            for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
+            for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() {
                 unsafe {
                     let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
                     llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
@@ -1234,13 +1231,13 @@
 
             // Create the llvm.used variable
             // This variable has type [N x i8*] and is stored in the llvm.metadata section
-            if !ccx.used_statics().borrow().is_empty() {
+            if !cx.used_statics.borrow().is_empty() {
                 let name = CString::new("llvm.used").unwrap();
                 let section = CString::new("llvm.metadata").unwrap();
-                let array = C_array(Type::i8(&ccx).ptr_to(), &*ccx.used_statics().borrow());
+                let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow());
 
                 unsafe {
-                    let g = llvm::LLVMAddGlobal(ccx.llmod(),
+                    let g = llvm::LLVMAddGlobal(cx.llmod,
                                                 val_ty(array).to_ref(),
                                                 name.as_ptr());
                     llvm::LLVMSetInitializer(g, array);
@@ -1250,14 +1247,14 @@
             }
 
             // Finalize debuginfo
-            if ccx.sess().opts.debuginfo != NoDebugInfo {
-                debuginfo::finalize(&ccx);
+            if cx.sess().opts.debuginfo != NoDebugInfo {
+                debuginfo::finalize(&cx);
             }
 
             let llvm_module = ModuleLlvm {
-                llcx: ccx.llcx(),
-                llmod: ccx.llmod(),
-                tm: create_target_machine(ccx.sess()),
+                llcx: cx.llcx,
+                llmod: cx.llmod,
+                tm: create_target_machine(cx.sess()),
             };
 
             ModuleTranslation {
@@ -1268,7 +1265,7 @@
             }
         };
 
-        (lcx.into_stats(), module)
+        (cx.into_stats(), module)
     }
 }
 
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 4a0b138..5ab8d03 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -32,7 +32,7 @@
 #[must_use]
 pub struct Builder<'a, 'tcx: 'a> {
     pub llbuilder: BuilderRef,
-    pub ccx: &'a CrateContext<'a, 'tcx>,
+    pub cx: &'a CodegenCx<'a, 'tcx>,
 }
 
 impl<'a, 'tcx> Drop for Builder<'a, 'tcx> {
@@ -51,41 +51,41 @@
 }
 
 impl<'a, 'tcx> Builder<'a, 'tcx> {
-    pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
-        let builder = Builder::with_ccx(ccx);
+    pub fn new_block<'b>(cx: &'a CodegenCx<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
+        let bx = Builder::with_cx(cx);
         let llbb = unsafe {
             let name = CString::new(name).unwrap();
             llvm::LLVMAppendBasicBlockInContext(
-                ccx.llcx(),
+                cx.llcx,
                 llfn,
                 name.as_ptr()
             )
         };
-        builder.position_at_end(llbb);
-        builder
+        bx.position_at_end(llbb);
+        bx
     }
 
-    pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
+    pub fn with_cx(cx: &'a CodegenCx<'a, 'tcx>) -> Self {
         // Create a fresh builder from the crate context.
         let llbuilder = unsafe {
-            llvm::LLVMCreateBuilderInContext(ccx.llcx())
+            llvm::LLVMCreateBuilderInContext(cx.llcx)
         };
         Builder {
             llbuilder,
-            ccx,
+            cx,
         }
     }
 
     pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'tcx> {
-        Builder::new_block(self.ccx, self.llfn(), name)
+        Builder::new_block(self.cx, self.llfn(), name)
     }
 
     pub fn sess(&self) -> &Session {
-        self.ccx.sess()
+        self.cx.sess()
     }
 
     pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> {
-        self.ccx.tcx()
+        self.cx.tcx
     }
 
     pub fn llfn(&self) -> ValueRef {
@@ -101,11 +101,11 @@
     }
 
     fn count_insn(&self, category: &str) {
-        if self.ccx.sess().trans_stats() {
-            self.ccx.stats().borrow_mut().n_llvm_insns += 1;
+        if self.cx.sess().trans_stats() {
+            self.cx.stats.borrow_mut().n_llvm_insns += 1;
         }
-        if self.ccx.sess().count_llvm_insns() {
-            *self.ccx.stats()
+        if self.cx.sess().count_llvm_insns() {
+            *self.cx.stats
                 .borrow_mut()
                 .llvm_insns
                 .entry(category.to_string())
@@ -489,11 +489,11 @@
     }
 
     pub fn alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
-        let builder = Builder::with_ccx(self.ccx);
-        builder.position_at_start(unsafe {
+        let bx = Builder::with_cx(self.cx);
+        bx.position_at_start(unsafe {
             llvm::LLVMGetFirstBasicBlock(self.llfn())
         });
-        builder.dynamic_alloca(ty, name, align)
+        bx.dynamic_alloca(ty, name, align)
     }
 
     pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
@@ -558,7 +558,7 @@
             ];
 
             llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(),
+                                  llvm::LLVMMDNodeInContext(self.cx.llcx,
                                                             v.as_ptr(),
                                                             v.len() as c_uint));
         }
@@ -567,7 +567,7 @@
     pub fn nonnull_metadata(&self, load: ValueRef) {
         unsafe {
             llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
+                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
         }
     }
 
@@ -620,8 +620,8 @@
             // point to a metadata value of the integer 1. Who knew?
             //
             // [1]: http://llvm.org/docs/LangRef.html#store-instruction
-            let one = C_i32(self.ccx, 1);
-            let node = llvm::LLVMMDNodeInContext(self.ccx.llcx(),
+            let one = C_i32(self.cx, 1);
+            let node = llvm::LLVMMDNodeInContext(self.cx.llcx,
                                                  &one,
                                                  1);
             llvm::LLVMSetMetadata(insn,
@@ -840,24 +840,24 @@
     }
 
     pub fn add_span_comment(&self, sp: Span, text: &str) {
-        if self.ccx.sess().asm_comments() {
+        if self.cx.sess().asm_comments() {
             let s = format!("{} ({})",
                             text,
-                            self.ccx.sess().codemap().span_to_string(sp));
+                            self.cx.sess().codemap().span_to_string(sp));
             debug!("{}", s);
             self.add_comment(&s);
         }
     }
 
     pub fn add_comment(&self, text: &str) {
-        if self.ccx.sess().asm_comments() {
+        if self.cx.sess().asm_comments() {
             let sanitized = text.replace("$", "");
             let comment_text = format!("{} {}", "#",
                                        sanitized.replace("\n", "\n\t# "));
             self.count_insn("inlineasm");
             let comment_text = CString::new(comment_text).unwrap();
             let asm = unsafe {
-                llvm::LLVMConstInlineAsm(Type::func(&[], &Type::void(self.ccx)).to_ref(),
+                llvm::LLVMConstInlineAsm(Type::func(&[], &Type::void(self.cx)).to_ref(),
                                          comment_text.as_ptr(), noname(), False,
                                          False)
             };
@@ -949,8 +949,8 @@
         unsafe {
             let elt_ty = val_ty(elt);
             let undef = llvm::LLVMGetUndef(Type::vector(&elt_ty, num_elts as u64).to_ref());
-            let vec = self.insert_element(undef, elt, C_i32(self.ccx, 0));
-            let vec_i32_ty = Type::vector(&Type::i32(self.ccx), num_elts as u64);
+            let vec = self.insert_element(undef, elt, C_i32(self.cx, 0));
+            let vec_i32_ty = Type::vector(&Type::i32(self.cx), num_elts as u64);
             self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
         }
     }
@@ -1160,7 +1160,7 @@
     pub fn set_invariant_load(&self, load: ValueRef) {
         unsafe {
             llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint,
-                                  llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
+                                  llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0));
         }
     }
 
@@ -1245,7 +1245,7 @@
     /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
     /// are off) or `ptr` is zero-sized, then this is a no-op (does not call `emit`).
     fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ValueRef, size: Size) {
-        if self.ccx.sess().opts.optimize == config::OptLevel::No {
+        if self.cx.sess().opts.optimize == config::OptLevel::No {
             return;
         }
 
@@ -1254,9 +1254,9 @@
             return;
         }
 
-        let lifetime_intrinsic = self.ccx.get_intrinsic(intrinsic);
+        let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
 
-        let ptr = self.pointercast(ptr, Type::i8p(self.ccx));
-        self.call(lifetime_intrinsic, &[C_u64(self.ccx, size), ptr], None);
+        let ptr = self.pointercast(ptr, Type::i8p(self.cx));
+        self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None);
     }
 }
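
The `Builder` changes keep its RAII shape: the wrapper is `#[must_use]` and frees its LLVM handle on `Drop`. A minimal sketch of that pattern, with a plain integer standing in for `BuilderRef` and a `println!` standing in for the dispose call:

```rust
#[must_use]
struct Bx {
    handle: u32, // stands in for llvm::BuilderRef
}

impl Bx {
    fn with_cx(handle: u32) -> Bx {
        // rustc creates the real handle from cx.llcx here.
        Bx { handle }
    }
}

impl Drop for Bx {
    fn drop(&mut self) {
        // rustc disposes the LLVM builder here; we just log.
        println!("disposing builder {}", self.handle);
    }
}

fn main() {
    let bx = Bx::with_cx(1);
    println!("using builder {}", bx.handle);
} // `bx` dropped here, releasing the handle
```
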
diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs
index d5f341f..72ae144 100644
--- a/src/librustc_trans/cabi_aarch64.rs
+++ b/src/librustc_trans/cabi_aarch64.rs
@@ -9,15 +9,15 @@
 // except according to those terms.
 
 use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
-fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
-    arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+    arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
         let size = arg.layout.size;
 
         // Ensure we have at most four uniquely addressable members.
-        if size > unit.size.checked_mul(4, ccx).unwrap() {
+        if size > unit.size.checked_mul(4, cx).unwrap() {
             return None;
         }
 
@@ -38,12 +38,12 @@
     })
 }
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
         return;
     }
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
         ret.cast_to(uniform);
         return;
     }
@@ -69,12 +69,12 @@
     ret.make_indirect();
 }
 
-fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(32);
         return;
     }
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
         arg.cast_to(uniform);
         return;
     }
@@ -100,13 +100,13 @@
     arg.make_indirect();
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret);
+        classify_ret_ty(cx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg);
+        classify_arg_ty(cx, arg);
     }
 }
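
The AArch64 rule being threaded through here, that a homogeneous aggregate is passed in registers only when it fits in at most four copies of its unit, reduces to a checked multiply. A simplified model using plain byte counts instead of the compiler's `Size` and `Reg` types:

```rust
fn fits_in_four_units(total_size: u64, unit_size: u64) -> bool {
    match unit_size.checked_mul(4) {
        Some(max) => total_size <= max,
        None => false, // multiplication overflowed: certainly too large
    }
}

fn main() {
    assert!(fits_in_four_units(16, 4));  // e.g. four f32 members
    assert!(!fits_in_four_units(20, 4)); // five members: passed indirectly
    println!("ok");
}
```
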
diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs
index 438053d..b6cf16c 100644
--- a/src/librustc_trans/cabi_arm.rs
+++ b/src/librustc_trans/cabi_arm.rs
@@ -9,16 +9,16 @@
 // except according to those terms.
 
 use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 use llvm::CallConv;
 
-fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
-    arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+    arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
         let size = arg.layout.size;
 
         // Ensure we have at most four uniquely addressable members.
-        if size > unit.size.checked_mul(4, ccx).unwrap() {
+        if size > unit.size.checked_mul(4, cx).unwrap() {
             return None;
         }
 
@@ -39,14 +39,14 @@
     })
 }
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) {
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, vfp: bool) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
         return;
     }
 
     if vfp {
-        if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
+        if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
             ret.cast_to(uniform);
             return;
         }
@@ -71,14 +71,14 @@
     ret.make_indirect();
 }
 
-fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(32);
         return;
     }
 
     if vfp {
-        if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
+        if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
             arg.cast_to(uniform);
             return;
         }
@@ -92,19 +92,19 @@
     });
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     // If this is a target with a hard-float ABI, and the function is not explicitly
     // `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
-    let vfp = ccx.sess().target.target.llvm_target.ends_with("hf")
+    let vfp = cx.sess().target.target.llvm_target.ends_with("hf")
         && fty.cconv != CallConv::ArmAapcsCallConv
         && !fty.variadic;
 
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, vfp);
+        classify_ret_ty(cx, &mut fty.ret, vfp);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, vfp);
+        classify_arg_ty(cx, arg, vfp);
     }
 }
diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs
index 1664251..b182f83 100644
--- a/src/librustc_trans/cabi_asmjs.rs
+++ b/src/librustc_trans/cabi_asmjs.rs
@@ -9,16 +9,16 @@
 // except according to those terms.
 
 use abi::{FnType, ArgType, LayoutExt, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
 // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
 
 // See the https://github.com/kripken/emscripten-fastcomp-clang repository.
 // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if ret.layout.is_aggregate() {
-        if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) {
+        if let Some(unit) = ret.layout.homogeneous_aggregate(cx) {
             let size = ret.layout.size;
             if unit.size == size {
                 ret.cast_to(Uniform {
@@ -39,9 +39,9 @@
     }
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret);
+        classify_ret_ty(cx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs
index fe61670..cd567f5 100644
--- a/src/librustc_trans/cabi_mips.rs
+++ b/src/librustc_trans/cabi_mips.rs
@@ -9,23 +9,23 @@
 // except according to those terms.
 
 use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::Size;
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              ret: &mut ArgType<'tcx>,
                              offset: &mut Size) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        *offset += cx.tcx.data_layout.pointer_size;
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
+fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
+    let dl = &cx.tcx.data_layout;
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
 
@@ -44,14 +44,14 @@
     *offset = offset.abi_align(align) + size.abi_align(align);
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(cx, &mut fty.ret, &mut offset);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, &mut offset);
+        classify_arg_ty(cx, arg, &mut offset);
     }
 }
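
The offset bookkeeping in `classify_arg_ty` (`*offset = offset.abi_align(align) + size.abi_align(align)`) is round-up-to-a-multiple arithmetic. A sketch with `abi_align` written out for power-of-two alignments, using plain `u64`s rather than `Size`:

```rust
// Round `value` up to a multiple of `align`; assumes `align` is a power of two.
fn abi_align(value: u64, align: u64) -> u64 {
    (value + align - 1) & !(align - 1)
}

fn main() {
    let mut offset = 0u64;
    // An 8-byte argument at 8-byte alignment, then a 4-byte one.
    for &(size, align) in &[(8u64, 8u64), (4, 4)] {
        let start = abi_align(offset, align);
        println!("arg of size {} placed at offset {}", size, start);
        // Mirrors `*offset = offset.abi_align(align) + size.abi_align(align)`.
        offset = start + abi_align(size, align);
    }
    assert_eq!(offset, 12);
}
```
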
diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs
index 16d0cfe..e44063f 100644
--- a/src/librustc_trans/cabi_mips64.rs
+++ b/src/librustc_trans/cabi_mips64.rs
@@ -9,23 +9,23 @@
 // except according to those terms.
 
 use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::Size;
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              ret: &mut ArgType<'tcx>,
                              offset: &mut Size) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
     } else {
         ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        *offset += cx.tcx.data_layout.pointer_size;
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
+fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
+    let dl = &cx.tcx.data_layout;
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
 
@@ -44,14 +44,14 @@
     *offset = offset.abi_align(align) + size.abi_align(align);
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(cx, &mut fty.ret, &mut offset);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, &mut offset);
+        classify_arg_ty(cx, arg, &mut offset);
     }
 }
diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs
index c3c8c74..1ea6e9b 100644
--- a/src/librustc_trans/cabi_powerpc.rs
+++ b/src/librustc_trans/cabi_powerpc.rs
@@ -9,23 +9,23 @@
 // except according to those terms.
 
 use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::Size;
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              ret: &mut ArgType<'tcx>,
                              offset: &mut Size) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        *offset += cx.tcx.data_layout.pointer_size;
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
+fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
+    let dl = &cx.tcx.data_layout;
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
 
@@ -44,14 +44,14 @@
     *offset = offset.abi_align(align) + size.abi_align(align);
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(cx, &mut fty.ret, &mut offset);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, &mut offset);
+        classify_arg_ty(cx, arg, &mut offset);
     }
 }
diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs
index 2206a4f..c614cf3 100644
--- a/src/librustc_trans/cabi_powerpc64.rs
+++ b/src/librustc_trans/cabi_powerpc64.rs
@@ -13,7 +13,7 @@
 // need to be fixed when PowerPC vector support is added.
 
 use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 use rustc::ty::layout;
 
 #[derive(Debug, Clone, Copy, PartialEq)]
@@ -23,15 +23,15 @@
 }
 use self::ABI::*;
 
-fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                       arg: &mut ArgType<'tcx>,
                                       abi: ABI)
                                      -> Option<Uniform> {
-    arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+    arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
         // ELFv1 only passes one-member aggregates transparently.
         // ELFv2 passes up to eight uniquely addressable members.
         if (abi == ELFv1 && arg.layout.size > unit.size)
-                || arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
+                || arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
             return None;
         }
 
@@ -52,7 +52,7 @@
     })
 }
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>, abi: ABI) {
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>, abi: ABI) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
         return;
@@ -64,7 +64,7 @@
         return;
     }
 
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, ret, abi) {
         ret.cast_to(uniform);
         return;
     }
@@ -92,13 +92,13 @@
     ret.make_indirect();
 }
 
-fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(64);
         return;
     }
 
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
         arg.cast_to(uniform);
         return;
     }
@@ -128,19 +128,19 @@
     });
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let abi = match ccx.sess().target.target.target_endian.as_str() {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+    let abi = match cx.sess().target.target.target_endian.as_str() {
         "big" => ELFv1,
         "little" => ELFv2,
         _ => unimplemented!(),
     };
 
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, abi);
+        classify_ret_ty(cx, &mut fty.ret, abi);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, abi);
+        classify_arg_ty(cx, arg, abi);
     }
 }
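
The ABI choice at the top of `compute_abi_info` keys off endianness alone: big-endian 64-bit PowerPC targets get ELFv1, little-endian ones ELFv2. The same decision in isolation, with a plain function standing in for the `Session`/`Target` plumbing:

```rust
#[derive(Debug, PartialEq)]
enum Abi { ELFv1, ELFv2 }

fn powerpc64_abi(target_endian: &str) -> Abi {
    match target_endian {
        "big" => Abi::ELFv1,
        "little" => Abi::ELFv2,
        other => unimplemented!("unknown endianness {:?}", other),
    }
}

fn main() {
    assert_eq!(powerpc64_abi("big"), Abi::ELFv1);
    assert_eq!(powerpc64_abi("little"), Abi::ELFv2);
}
```
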
diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs
index 9fb4600..5e81768 100644
--- a/src/librustc_trans/cabi_s390x.rs
+++ b/src/librustc_trans/cabi_s390x.rs
@@ -12,7 +12,7 @@
 // for a pre-z13 machine or using -mno-vx.
 
 use abi::{FnType, ArgType, LayoutExt, Reg};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::{self, TyLayout};
 
@@ -24,7 +24,7 @@
     }
 }
 
-fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
         layout::Abi::Scalar(ref scalar) => {
@@ -35,7 +35,7 @@
         }
         layout::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
-                is_single_fp_element(ccx, layout.field(ccx, 0))
+                is_single_fp_element(cx, layout.field(cx, 0))
             } else {
                 false
             }
@@ -44,13 +44,13 @@
     }
 }
 
-fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
     if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
         arg.extend_integer_width_to(64);
         return;
     }
 
-    if is_single_fp_element(ccx, arg.layout) {
+    if is_single_fp_element(cx, arg.layout) {
         match arg.layout.size.bytes() {
             4 => arg.cast_to(Reg::f32()),
             8 => arg.cast_to(Reg::f64()),
@@ -67,13 +67,13 @@
     }
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(&mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg);
+        classify_arg_ty(cx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs
index fe61670..cd567f5 100644
--- a/src/librustc_trans/cabi_sparc.rs
+++ b/src/librustc_trans/cabi_sparc.rs
@@ -9,23 +9,23 @@
 // except according to those terms.
 
 use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::Size;
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              ret: &mut ArgType<'tcx>,
                              offset: &mut Size) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        *offset += cx.tcx.data_layout.pointer_size;
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
+fn classify_arg_ty(cx: &CodegenCx, arg: &mut ArgType, offset: &mut Size) {
+    let dl = &cx.tcx.data_layout;
     let size = arg.layout.size;
     let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
 
@@ -44,14 +44,14 @@
     *offset = offset.abi_align(align) + size.abi_align(align);
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(cx, &mut fty.ret, &mut offset);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, &mut offset);
+        classify_arg_ty(cx, arg, &mut offset);
     }
 }
diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs
index 7c52e27..fb94b09 100644
--- a/src/librustc_trans/cabi_sparc64.rs
+++ b/src/librustc_trans/cabi_sparc64.rs
@@ -11,13 +11,13 @@
 // FIXME: This needs an audit for correctness and completeness.
 
 use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
-use context::CrateContext;
+use context::CodegenCx;
 
-fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+fn is_homogeneous_aggregate<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
-    arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+    arg.layout.homogeneous_aggregate(cx).and_then(|unit| {
         // Ensure we have at most eight uniquely addressable members.
-        if arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
+        if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
             return None;
         }
 
@@ -38,13 +38,13 @@
     })
 }
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+fn classify_ret_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
         return;
     }
 
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
         ret.cast_to(uniform);
         return;
     }
@@ -72,13 +72,13 @@
     ret.make_indirect();
 }
 
-fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+fn classify_arg_ty<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
     if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(64);
         return;
     }
 
-    if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
+    if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
         arg.cast_to(uniform);
         return;
     }
@@ -90,13 +90,13 @@
     });
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret);
+        classify_ret_ty(cx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg);
+        classify_arg_ty(cx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs
index 6fd0140..60347ba 100644
--- a/src/librustc_trans/cabi_x86.rs
+++ b/src/librustc_trans/cabi_x86.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
-use common::CrateContext;
+use common::CodegenCx;
 
 use rustc::ty::layout::{self, TyLayout};
 
@@ -19,7 +19,7 @@
     Fastcall
 }
 
-fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn is_single_fp_element<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
         layout::Abi::Scalar(ref scalar) => {
@@ -30,7 +30,7 @@
         }
         layout::Abi::Aggregate { .. } => {
             if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
-                is_single_fp_element(ccx, layout.field(ccx, 0))
+                is_single_fp_element(cx, layout.field(cx, 0))
             } else {
                 false
             }
@@ -39,7 +39,7 @@
     }
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                   fty: &mut FnType<'tcx>,
                                   flavor: Flavor) {
     if !fty.ret.is_ignore() {
@@ -51,12 +51,12 @@
             // Some links:
             // http://www.angelcode.com/dev/callconv/callconv.html
             // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
-            let t = &ccx.sess().target.target;
+            let t = &cx.sess().target.target;
             if t.options.is_like_osx || t.options.is_like_windows
                 || t.options.is_like_openbsd {
                 // According to Clang, everyone but MSVC returns single-element
                 // float aggregates directly in a floating-point register.
-                if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) {
+                if !t.options.is_like_msvc && is_single_fp_element(cx, fty.ret.layout) {
                     match fty.ret.layout.size.bytes() {
                         4 => fty.ret.cast_to(Reg::f32()),
                         8 => fty.ret.cast_to(Reg::f64()),
@@ -112,7 +112,7 @@
             };
 
             // At this point we know this must be a primitive of sorts.
-            let unit = arg.layout.homogeneous_aggregate(ccx).unwrap();
+            let unit = arg.layout.homogeneous_aggregate(cx).unwrap();
             assert_eq!(unit.size, arg.layout.size);
             if unit.kind == RegKind::Float {
                 continue;
diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs
index 7c9f257..6db18bf 100644
--- a/src/librustc_trans/cabi_x86_64.rs
+++ b/src/librustc_trans/cabi_x86_64.rs
@@ -12,7 +12,7 @@
 // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
 
 use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
-use context::CrateContext;
+use context::CodegenCx;
 
 use rustc::ty::layout::{self, TyLayout, Size};
 
@@ -31,7 +31,7 @@
 const LARGEST_VECTOR_SIZE: usize = 512;
 const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
 
-fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
+fn classify_arg<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, arg: &ArgType<'tcx>)
                           -> Result<[Class; MAX_EIGHTBYTES], Memory> {
     fn unify(cls: &mut [Class],
              off: Size,
@@ -52,7 +52,7 @@
         cls[i] = to_write;
     }
 
-    fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+    fn classify<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                           layout: TyLayout<'tcx>,
                           cls: &mut [Class],
                           off: Size)
@@ -82,7 +82,7 @@
 
                 // everything after the first one is the upper
                 // half of a register.
-                let stride = element.value.size(ccx);
+                let stride = element.value.size(cx);
                 for i in 1..count {
                     let field_off = off + stride * i;
                     unify(cls, field_off, Class::SseUp);
@@ -95,7 +95,7 @@
                     layout::Variants::Single { .. } => {
                         for i in 0..layout.fields.count() {
                             let field_off = off + layout.fields.offset(i);
-                            classify(ccx, layout.field(ccx, i), cls, field_off)?;
+                            classify(cx, layout.field(cx, i), cls, field_off)?;
                         }
                     }
                     layout::Variants::Tagged { .. } |
@@ -114,7 +114,7 @@
     }
 
     let mut cls = [Class::None; MAX_EIGHTBYTES];
-    classify(ccx, arg.layout, &mut cls, Size::from_bytes(0))?;
+    classify(cx, arg.layout, &mut cls, Size::from_bytes(0))?;
     if n > 2 {
         if cls[0] != Class::Sse {
             return Err(Memory);
@@ -189,12 +189,12 @@
     target
 }
 
-pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+pub fn compute_abi_info<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
     let mut sse_regs = 8; // XMM0-7
 
     let mut x86_64_ty = |arg: &mut ArgType<'tcx>, is_arg: bool| {
-        let cls = classify_arg(ccx, arg);
+        let cls = classify_arg(cx, arg);
 
         let mut needed_int = 0;
         let mut needed_sse = 0;
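
The `classify`/`unify` machinery implements the SysV x86_64 rule that each eightbyte of an argument gets a single class, with integer winning over SSE on a conflict. A deliberately simplified model that keeps only the `None`/`Int`/`Sse` cases and plain byte offsets:

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Class { None, Int, Sse }

// Merge a field's class into the eightbyte covering `byte_off`.
fn unify(cls: &mut [Class], byte_off: u64, new: Class) {
    let i = (byte_off / 8) as usize;
    cls[i] = match (cls[i], new) {
        (old, n) if old == n => old,
        (Class::None, n) => n,
        (old, Class::None) => old,
        _ => Class::Int, // mixed Int/Sse collapses to Int
    };
}

fn main() {
    // struct { f32, f32, i32 }: the two floats share the first eightbyte,
    // the i32 occupies the second.
    let mut cls = [Class::None; 2];
    unify(&mut cls, 0, Class::Sse);
    unify(&mut cls, 4, Class::Sse);
    unify(&mut cls, 8, Class::Int);
    assert_eq!(cls, [Class::Sse, Class::Int]);
    println!("{:?}", cls);
}
```
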
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index ccbc662..c3d5e08 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -15,7 +15,7 @@
 //! closure.
 
 use attributes;
-use common::{self, CrateContext};
+use common::{self, CodegenCx};
 use consts;
 use declare;
 use llvm::{self, ValueRef};
@@ -34,13 +34,13 @@
 ///
 /// # Parameters
 ///
-/// - `ccx`: the crate context
+/// - `cx`: the codegen context
 /// - `instance`: the instance to be instantiated
-pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                         instance: Instance<'tcx>)
                         -> ValueRef
 {
-    let tcx = ccx.tcx();
+    let tcx = cx.tcx;
 
     debug!("get_fn(instance={:?})", instance);
 
@@ -48,8 +48,8 @@
     assert!(!instance.substs.has_escaping_regions());
     assert!(!instance.substs.has_param_types());
 
-    let fn_ty = instance.ty(ccx.tcx());
-    if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
+    let fn_ty = instance.ty(cx.tcx);
+    if let Some(&llfn) = cx.instances.borrow().get(&instance) {
         return llfn;
     }
 
@@ -57,10 +57,10 @@
     debug!("get_fn({:?}: {:?}) => {}", instance, fn_ty, sym);
 
     // Create a fn pointer with the substituted signature.
-    let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty));
-    let llptrty = ccx.layout_of(fn_ptr_ty).llvm_type(ccx);
+    let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(cx, fn_ty));
+    let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx);
 
-    let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) {
+    let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) {
         // This is subtle and surprising, but sometimes we have to bitcast
         // the resulting fn pointer.  The reason has to do with external
         // functions.  If you have two crates that both bind the same C
@@ -92,14 +92,14 @@
             llfn
         }
     } else {
-        let llfn = declare::declare_fn(ccx, &sym, fn_ty);
+        let llfn = declare::declare_fn(cx, &sym, fn_ty);
         assert_eq!(common::val_ty(llfn), llptrty);
         debug!("get_fn: not casting pointer!");
 
         if instance.def.is_inline(tcx) {
             attributes::inline(llfn, attributes::InlineAttr::Hint);
         }
-        attributes::from_fn_attrs(ccx, llfn, instance.def.def_id());
+        attributes::from_fn_attrs(cx, llfn, instance.def.def_id());
 
         let instance_def_id = instance.def_id();
 
@@ -149,9 +149,9 @@
         unsafe {
             llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
 
-            if ccx.tcx().is_translated_function(instance_def_id) {
+            if cx.tcx.is_translated_function(instance_def_id) {
                 if instance_def_id.is_local() {
-                    if !ccx.tcx().is_exported_symbol(instance_def_id) {
+                    if !cx.tcx.is_exported_symbol(instance_def_id) {
                         llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
                     }
                 } else {
@@ -160,7 +160,7 @@
             }
         }
 
-        if ccx.use_dll_storage_attrs() &&
+        if cx.use_dll_storage_attrs &&
             tcx.is_dllimport_foreign_item(instance_def_id)
         {
             unsafe {
@@ -171,20 +171,20 @@
         llfn
     };
 
-    ccx.instances().borrow_mut().insert(instance, llfn);
+    cx.instances.borrow_mut().insert(instance, llfn);
 
     llfn
 }
 
-pub fn resolve_and_get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn resolve_and_get_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                     def_id: DefId,
                                     substs: &'tcx Substs<'tcx>)
                                     -> ValueRef
 {
     get_fn(
-        ccx,
+        cx,
         ty::Instance::resolve(
-            ccx.tcx(),
+            cx.tcx,
             ty::ParamEnv::empty(traits::Reveal::All),
             def_id,
             substs
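
`get_fn` is a memoized lookup: consult `cx.instances` first, declare on a miss, record the result, return it. The caching skeleton, with strings and `usize`s as stand-ins for `Instance` and `ValueRef`:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

struct Cx {
    instances: RefCell<HashMap<String, usize>>,
}

impl Cx {
    fn get_fn(&self, sym: &str) -> usize {
        // Fast path: the instance was already declared.
        if let Some(&llfn) = self.instances.borrow().get(sym) {
            return llfn;
        }
        let llfn = sym.len(); // stands in for declare::declare_fn
        self.instances.borrow_mut().insert(sym.to_string(), llfn);
        llfn
    }
}

fn main() {
    let cx = Cx { instances: RefCell::new(HashMap::new()) };
    assert_eq!(cx.get_fn("main"), cx.get_fn("main")); // second call is a cache hit
}
```
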
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index b1bdee3..9e745c3 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -38,7 +38,7 @@
 use syntax::symbol::InternedString;
 use syntax_pos::{Span, DUMMY_SP};
 
-pub use context::{CrateContext, SharedCrateContext};
+pub use context::CodegenCx;
 
 pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
     ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All))
@@ -152,46 +152,46 @@
     }
 }
 
-pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
-    C_uint(Type::i1(ccx), val as u64)
+pub fn C_bool(cx: &CodegenCx, val: bool) -> ValueRef {
+    C_uint(Type::i1(cx), val as u64)
 }
 
-pub fn C_i32(ccx: &CrateContext, i: i32) -> ValueRef {
-    C_int(Type::i32(ccx), i as i64)
+pub fn C_i32(cx: &CodegenCx, i: i32) -> ValueRef {
+    C_int(Type::i32(cx), i as i64)
 }
 
-pub fn C_u32(ccx: &CrateContext, i: u32) -> ValueRef {
-    C_uint(Type::i32(ccx), i as u64)
+pub fn C_u32(cx: &CodegenCx, i: u32) -> ValueRef {
+    C_uint(Type::i32(cx), i as u64)
 }
 
-pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
-    C_uint(Type::i64(ccx), i)
+pub fn C_u64(cx: &CodegenCx, i: u64) -> ValueRef {
+    C_uint(Type::i64(cx), i)
 }
 
-pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef {
-    let bit_size = ccx.data_layout().pointer_size.bits();
+pub fn C_usize(cx: &CodegenCx, i: u64) -> ValueRef {
+    let bit_size = cx.data_layout().pointer_size.bits();
     if bit_size < 64 {
         // make sure it doesn't overflow
         assert!(i < (1<<bit_size));
     }
 
-    C_uint(ccx.isize_ty(), i)
+    C_uint(cx.isize_ty, i)
 }
 
-pub fn C_u8(ccx: &CrateContext, i: u8) -> ValueRef {
-    C_uint(Type::i8(ccx), i as u64)
+pub fn C_u8(cx: &CodegenCx, i: u8) -> ValueRef {
+    C_uint(Type::i8(cx), i as u64)
 }
 
 
 // This is a 'c-like' raw string, which differs from
 // our boxed-and-length-annotated strings.
-pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> ValueRef {
+pub fn C_cstr(cx: &CodegenCx, s: InternedString, null_terminated: bool) -> ValueRef {
     unsafe {
-        if let Some(&llval) = cx.const_cstr_cache().borrow().get(&s) {
+        if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) {
             return llval;
         }
 
-        let sc = llvm::LLVMConstStringInContext(cx.llcx(),
+        let sc = llvm::LLVMConstStringInContext(cx.llcx,
                                                 s.as_ptr() as *const c_char,
                                                 s.len() as c_uint,
                                                 !null_terminated as Bool);
@@ -203,28 +203,28 @@
         llvm::LLVMSetGlobalConstant(g, True);
         llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
 
-        cx.const_cstr_cache().borrow_mut().insert(s, g);
+        cx.const_cstr_cache.borrow_mut().insert(s, g);
         g
     }
 }
 
 // NB: Do not use `do_spill_noroot` to make this into a constant string, or
 // you will be kicked off fast isel. See issue #4352 for an example of this.
-pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
+pub fn C_str_slice(cx: &CodegenCx, s: InternedString) -> ValueRef {
     let len = s.len();
     let cs = consts::ptrcast(C_cstr(cx, s, false),
-        cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to());
+        cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to());
     C_fat_ptr(cx, cs, C_usize(cx, len as u64))
 }
 
-pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
+pub fn C_fat_ptr(cx: &CodegenCx, ptr: ValueRef, meta: ValueRef) -> ValueRef {
     assert_eq!(abi::FAT_PTR_ADDR, 0);
     assert_eq!(abi::FAT_PTR_EXTRA, 1);
     C_struct(cx, &[ptr, meta], false)
 }
 
-pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
-    C_struct_in_context(cx.llcx(), elts, packed)
+pub fn C_struct(cx: &CodegenCx, elts: &[ValueRef], packed: bool) -> ValueRef {
+    C_struct_in_context(cx.llcx, elts, packed)
 }
 
 pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ValueRef {
@@ -247,8 +247,8 @@
     }
 }
 
-pub fn C_bytes(cx: &CrateContext, bytes: &[u8]) -> ValueRef {
-    C_bytes_in_context(cx.llcx(), bytes)
+pub fn C_bytes(cx: &CodegenCx, bytes: &[u8]) -> ValueRef {
+    C_bytes_in_context(cx.llcx, bytes)
 }
 
 pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
@@ -328,37 +328,37 @@
 // of Java. (See related discussion on #1877 and #10183.)
 
 pub fn build_unchecked_lshift<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     lhs: ValueRef,
     rhs: ValueRef
 ) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOp_::BiShl, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs);
-    bcx.shl(lhs, rhs)
+    let rhs = shift_mask_rhs(bx, rhs);
+    bx.shl(lhs, rhs)
 }
 
 pub fn build_unchecked_rshift<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
+    bx: &Builder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef
 ) -> ValueRef {
-    let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs);
+    let rhs = base::cast_shift_expr_rhs(bx, hir::BinOp_::BiShr, lhs, rhs);
     // #1877, #10183: Ensure that input is always valid
-    let rhs = shift_mask_rhs(bcx, rhs);
+    let rhs = shift_mask_rhs(bx, rhs);
     let is_signed = lhs_t.is_signed();
     if is_signed {
-        bcx.ashr(lhs, rhs)
+        bx.ashr(lhs, rhs)
     } else {
-        bcx.lshr(lhs, rhs)
+        bx.lshr(lhs, rhs)
     }
 }
 
-fn shift_mask_rhs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
+fn shift_mask_rhs<'a, 'tcx>(bx: &Builder<'a, 'tcx>, rhs: ValueRef) -> ValueRef {
     let rhs_llty = val_ty(rhs);
-    bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false))
+    bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
 }
 
 pub fn shift_mask_val<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     llty: Type,
     mask_llty: Type,
     invert: bool
@@ -375,23 +375,23 @@
             }
         },
         TypeKind::Vector => {
-            let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert);
-            bcx.vector_splat(mask_llty.vector_length(), mask)
+            let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert);
+            bx.vector_splat(mask_llty.vector_length(), mask)
         },
         _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
     }
 }
 
-pub fn ty_fn_sig<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                            ty: Ty<'tcx>)
                            -> ty::PolyFnSig<'tcx>
 {
     match ty.sty {
         ty::TyFnDef(..) |
         // Shims currently have type TyFnPtr. Not sure this should remain.
-        ty::TyFnPtr(_) => ty.fn_sig(ccx.tcx()),
+        ty::TyFnPtr(_) => ty.fn_sig(cx.tcx),
         ty::TyClosure(def_id, substs) => {
-            let tcx = ccx.tcx();
+            let tcx = cx.tcx;
             let sig = substs.closure_sig(def_id, tcx);
 
             let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
@@ -404,8 +404,8 @@
             ))
         }
         ty::TyGenerator(def_id, substs, _) => {
-            let tcx = ccx.tcx();
-            let sig = substs.generator_poly_sig(def_id, ccx.tcx());
+            let tcx = cx.tcx;
+            let sig = substs.generator_poly_sig(def_id, cx.tcx);
 
             let env_region = ty::ReLateBound(ty::DebruijnIndex::new(1), ty::BrEnv);
             let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
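
The shift helpers above exist because LLVM's `shl`/`lshr`/`ashr` are undefined when the shift amount is not smaller than the bit width, so the right-hand side is masked first (`rhs & (bits - 1)`). Concretely, for a 32-bit operand:

```rust
// Mask the shift amount into range, as `shift_mask_rhs` does: for a 32-bit
// operand the mask is 31, so a count of 33 becomes 1 instead of LLVM UB.
fn build_unchecked_shl(lhs: u32, rhs: u32) -> u32 {
    let bits = 32;
    lhs << (rhs & (bits - 1))
}

fn main() {
    assert_eq!(build_unchecked_shl(1, 33), 2); // 33 & 31 == 1
    // Rust's own `<<` would panic here in a debug build instead.
    println!("ok");
}
```
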
diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs
index f9fbceb..d5b33d8 100644
--- a/src/librustc_trans/consts.rs
+++ b/src/librustc_trans/consts.rs
@@ -17,7 +17,7 @@
 use debuginfo;
 use base;
 use monomorphize::{MonoItem, MonoItemExt};
-use common::{CrateContext, val_ty};
+use common::{CodegenCx, val_ty};
 use declare;
 use monomorphize::Instance;
 use type_::Type;
@@ -43,17 +43,17 @@
     }
 }
 
-fn set_global_alignment(ccx: &CrateContext,
+fn set_global_alignment(cx: &CodegenCx,
                         gv: ValueRef,
                         mut align: Align) {
     // The target may require greater alignment for globals than the type does.
     // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
     // which can force it to be smaller.  Rust doesn't support this yet.
-    if let Some(min) = ccx.sess().target.target.options.min_global_align {
+    if let Some(min) = cx.sess().target.target.options.min_global_align {
         match ty::layout::Align::from_bits(min, min) {
             Ok(min) => align = align.max(min),
             Err(err) => {
-                ccx.sess().err(&format!("invalid minimum global alignment: {}", err));
+                cx.sess().err(&format!("invalid minimum global alignment: {}", err));
             }
         }
     }
@@ -62,30 +62,30 @@
     }
 }
 
-pub fn addr_of_mut(ccx: &CrateContext,
+pub fn addr_of_mut(cx: &CodegenCx,
                    cv: ValueRef,
                    align: Align,
                    kind: &str)
                     -> ValueRef {
     unsafe {
-        let name = ccx.generate_local_symbol_name(kind);
-        let gv = declare::define_global(ccx, &name[..], val_ty(cv)).unwrap_or_else(||{
+        let name = cx.generate_local_symbol_name(kind);
+        let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{
             bug!("symbol `{}` is already defined", name);
         });
         llvm::LLVMSetInitializer(gv, cv);
-        set_global_alignment(ccx, gv, align);
+        set_global_alignment(cx, gv, align);
         llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
         SetUnnamedAddr(gv, true);
         gv
     }
 }
 
-pub fn addr_of(ccx: &CrateContext,
+pub fn addr_of(cx: &CodegenCx,
                cv: ValueRef,
                align: Align,
                kind: &str)
                -> ValueRef {
-    if let Some(&gv) = ccx.const_globals().borrow().get(&cv) {
+    if let Some(&gv) = cx.const_globals.borrow().get(&cv) {
         unsafe {
             // Upgrade the alignment in cases where the same constant is used with different
             // alignment requirements
@@ -96,42 +96,42 @@
         }
         return gv;
     }
-    let gv = addr_of_mut(ccx, cv, align, kind);
+    let gv = addr_of_mut(cx, cv, align, kind);
     unsafe {
         llvm::LLVMSetGlobalConstant(gv, True);
     }
-    ccx.const_globals().borrow_mut().insert(cv, gv);
+    cx.const_globals.borrow_mut().insert(cv, gv);
     gv
 }
 
-pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
-    let instance = Instance::mono(ccx.tcx(), def_id);
-    if let Some(&g) = ccx.instances().borrow().get(&instance) {
+pub fn get_static(cx: &CodegenCx, def_id: DefId) -> ValueRef {
+    let instance = Instance::mono(cx.tcx, def_id);
+    if let Some(&g) = cx.instances.borrow().get(&instance) {
         return g;
     }
 
-    let ty = instance.ty(ccx.tcx());
-    let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) {
+    let ty = instance.ty(cx.tcx);
+    let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) {
 
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
-        let (g, attrs) = match ccx.tcx().hir.get(id) {
+        let llty = cx.layout_of(ty).llvm_type(cx);
+        let (g, attrs) = match cx.tcx.hir.get(id) {
             hir_map::NodeItem(&hir::Item {
                 ref attrs, span, node: hir::ItemStatic(..), ..
             }) => {
-                let sym = MonoItem::Static(id).symbol_name(ccx.tcx());
+                let sym = MonoItem::Static(id).symbol_name(cx.tcx);
 
-                let defined_in_current_codegen_unit = ccx.codegen_unit()
+                let defined_in_current_codegen_unit = cx.codegen_unit
                                                          .items()
                                                          .contains_key(&MonoItem::Static(id));
                 assert!(!defined_in_current_codegen_unit);
 
-                if declare::get_declared_value(ccx, &sym[..]).is_some() {
+                if declare::get_declared_value(cx, &sym[..]).is_some() {
                     span_bug!(span, "trans: Conflicting symbol names for static?");
                 }
 
-                let g = declare::define_global(ccx, &sym[..], llty).unwrap();
+                let g = declare::define_global(cx, &sym[..], llty).unwrap();
 
-                if !ccx.tcx().is_exported_symbol(def_id) {
+                if !cx.tcx.is_exported_symbol(def_id) {
                     unsafe {
                         llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
                     }
@@ -143,7 +143,7 @@
             hir_map::NodeForeignItem(&hir::ForeignItem {
                 ref attrs, span, node: hir::ForeignItemStatic(..), ..
             }) => {
-                let sym = ccx.tcx().symbol_name(instance);
+                let sym = cx.tcx.symbol_name(instance);
                 let g = if let Some(name) =
                         attr::first_attr_value_str_by_name(&attrs, "linkage") {
                     // If this is a static with a linkage specified, then we need to handle
@@ -154,18 +154,18 @@
                     let linkage = match base::linkage_by_name(&name.as_str()) {
                         Some(linkage) => linkage,
                         None => {
-                            ccx.sess().span_fatal(span, "invalid linkage specified");
+                            cx.sess().span_fatal(span, "invalid linkage specified");
                         }
                     };
                     let llty2 = match ty.sty {
-                        ty::TyRawPtr(ref mt) => ccx.layout_of(mt.ty).llvm_type(ccx),
+                        ty::TyRawPtr(ref mt) => cx.layout_of(mt.ty).llvm_type(cx),
                         _ => {
-                            ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`");
+                            cx.sess().span_fatal(span, "must have type `*const T` or `*mut T`");
                         }
                     };
                     unsafe {
                         // Declare a symbol `foo` with the desired linkage.
-                        let g1 = declare::declare_global(ccx, &sym, llty2);
+                        let g1 = declare::declare_global(cx, &sym, llty2);
                         llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
 
                         // Declare an internal global `extern_with_linkage_foo` which
@@ -176,8 +176,8 @@
                         // zero.
                         let mut real_name = "_rust_extern_with_linkage_".to_string();
                         real_name.push_str(&sym);
-                        let g2 = declare::define_global(ccx, &real_name, llty).unwrap_or_else(||{
-                            ccx.sess().span_fatal(span,
+                        let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{
+                            cx.sess().span_fatal(span,
                                 &format!("symbol `{}` is already defined", &sym))
                         });
                         llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
@@ -186,7 +186,7 @@
                     }
                 } else {
                     // Generate an external declaration.
-                    declare::declare_global(ccx, &sym, llty)
+                    declare::declare_global(cx, &sym, llty)
                 };
 
                 (g, attrs)
@@ -197,29 +197,29 @@
 
         for attr in attrs {
             if attr.check_name("thread_local") {
-                llvm::set_thread_local_mode(g, ccx.tls_model());
+                llvm::set_thread_local_mode(g, cx.tls_model);
             }
         }
 
         g
     } else {
-        let sym = ccx.tcx().symbol_name(instance);
+        let sym = cx.tcx.symbol_name(instance);
 
         // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
         // FIXME(nagisa): investigate whether it can be changed into define_global
-        let g = declare::declare_global(ccx, &sym, ccx.layout_of(ty).llvm_type(ccx));
+        let g = declare::declare_global(cx, &sym, cx.layout_of(ty).llvm_type(cx));
         // Thread-local statics in some other crate need to *always* be linked
         // against in a thread-local fashion, so we need to be sure to apply the
         // thread-local attribute locally if it was present remotely. If we
         // don't do this then linker errors can be generated where the linker
         // complains that one object file has a thread-local version of the
         // symbol and another one doesn't.
-        for attr in ccx.tcx().get_attrs(def_id).iter() {
+        for attr in cx.tcx.get_attrs(def_id).iter() {
             if attr.check_name("thread_local") {
-                llvm::set_thread_local_mode(g, ccx.tls_model());
+                llvm::set_thread_local_mode(g, cx.tls_model);
             }
         }
-        if ccx.use_dll_storage_attrs() && !ccx.tcx().is_foreign_item(def_id) {
+        if cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) {
             // This item is external but not foreign, i.e. it originates from an external Rust
             // crate. Since we don't know whether this crate will be linked dynamically or
             // statically in the final application, we always mark such symbols as 'dllimport'.
@@ -232,42 +232,42 @@
         g
     };
 
-    if ccx.use_dll_storage_attrs() && ccx.tcx().is_dllimport_foreign_item(def_id) {
+    if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) {
         // For foreign (native) libs we know the exact storage type to use.
         unsafe {
             llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
         }
     }
 
-    ccx.instances().borrow_mut().insert(instance, g);
-    ccx.statics().borrow_mut().insert(g, def_id);
+    cx.instances.borrow_mut().insert(instance, g);
+    cx.statics.borrow_mut().insert(g, def_id);
     g
 }
 
-pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn trans_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                               m: hir::Mutability,
                               id: ast::NodeId,
                               attrs: &[ast::Attribute])
                               -> Result<ValueRef, ConstEvalErr<'tcx>> {
     unsafe {
-        let def_id = ccx.tcx().hir.local_def_id(id);
-        let g = get_static(ccx, def_id);
+        let def_id = cx.tcx.hir.local_def_id(id);
+        let g = get_static(cx, def_id);
 
-        let v = ::mir::trans_static_initializer(ccx, def_id)?;
+        let v = ::mir::trans_static_initializer(cx, def_id)?;
 
         // boolean SSA values are i1, but they have to be stored in i8 slots,
         // otherwise some LLVM optimization passes don't work as expected
         let mut val_llty = val_ty(v);
-        let v = if val_llty == Type::i1(ccx) {
-            val_llty = Type::i8(ccx);
+        let v = if val_llty == Type::i1(cx) {
+            val_llty = Type::i8(cx);
             llvm::LLVMConstZExt(v, val_llty.to_ref())
         } else {
             v
         };
 
-        let instance = Instance::mono(ccx.tcx(), def_id);
-        let ty = instance.ty(ccx.tcx());
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
+        let instance = Instance::mono(cx.tcx, def_id);
+        let ty = instance.ty(cx.tcx);
+        let llty = cx.layout_of(ty).llvm_type(cx);
         let g = if val_llty == llty {
             g
         } else {
@@ -282,7 +282,7 @@
             let visibility = llvm::LLVMRustGetVisibility(g);
 
             let new_g = llvm::LLVMRustGetOrInsertGlobal(
-                ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
+                cx.llmod, name_string.as_ptr(), val_llty.to_ref());
 
             llvm::LLVMRustSetLinkage(new_g, linkage);
             llvm::LLVMRustSetVisibility(new_g, visibility);
@@ -290,32 +290,32 @@
             // To avoid breaking any invariants, we leave around the old
             // global for the moment; we'll replace all references to it
             // with the new global later. (See base::trans_crate.)
-            ccx.statics_to_rauw().borrow_mut().push((g, new_g));
+            cx.statics_to_rauw.borrow_mut().push((g, new_g));
             new_g
         };
-        set_global_alignment(ccx, g, ccx.align_of(ty));
+        set_global_alignment(cx, g, cx.align_of(ty));
         llvm::LLVMSetInitializer(g, v);
 
         // As an optimization, all shared statics which do not have interior
         // mutability are placed into read-only memory.
         if m != hir::MutMutable {
-            if ccx.shared().type_is_freeze(ty) {
+            if cx.type_is_freeze(ty) {
                 llvm::LLVMSetGlobalConstant(g, llvm::True);
             }
         }
 
-        debuginfo::create_global_var_metadata(ccx, id, g);
+        debuginfo::create_global_var_metadata(cx, id, g);
 
         if attr::contains_name(attrs, "thread_local") {
-            llvm::set_thread_local_mode(g, ccx.tls_model());
+            llvm::set_thread_local_mode(g, cx.tls_model);
         }
 
-        base::set_link_section(ccx, g, attrs);
+        base::set_link_section(cx, g, attrs);
 
         if attr::contains_name(attrs, "used") {
             // This static will be stored in the llvm.used variable which is an array of i8*
-            let cast = llvm::LLVMConstPointerCast(g, Type::i8p(ccx).to_ref());
-            ccx.used_statics().borrow_mut().push(cast);
+            let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx).to_ref());
+            cx.used_statics.borrow_mut().push(cast);
         }
 
         Ok(g)
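The i1-versus-i8 note in `trans_static` is worth a standalone illustration. A safe-Rust model of the invariant (not trans output): bools widen to a byte on store and narrow on load, so the slot only ever holds 0 or 1.

    // Store: zext i1 -> i8, as LLVMConstZExt does for the initializer above.
    fn store_bool(slot: &mut u8, v: bool) {
        *slot = v as u8;
    }

    // Load: trunc i8 -> i1; the 0/1 invariant makes this lossless.
    fn load_bool(slot: &u8) -> bool {
        debug_assert!(*slot <= 1);
        *slot != 0
    }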
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
index 3014963..06b8d9f 100644
--- a/src/librustc_trans/context.rs
+++ b/src/librustc_trans/context.rs
@@ -11,10 +11,9 @@
 use common;
 use llvm;
 use llvm::{ContextRef, ModuleRef, ValueRef};
-use rustc::dep_graph::{DepGraph, DepGraphSafe};
+use rustc::dep_graph::DepGraphSafe;
 use rustc::hir;
 use rustc::hir::def_id::DefId;
-use rustc::ich::StableHashingContext;
 use rustc::traits;
 use debuginfo;
 use callee;
@@ -28,7 +27,6 @@
 
 use rustc_data_structures::base_n;
 use rustc::mir::mono::Stats;
-use rustc_data_structures::stable_hasher::StableHashingContextProvider;
 use rustc::session::config::{self, NoDebugInfo};
 use rustc::session::Session;
 use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
@@ -41,38 +39,30 @@
 use std::iter;
 use std::str;
 use std::sync::Arc;
-use std::marker::PhantomData;
 use syntax::symbol::InternedString;
 use abi::Abi;
 
-/// The shared portion of a `CrateContext`.  There is one `SharedCrateContext`
-/// per crate.  The data here is shared between all compilation units of the
-/// crate, so it must not contain references to any LLVM data structures
-/// (aside from metadata-related ones).
-pub struct SharedCrateContext<'a, 'tcx: 'a> {
-    tcx: TyCtxt<'a, 'tcx, 'tcx>,
-    check_overflow: bool,
-    use_dll_storage_attrs: bool,
-    tls_model: llvm::ThreadLocalMode,
-}
+/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
+/// `ContextRef` so that several compilation units may be optimized in parallel.
+/// All other LLVM data structures in the `CodegenCx` are tied to that `ContextRef`.
+pub struct CodegenCx<'a, 'tcx: 'a> {
+    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    pub check_overflow: bool,
+    pub use_dll_storage_attrs: bool,
+    pub tls_model: llvm::ThreadLocalMode,
 
-/// The local portion of a `CrateContext`.  There is one `LocalCrateContext`
-/// per compilation unit.  Each one has its own LLVM `ContextRef` so that
-/// several compilation units may be optimized in parallel.  All other LLVM
-/// data structures in the `LocalCrateContext` are tied to that `ContextRef`.
-pub struct LocalCrateContext<'a, 'tcx: 'a> {
-    llmod: ModuleRef,
-    llcx: ContextRef,
-    stats: RefCell<Stats>,
-    codegen_unit: Arc<CodegenUnit<'tcx>>,
+    pub llmod: ModuleRef,
+    pub llcx: ContextRef,
+    pub stats: RefCell<Stats>,
+    pub codegen_unit: Arc<CodegenUnit<'tcx>>,
 
     /// Cache instances of monomorphic and polymorphic items
-    instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, ValueRef>>,
     /// Cache generated vtables
-    vtables: RefCell<FxHashMap<(Ty<'tcx>,
+    pub vtables: RefCell<FxHashMap<(Ty<'tcx>,
                                 Option<ty::PolyExistentialTraitRef<'tcx>>), ValueRef>>,
     /// Cache of constant strings.
-    const_cstr_cache: RefCell<FxHashMap<InternedString, ValueRef>>,
+    pub const_cstr_cache: RefCell<FxHashMap<InternedString, ValueRef>>,
 
     /// Reverse-direction for const ptrs cast from globals.
     /// Key is a ValueRef holding a *T,
@@ -82,72 +72,42 @@
     /// when we ptrcast, and we have to ptrcast during translation
     /// of a [T] const because we form a slice, a (*T,usize) pair, not
     /// a pointer to an LLVM array type. Similar for trait objects.
-    const_unsized: RefCell<FxHashMap<ValueRef, ValueRef>>,
+    pub const_unsized: RefCell<FxHashMap<ValueRef, ValueRef>>,
 
     /// Cache of emitted const globals (value -> global)
-    const_globals: RefCell<FxHashMap<ValueRef, ValueRef>>,
+    pub const_globals: RefCell<FxHashMap<ValueRef, ValueRef>>,
 
     /// Mapping from static definitions to their DefId's.
-    statics: RefCell<FxHashMap<ValueRef, DefId>>,
+    pub statics: RefCell<FxHashMap<ValueRef, DefId>>,
 
     /// List of globals for static variables which need to be passed to the
     /// LLVM function ReplaceAllUsesWith (RAUW) when translation is complete.
     /// (We have to make sure we don't invalidate any ValueRefs referring
     /// to constants.)
-    statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
+    pub statics_to_rauw: RefCell<Vec<(ValueRef, ValueRef)>>,
 
     /// Statics that will be placed in the llvm.used variable
     /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
-    used_statics: RefCell<Vec<ValueRef>>,
+    pub used_statics: RefCell<Vec<ValueRef>>,
 
-    lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
-    scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
-    pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
-    isize_ty: Type,
+    pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
+    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+    pub isize_ty: Type,
 
-    dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
+    pub dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
 
     eh_personality: Cell<Option<ValueRef>>,
     eh_unwind_resume: Cell<Option<ValueRef>>,
-    rust_try_fn: Cell<Option<ValueRef>>,
+    pub rust_try_fn: Cell<Option<ValueRef>>,
 
     intrinsics: RefCell<FxHashMap<&'static str, ValueRef>>,
 
     /// A counter that is used for generating local symbol names
     local_gen_sym_counter: Cell<usize>,
-
-    /// A placeholder so we can add lifetimes
-    placeholder: PhantomData<&'a ()>,
 }
 
-/// A CrateContext value binds together one LocalCrateContext with the
-/// SharedCrateContext. It exists as a convenience wrapper, so we don't have to
-/// pass around (SharedCrateContext, LocalCrateContext) tuples all over trans.
-pub struct CrateContext<'a, 'tcx: 'a> {
-    shared: &'a SharedCrateContext<'a, 'tcx>,
-    local_ccx: &'a LocalCrateContext<'a, 'tcx>,
-}
-
-impl<'a, 'tcx> CrateContext<'a, 'tcx> {
-    pub fn new(shared: &'a SharedCrateContext<'a, 'tcx>,
-               local_ccx: &'a LocalCrateContext<'a, 'tcx>)
-               -> Self {
-        CrateContext { shared, local_ccx }
-    }
-}
-
-impl<'a, 'tcx> DepGraphSafe for CrateContext<'a, 'tcx> {
-}
-
-impl<'a, 'tcx> DepGraphSafe for SharedCrateContext<'a, 'tcx> {
-}
-
-impl<'a, 'tcx> StableHashingContextProvider for SharedCrateContext<'a, 'tcx> {
-    type ContextType = StableHashingContext<'tcx>;
-
-    fn create_stable_hashing_context(&self) -> Self::ContextType {
-        self.tcx.create_stable_hashing_context()
-    }
+impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> {
 }
 
 pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
@@ -252,8 +212,11 @@
     (llcx, llmod)
 }
 
-impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
-    pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>) -> SharedCrateContext<'b, 'tcx> {
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+               codegen_unit: Arc<CodegenUnit<'tcx>>,
+               llmod_id: &str)
+               -> CodegenCx<'a, 'tcx> {
         // An interesting part of Windows that MSVC forces our hand on (and
         // apparently MinGW doesn't) is the usage of `dllimport` and `dllexport`
         // attributes in LLVM IR as well as native dependencies (in C these
@@ -303,78 +266,25 @@
 
         let tls_model = get_tls_model(&tcx.sess);
 
-        SharedCrateContext {
-            tcx,
-            check_overflow,
-            use_dll_storage_attrs,
-            tls_model,
-        }
-    }
-
-    pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
-        common::type_needs_drop(self.tcx, ty)
-    }
-
-    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_sized(self.tcx, ty)
-    }
-
-    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_freeze(self.tcx, ty)
-    }
-
-    pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
-        use syntax_pos::DUMMY_SP;
-        if ty.is_sized(self.tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) {
-            return false;
-        }
-
-        let tail = self.tcx.struct_tail(ty);
-        match tail.sty {
-            ty::TyForeign(..) => false,
-            ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
-            _ => bug!("unexpected unsized tail: {:?}", tail.sty),
-        }
-    }
-
-    pub fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> {
-        self.tcx
-    }
-
-    pub fn sess<'a>(&'a self) -> &'a Session {
-        &self.tcx.sess
-    }
-
-    pub fn dep_graph<'a>(&'a self) -> &'a DepGraph {
-        &self.tcx.dep_graph
-    }
-
-    pub fn use_dll_storage_attrs(&self) -> bool {
-        self.use_dll_storage_attrs
-    }
-}
-
-impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
-    pub fn new(shared: &SharedCrateContext<'a, 'tcx>,
-               codegen_unit: Arc<CodegenUnit<'tcx>>,
-               llmod_id: &str)
-               -> LocalCrateContext<'a, 'tcx> {
         unsafe {
-            let (llcx, llmod) = create_context_and_module(&shared.tcx.sess,
+            let (llcx, llmod) = create_context_and_module(&tcx.sess,
                                                           &llmod_id[..]);
 
-            let dbg_cx = if shared.tcx.sess.opts.debuginfo != NoDebugInfo {
+            let dbg_cx = if tcx.sess.opts.debuginfo != NoDebugInfo {
                 let dctx = debuginfo::CrateDebugContext::new(llmod);
-                debuginfo::metadata::compile_unit_metadata(shared,
+                debuginfo::metadata::compile_unit_metadata(tcx,
                                                            codegen_unit.name(),
-                                                           &dctx,
-                                                           shared.tcx.sess);
+                                                           &dctx);
                 Some(dctx)
             } else {
                 None
             };
 
-            let local_ccx = LocalCrateContext {
+            let mut cx = CodegenCx {
+                tcx,
+                check_overflow,
+                use_dll_storage_attrs,
+                tls_model,
                 llmod,
                 llcx,
                 stats: RefCell::new(Stats::default()),
@@ -397,41 +307,9 @@
                 rust_try_fn: Cell::new(None),
                 intrinsics: RefCell::new(FxHashMap()),
                 local_gen_sym_counter: Cell::new(0),
-                placeholder: PhantomData,
             };
-
-            let (isize_ty, mut local_ccx) = {
-                // Do a little dance to create a dummy CrateContext, so we can
-                // create some things in the LLVM module of this codegen unit
-                let mut local_ccxs = vec![local_ccx];
-                let isize_ty = {
-                    let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
-                                                                 local_ccxs.as_mut_slice());
-                    Type::isize(&dummy_ccx)
-                };
-                (isize_ty, local_ccxs.pop().unwrap())
-            };
-
-            local_ccx.isize_ty = isize_ty;
-
-            local_ccx
-        }
-    }
-
-    /// Create a dummy `CrateContext` from `self` and  the provided
-    /// `SharedCrateContext`.  This is somewhat dangerous because `self` may
-    /// not be fully initialized.
-    ///
-    /// This is used in the `LocalCrateContext` constructor to allow calling
-    /// functions that expect a complete `CrateContext`, even before the local
-    /// portion is fully initialized and attached to the `SharedCrateContext`.
-    fn dummy_ccx(shared: &'a SharedCrateContext<'a, 'tcx>,
-                 local_ccxs: &'a [LocalCrateContext<'a, 'tcx>])
-                 -> CrateContext<'a, 'tcx> {
-        assert!(local_ccxs.len() == 1);
-        CrateContext {
-            shared,
-            local_ccx: &local_ccxs[0]
+            cx.isize_ty = Type::isize(&cx);
+            cx
         }
     }
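Note how the end of `new` replaces the old "dummy ccx dance": the context is built with a placeholder `isize_ty`, which is then recomputed from the freshly built value and patched in. A minimal sketch of that two-step initialization, with toy types:

    struct Cx { pointer_bits: u32, isize_ty: &'static str }

    fn isize_ty_for(cx: &Cx) -> &'static str {
        match cx.pointer_bits {
            16 => "i16",
            32 => "i32",
            64 => "i64",
            bits => panic!("unsupported pointer width: {}", bits),
        }
    }

    fn new_cx(pointer_bits: u32) -> Cx {
        // Placeholder first, then fix up using the partially built cx.
        let mut cx = Cx { pointer_bits, isize_ty: "" };
        cx.isize_ty = isize_ty_for(&cx);
        cx
    }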
 
@@ -440,25 +318,13 @@
     }
 }
 
-impl<'b, 'tcx> CrateContext<'b, 'tcx> {
-    pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> {
-        self.shared
-    }
-
-    fn local(&self) -> &'b LocalCrateContext<'b, 'tcx> {
-        self.local_ccx
-    }
-
-    pub fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> {
-        self.shared.tcx
-    }
-
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
     pub fn sess<'a>(&'a self) -> &'a Session {
-        &self.shared.tcx.sess
+        &self.tcx.sess
     }
 
     pub fn get_intrinsic(&self, key: &str) -> ValueRef {
-        if let Some(v) = self.intrinsics().borrow().get(key).cloned() {
+        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
             return v;
         }
         match declare_intrinsic(self, key) {
@@ -467,106 +333,11 @@
         }
     }
 
-    pub fn llmod(&self) -> ModuleRef {
-        self.local().llmod
-    }
-
-    pub fn llcx(&self) -> ContextRef {
-        self.local().llcx
-    }
-
-    pub fn codegen_unit(&self) -> &CodegenUnit<'tcx> {
-        &self.local().codegen_unit
-    }
-
-    pub fn td(&self) -> llvm::TargetDataRef {
-        unsafe { llvm::LLVMRustGetModuleDataLayout(self.llmod()) }
-    }
-
-    pub fn instances<'a>(&'a self) -> &'a RefCell<FxHashMap<Instance<'tcx>, ValueRef>> {
-        &self.local().instances
-    }
-
-    pub fn vtables<'a>(&'a self)
-        -> &'a RefCell<FxHashMap<(Ty<'tcx>,
-                                  Option<ty::PolyExistentialTraitRef<'tcx>>), ValueRef>> {
-        &self.local().vtables
-    }
-
-    pub fn const_cstr_cache<'a>(&'a self) -> &'a RefCell<FxHashMap<InternedString, ValueRef>> {
-        &self.local().const_cstr_cache
-    }
-
-    pub fn const_unsized<'a>(&'a self) -> &'a RefCell<FxHashMap<ValueRef, ValueRef>> {
-        &self.local().const_unsized
-    }
-
-    pub fn const_globals<'a>(&'a self) -> &'a RefCell<FxHashMap<ValueRef, ValueRef>> {
-        &self.local().const_globals
-    }
-
-    pub fn statics<'a>(&'a self) -> &'a RefCell<FxHashMap<ValueRef, DefId>> {
-        &self.local().statics
-    }
-
-    pub fn statics_to_rauw<'a>(&'a self) -> &'a RefCell<Vec<(ValueRef, ValueRef)>> {
-        &self.local().statics_to_rauw
-    }
-
-    pub fn used_statics<'a>(&'a self) -> &'a RefCell<Vec<ValueRef>> {
-        &self.local().used_statics
-    }
-
-    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>> {
-        &self.local().lltypes
-    }
-
-    pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
-        &self.local().scalar_lltypes
-    }
-
-    pub fn pointee_infos<'a>(&'a self)
-                             -> &'a RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>> {
-        &self.local().pointee_infos
-    }
-
-    pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> {
-        &self.local().stats
-    }
-
-    pub fn isize_ty(&self) -> Type {
-        self.local().isize_ty
-    }
-
-    pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
-        &self.local().dbg_cx
-    }
-
-    pub fn rust_try_fn<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
-        &self.local().rust_try_fn
-    }
-
-    fn intrinsics<'a>(&'a self) -> &'a RefCell<FxHashMap<&'static str, ValueRef>> {
-        &self.local().intrinsics
-    }
-
-    pub fn check_overflow(&self) -> bool {
-        self.shared.check_overflow
-    }
-
-    pub fn use_dll_storage_attrs(&self) -> bool {
-        self.shared.use_dll_storage_attrs()
-    }
-
-    pub fn tls_model(&self) -> llvm::ThreadLocalMode {
-        self.shared.tls_model
-    }
-
     /// Generate a new symbol name with the given prefix. This symbol name must
     /// only be used for definitions with `internal` or `private` linkage.
     pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
-        let idx = self.local().local_gen_sym_counter.get();
-        self.local().local_gen_sym_counter.set(idx + 1);
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
         // Include a '.' character, so there can be no accidental conflicts with
         // user-defined names
         let mut name = String::with_capacity(prefix.len() + 6);
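The '.' trick makes collisions impossible rather than merely unlikely, since '.' can never appear in a user-written identifier. A std-only sketch of the generator (plain decimal where rustc uses `base_n` encoding):

    use std::cell::Cell;

    struct SymGen { counter: Cell<usize> }

    impl SymGen {
        fn generate_local_symbol_name(&self, prefix: &str) -> String {
            let idx = self.counter.get();
            self.counter.set(idx + 1);
            // e.g. "str.0", "str.1", ... for prefix "str"
            format!("{}.{}", prefix, idx)
        }
    }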
@@ -597,10 +368,10 @@
         // `rust_eh_personality` function, but rather we wired it up to the
         // CRT's custom personality function, which forces LLVM to consider
         // landing pads as "landing pads for SEH".
-        if let Some(llpersonality) = self.local().eh_personality.get() {
+        if let Some(llpersonality) = self.eh_personality.get() {
             return llpersonality
         }
-        let tcx = self.tcx();
+        let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
             Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
                 callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
@@ -615,7 +386,7 @@
                 declare::declare_cfn(self, name, fty)
             }
         };
-        self.local().eh_personality.set(Some(llfn));
+        self.eh_personality.set(Some(llfn));
         llfn
     }
 
@@ -623,12 +394,12 @@
     // otherwise declares it as an external function.
     pub fn eh_unwind_resume(&self) -> ValueRef {
         use attributes;
-        let unwresume = &self.local().eh_unwind_resume;
+        let unwresume = &self.eh_unwind_resume;
         if let Some(llfn) = unwresume.get() {
             return llfn;
         }
 
-        let tcx = self.tcx();
+        let tcx = self.tcx;
         assert!(self.sess().target.target.options.custom_unwind_resume);
         if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
             let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
@@ -649,33 +420,47 @@
         unwresume.set(Some(llfn));
         llfn
     }
+
+    pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+        common::type_needs_drop(self.tcx, ty)
+    }
+
+    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+        common::type_is_sized(self.tcx, ty)
+    }
+
+    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+        common::type_is_freeze(self.tcx, ty)
+    }
+
+    pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
+        use syntax_pos::DUMMY_SP;
+        if ty.is_sized(self.tcx, ty::ParamEnv::empty(traits::Reveal::All), DUMMY_SP) {
+            return false;
+        }
+
+        let tail = self.tcx.struct_tail(ty);
+        match tail.sty {
+            ty::TyForeign(..) => false,
+            ty::TyStr | ty::TySlice(..) | ty::TyDynamic(..) => true,
+            _ => bug!("unexpected unsized tail: {:?}", tail.sty),
+        }
+    }
 }
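`type_has_metadata` encodes a small decision table: sized types are always thin, and of the unsized tails only `str`, slices, and trait objects carry metadata (a length or a vtable), while `extern type`s are unsized yet thin. As a standalone toy, with an enum standing in for the real `TyKind` tails:

    enum Tail { Foreign, Str, Slice, Dynamic }

    fn has_metadata(is_sized: bool, tail: Tail) -> bool {
        if is_sized {
            return false; // thin pointer
        }
        match tail {
            Tail::Foreign => false,          // extern type: unsized but thin
            Tail::Str | Tail::Slice => true, // length
            Tail::Dynamic => true,           // vtable
        }
    }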
 
-impl<'a, 'tcx> ty::layout::HasDataLayout for &'a SharedCrateContext<'a, 'tcx> {
+impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CodegenCx<'a, 'tcx> {
     fn data_layout(&self) -> &ty::layout::TargetDataLayout {
         &self.tcx.data_layout
     }
 }
 
-impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> {
+impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'a, 'tcx> {
     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
         self.tcx
     }
 }
 
-impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> {
-    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
-        &self.shared.tcx.data_layout
-    }
-}
-
-impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> {
-    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
-        self.shared.tcx
-    }
-}
-
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
+impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CodegenCx<'a, 'tcx> {
     type TyLayout = TyLayout<'tcx>;
 
     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
@@ -688,57 +473,48 @@
     }
 }
 
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> {
-    type TyLayout = TyLayout<'tcx>;
-
-
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
-        self.shared.layout_of(ty)
-    }
-}
-
 /// Declare any llvm intrinsics that you might need
-fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
+fn declare_intrinsic(cx: &CodegenCx, key: &str) -> Option<ValueRef> {
     macro_rules! ifn {
         ($name:expr, fn() -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret));
+                let f = declare::declare_cfn(cx, $name, Type::func(&[], &$ret));
                 llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
             }
         );
         ($name:expr, fn(...) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(ccx, $name, Type::variadic_func(&[], &$ret));
+                let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], &$ret));
                 llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
             }
         );
         ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret));
+                let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], &$ret));
                 llvm::SetUnnamedAddr(f, false);
-                ccx.intrinsics().borrow_mut().insert($name, f.clone());
+                cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
             }
         );
     }
     macro_rules! mk_struct {
-        ($($field_ty:expr),*) => (Type::struct_(ccx, &[$($field_ty),*], false))
+        ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false))
     }
 
-    let i8p = Type::i8p(ccx);
-    let void = Type::void(ccx);
-    let i1 = Type::i1(ccx);
-    let t_i8 = Type::i8(ccx);
-    let t_i16 = Type::i16(ccx);
-    let t_i32 = Type::i32(ccx);
-    let t_i64 = Type::i64(ccx);
-    let t_i128 = Type::i128(ccx);
-    let t_f32 = Type::f32(ccx);
-    let t_f64 = Type::f64(ccx);
+    let i8p = Type::i8p(cx);
+    let void = Type::void(cx);
+    let i1 = Type::i1(cx);
+    let t_i8 = Type::i8(cx);
+    let t_i16 = Type::i16(cx);
+    let t_i32 = Type::i32(cx);
+    let t_i64 = Type::i64(cx);
+    let t_i128 = Type::i128(cx);
+    let t_f32 = Type::f32(cx);
+    let t_f64 = Type::f64(cx);
 
     ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void);
     ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void);
@@ -870,9 +646,9 @@
     ifn!("llvm.assume", fn(i1) -> void);
     ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
 
-    if ccx.sess().opts.debuginfo != NoDebugInfo {
-        ifn!("llvm.dbg.declare", fn(Type::metadata(ccx), Type::metadata(ccx)) -> void);
-        ifn!("llvm.dbg.value", fn(Type::metadata(ccx), t_i64, Type::metadata(ccx)) -> void);
+    if cx.sess().opts.debuginfo != NoDebugInfo {
+        ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void);
+        ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void);
     }
     return None;
 }
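For readers unfamiliar with `ifn!`, here is a cut-down version of the same macro shape, with strings standing in for LLVM `Type`s (illustrative only):

    use std::collections::HashMap;

    type Sig = (Vec<&'static str>, &'static str);

    macro_rules! ifn {
        ($cache:expr, $key:expr, $name:expr, fn($($arg:expr),*) -> $ret:expr) => {
            if $key == $name {
                let f: Sig = (vec![$($arg),*], $ret);
                $cache.insert($name, f.clone());
                return Some(f);
            }
        };
    }

    fn declare_intrinsic(cache: &mut HashMap<&'static str, Sig>, key: &str)
                         -> Option<Sig> {
        ifn!(cache, key, "llvm.assume", fn("i1") -> "void");
        ifn!(cache, key, "llvm.trap", fn() -> "void");
        None
    }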
diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs
index 95f13ea..bddb3d9 100644
--- a/src/librustc_trans/debuginfo/create_scope_map.rs
+++ b/src/librustc_trans/debuginfo/create_scope_map.rs
@@ -14,7 +14,7 @@
 
 use llvm;
 use llvm::debuginfo::DIScope;
-use common::CrateContext;
+use common::CodegenCx;
 use rustc::mir::{Mir, VisibilityScope};
 
 use libc::c_uint;
@@ -44,7 +44,7 @@
 
 /// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
 /// If debuginfo is disabled, the returned vector is empty.
-pub fn create_mir_scopes(ccx: &CrateContext, mir: &Mir, debug_context: &FunctionDebugContext)
+pub fn create_mir_scopes(cx: &CodegenCx, mir: &Mir, debug_context: &FunctionDebugContext)
     -> IndexVec<VisibilityScope, MirDebugScope> {
     let null_scope = MirDebugScope {
         scope_metadata: ptr::null_mut(),
@@ -71,13 +71,13 @@
     // Instantiate all scopes.
     for idx in 0..mir.visibility_scopes.len() {
         let scope = VisibilityScope::new(idx);
-        make_mir_scope(ccx, &mir, &has_variables, debug_context, scope, &mut scopes);
+        make_mir_scope(cx, &mir, &has_variables, debug_context, scope, &mut scopes);
     }
 
     scopes
 }
 
-fn make_mir_scope(ccx: &CrateContext,
+fn make_mir_scope(cx: &CodegenCx,
                   mir: &Mir,
                   has_variables: &BitVector,
                   debug_context: &FunctionDebugContextData,
@@ -89,11 +89,11 @@
 
     let scope_data = &mir.visibility_scopes[scope];
     let parent_scope = if let Some(parent) = scope_data.parent_scope {
-        make_mir_scope(ccx, mir, has_variables, debug_context, parent, scopes);
+        make_mir_scope(cx, mir, has_variables, debug_context, parent, scopes);
         scopes[parent]
     } else {
         // The root is the function itself.
-        let loc = span_start(ccx, mir.span);
+        let loc = span_start(cx, mir.span);
         scopes[scope] = MirDebugScope {
             scope_metadata: debug_context.fn_metadata,
             file_start_pos: loc.file.start_pos,
@@ -115,14 +115,14 @@
         }
     }
 
-    let loc = span_start(ccx, scope_data.span);
-    let file_metadata = file_metadata(ccx,
+    let loc = span_start(cx, scope_data.span);
+    let file_metadata = file_metadata(cx,
                                       &loc.file.name,
                                       debug_context.defining_crate);
 
     let scope_metadata = unsafe {
         llvm::LLVMRustDIBuilderCreateLexicalBlock(
-            DIB(ccx),
+            DIB(cx),
             parent_scope.scope_metadata,
             file_metadata,
             loc.line as c_uint,
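The parent-first recursion in `make_mir_scope` is the heart of this file. A self-contained model, with indices in place of `DIScope`s and an arbitrary combining step standing in for `LLVMRustDIBuilderCreateLexicalBlock`:

    fn materialize(parents: &[Option<usize>], out: &mut Vec<Option<u32>>, scope: usize) {
        if out[scope].is_some() {
            return; // already instantiated
        }
        let parent_id = match parents[scope] {
            Some(p) => {
                materialize(parents, out, p); // the parent must exist first
                out[p].unwrap()
            }
            None => 0, // the root is the function itself
        };
        out[scope] = Some(parent_id * 10 + scope as u32 + 1);
    }

    fn main() {
        let parents = [None, Some(0), Some(1)];
        let mut out = vec![None; parents.len()];
        for s in 0..parents.len() {
            materialize(&parents, &mut out, s);
        }
        assert_eq!(out, vec![Some(1), Some(12), Some(123)]);
    }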
diff --git a/src/librustc_trans/debuginfo/doc.rs b/src/librustc_trans/debuginfo/doc.rs
index 93dca03..cbecc0e 100644
--- a/src/librustc_trans/debuginfo/doc.rs
+++ b/src/librustc_trans/debuginfo/doc.rs
@@ -32,7 +32,7 @@
 //! The public API of the module is a set of functions that will insert the
 //! correct metadata into the LLVM IR when called with the right parameters.
 //! The module is thus driven from an outside client with functions like
-//! `debuginfo::create_local_var_metadata(bcx: block, local: &ast::local)`.
+//! `debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
 //!
 //! Internally the module will try to reuse already created metadata by
 //! utilizing a cache. The way to get a shared metadata node when needed is
@@ -44,8 +44,8 @@
 //! that exact file path.
 //!
 //! All private state used by the module is stored within either the
-//! CrateDebugContext struct (owned by the CrateContext) or the
-//! FunctionDebugContext (owned by the MirContext).
+//! CrateDebugContext struct (owned by the CodegenCx) or the
+//! FunctionDebugContext (owned by the FunctionCx).
 //!
 //! This file consists of three conceptual sections:
 //! 1. The public interface of the module
diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs
index 14d3fa4..03e7c63 100644
--- a/src/librustc_trans/debuginfo/gdb.rs
+++ b/src/librustc_trans/debuginfo/gdb.rs
@@ -12,7 +12,7 @@
 
 use llvm;
 
-use common::{C_bytes, CrateContext, C_i32};
+use common::{C_bytes, CodegenCx, C_i32};
 use builder::Builder;
 use declare;
 use type_::Type;
@@ -24,14 +24,14 @@
 
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
-pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext, builder: &Builder) {
-    if needs_gdb_debug_scripts_section(ccx) {
-        let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(ccx);
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
+    if needs_gdb_debug_scripts_section(bx.cx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
-        let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices);
-        let volative_load_instruction = builder.volatile_load(element);
+        let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)];
+        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
+        let volatile_load_instruction = bx.volatile_load(element);
         unsafe {
             llvm::LLVMSetAlignment(volatile_load_instruction, 1);
         }
@@ -40,13 +40,13 @@
 
 /// Allocates the global variable responsible for the .debug_gdb_scripts binary
 /// section.
-pub fn get_or_insert_gdb_debug_scripts_section_global(ccx: &CrateContext)
+pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx)
                                                   -> llvm::ValueRef {
     let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
     let section_var_name = &c_section_var_name[..c_section_var_name.len()-1];
 
     let section_var = unsafe {
-        llvm::LLVMGetNamedGlobal(ccx.llmod(),
+        llvm::LLVMGetNamedGlobal(cx.llmod,
                                  c_section_var_name.as_ptr() as *const _)
     };
 
@@ -55,15 +55,15 @@
         let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
 
         unsafe {
-            let llvm_type = Type::array(&Type::i8(ccx),
+            let llvm_type = Type::array(&Type::i8(cx),
                                         section_contents.len() as u64);
 
-            let section_var = declare::define_global(ccx, section_var_name,
+            let section_var = declare::define_global(cx, section_var_name,
                                                      llvm_type).unwrap_or_else(||{
                 bug!("symbol `{}` is already defined", section_var_name)
             });
             llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
-            llvm::LLVMSetInitializer(section_var, C_bytes(ccx, section_contents));
+            llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents));
             llvm::LLVMSetGlobalConstant(section_var, llvm::True);
             llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
             llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
@@ -77,13 +77,13 @@
     }
 }
 
-pub fn needs_gdb_debug_scripts_section(ccx: &CrateContext) -> bool {
+pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool {
     let omit_gdb_pretty_printer_section =
-        attr::contains_name(&ccx.tcx().hir.krate_attrs(),
+        attr::contains_name(&cx.tcx.hir.krate_attrs(),
                             "omit_gdb_pretty_printer_section");
 
     !omit_gdb_pretty_printer_section &&
-    !ccx.sess().target.target.options.is_like_osx &&
-    !ccx.sess().target.target.options.is_like_windows &&
-    ccx.sess().opts.debuginfo != NoDebugInfo
+    !cx.sess().target.target.options.is_like_osx &&
+    !cx.sess().target.target.options.is_like_windows &&
+    cx.sess().opts.debuginfo != NoDebugInfo
 }
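The one-byte volatile load above is a classic keep-alive trick; the same idea in plain Rust (a sketch, not what trans actually emits):

    static SECTION: &[u8] = b"\x01gdb_load_rust_pretty_printers.py\0";

    fn keep_section_alive() {
        // A volatile read cannot be optimized away, so the global stays
        // referenced even though its value is unused.
        let _first_byte = unsafe { std::ptr::read_volatile(&SECTION[0]) };
    }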
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs
index 6f35565..62ba918 100644
--- a/src/librustc_trans/debuginfo/metadata.rs
+++ b/src/librustc_trans/debuginfo/metadata.rs
@@ -18,7 +18,6 @@
 use super::type_names::compute_debuginfo_type_name;
 use super::{CrateDebugContext};
 use abi;
-use context::SharedCrateContext;
 
 use llvm::{self, ValueRef};
 use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor,
@@ -30,10 +29,10 @@
 use rustc::ty::util::TypeIdHasher;
 use rustc::ich::Fingerprint;
 use rustc::ty::Instance;
-use common::CrateContext;
-use rustc::ty::{self, AdtKind, Ty};
+use common::CodegenCx;
+use rustc::ty::{self, AdtKind, Ty, TyCtxt};
 use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
-use rustc::session::{Session, config};
+use rustc::session::config;
 use rustc::util::nodemap::FxHashMap;
 use rustc::util::common::path2cstr;
 
@@ -134,7 +133,7 @@
     // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given
     // type has been requested before, this is just a table lookup. Otherwise an
     // ID will be generated and stored for later lookup.
-    fn get_unique_type_id_of_type<'a>(&mut self, cx: &CrateContext<'a, 'tcx>,
+    fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>,
                                       type_: Ty<'tcx>) -> UniqueTypeId {
         // Let's see if we already have something in the cache
         match self.type_to_unique_id.get(&type_).cloned() {
@@ -144,7 +143,7 @@
 
         // The hasher we are using to generate the UniqueTypeId. We want
         // something that provides more than the 64 bits of the DefaultHasher.
-        let mut type_id_hasher = TypeIdHasher::<Fingerprint>::new(cx.tcx());
+        let mut type_id_hasher = TypeIdHasher::<Fingerprint>::new(cx.tcx);
         type_id_hasher.visit_ty(type_);
         let unique_type_id = type_id_hasher.finish().to_hex();
 
@@ -158,7 +157,7 @@
     // types of their own, so they need special handling. We still need a
     // UniqueTypeId for them, since to debuginfo they *are* real types.
     fn get_unique_type_id_of_enum_variant<'a>(&mut self,
-                                              cx: &CrateContext<'a, 'tcx>,
+                                              cx: &CodegenCx<'a, 'tcx>,
                                               enum_type: Ty<'tcx>,
                                               variant_name: &str)
                                               -> UniqueTypeId {
@@ -187,7 +186,7 @@
 }
 
 fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
-    cx: &CrateContext<'a, 'tcx>,
+    cx: &CodegenCx<'a, 'tcx>,
     unfinished_type: Ty<'tcx>,
     unique_type_id: UniqueTypeId,
     metadata_stub: DICompositeType,
@@ -211,7 +210,7 @@
     // Finishes up the description of the type in question (mostly by providing
     // descriptions of the fields of the given type) and returns the final type
     // metadata.
-    fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult {
+    fn finalize<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> MetadataCreationResult {
         match *self {
             FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
             UnfinishedMetadata {
@@ -263,7 +262,7 @@
     )
 }
 
-fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn fixed_vec_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                 unique_type_id: UniqueTypeId,
                                 array_or_slice_type: Ty<'tcx>,
                                 element_type: Ty<'tcx>,
@@ -299,13 +298,13 @@
     return MetadataCreationResult::new(metadata, false);
 }
 
-fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn vec_slice_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                 slice_ptr_type: Ty<'tcx>,
                                 element_type: Ty<'tcx>,
                                 unique_type_id: UniqueTypeId,
                                 span: Span)
                                 -> MetadataCreationResult {
-    let data_ptr_type = cx.tcx().mk_imm_ptr(element_type);
+    let data_ptr_type = cx.tcx.mk_imm_ptr(element_type);
 
     let data_ptr_metadata = type_metadata(cx, data_ptr_type, span);
 
@@ -314,7 +313,7 @@
     let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true);
 
     let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type);
-    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx().types.usize);
+    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize);
 
     let member_descriptions = [
         MemberDescription {
@@ -327,7 +326,7 @@
         },
         MemberDescription {
             name: "length".to_string(),
-            type_metadata: type_metadata(cx, cx.tcx().types.usize, span),
+            type_metadata: type_metadata(cx, cx.tcx.types.usize, span),
             offset: pointer_size,
             size: usize_size,
             align: usize_align,
@@ -348,13 +347,13 @@
     MetadataCreationResult::new(metadata, false)
 }
 
-fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn subroutine_type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                       unique_type_id: UniqueTypeId,
                                       signature: ty::PolyFnSig<'tcx>,
                                       span: Span)
                                       -> MetadataCreationResult
 {
-    let signature = cx.tcx().erase_late_bound_regions_and_normalize(&signature);
+    let signature = cx.tcx.erase_late_bound_regions_and_normalize(&signature);
 
     let mut signature_metadata: Vec<DIType> = Vec::with_capacity(signature.inputs().len() + 1);
 
@@ -387,7 +386,7 @@
 // trait_type should be the actual trait (e.g., Trait). Where the trait is part
 // of a DST struct, there is no trait_object_type and the results of this
 // function will be a little bit weird.
-fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn trait_pointer_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                     trait_type: Ty<'tcx>,
                                     trait_object_type: Option<Ty<'tcx>>,
                                     unique_type_id: UniqueTypeId)
@@ -416,7 +415,7 @@
 
     let file_metadata = unknown_file_metadata(cx);
 
-    let layout = cx.layout_of(cx.tcx().mk_mut_ptr(trait_type));
+    let layout = cx.layout_of(cx.tcx.mk_mut_ptr(trait_type));
 
     assert_eq!(abi::FAT_PTR_ADDR, 0);
     assert_eq!(abi::FAT_PTR_EXTRA, 1);
@@ -427,7 +426,7 @@
         MemberDescription {
             name: "pointer".to_string(),
             type_metadata: type_metadata(cx,
-                cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
+                cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
                 syntax_pos::DUMMY_SP),
             offset: layout.fields.offset(0),
             size: data_ptr_field.size,
@@ -454,7 +453,7 @@
                             syntax_pos::DUMMY_SP)
 }
 
-pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                t: Ty<'tcx>,
                                usage_site_span: Span)
                                -> DIType {
@@ -499,7 +498,7 @@
                 Ok(vec_slice_metadata(cx, t, typ, unique_type_id, usage_site_span))
             }
             ty::TyStr => {
-                Ok(vec_slice_metadata(cx, t, cx.tcx().types.u8, unique_type_id, usage_site_span))
+                Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id, usage_site_span))
             }
             ty::TyDynamic(..) => {
                 Ok(MetadataCreationResult::new(
@@ -539,7 +538,7 @@
             fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
         }
         ty::TyStr => {
-            fixed_vec_metadata(cx, unique_type_id, t, cx.tcx().types.i8, usage_site_span)
+            fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8, usage_site_span)
         }
         ty::TyDynamic(..) => {
             MetadataCreationResult::new(
@@ -567,7 +566,7 @@
         ty::TyFnDef(..) | ty::TyFnPtr(_) => {
             let fn_metadata = subroutine_type_metadata(cx,
                                                        unique_type_id,
-                                                       t.fn_sig(cx.tcx()),
+                                                       t.fn_sig(cx.tcx),
                                                        usage_site_span).metadata;
             match debug_context(cx).type_map
                                    .borrow()
@@ -581,7 +580,7 @@
 
         }
         ty::TyClosure(def_id, substs) => {
-            let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx()).collect();
+            let upvar_tys : Vec<_> = substs.upvar_tys(def_id, cx.tcx).collect();
             prepare_tuple_metadata(cx,
                                    t,
                                    &upvar_tys,
@@ -589,8 +588,8 @@
                                    usage_site_span).finalize(cx)
         }
         ty::TyGenerator(def_id, substs, _) => {
-            let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx()).map(|t| {
-                cx.tcx().fully_normalize_associated_types_in(&t)
+            let upvar_tys : Vec<_> = substs.field_tys(def_id, cx.tcx).map(|t| {
+                cx.tcx.fully_normalize_associated_types_in(&t)
             }).collect();
             prepare_tuple_metadata(cx,
                                    t,
@@ -674,7 +673,7 @@
     metadata
 }
 
-pub fn file_metadata(cx: &CrateContext,
+pub fn file_metadata(cx: &CodegenCx,
                      file_name: &FileName,
                      defining_crate: CrateNum) -> DIFile {
     debug!("file_metadata: file_name: {}, defining_crate: {}",
@@ -692,11 +691,11 @@
     file_metadata_raw(cx, &file_name.to_string(), &directory.to_string_lossy())
 }
 
-pub fn unknown_file_metadata(cx: &CrateContext) -> DIFile {
+pub fn unknown_file_metadata(cx: &CodegenCx) -> DIFile {
     file_metadata_raw(cx, "<unknown>", "")
 }
 
-fn file_metadata_raw(cx: &CrateContext,
+fn file_metadata_raw(cx: &CodegenCx,
                      file_name: &str,
                      directory: &str)
                      -> DIFile {
@@ -722,7 +721,7 @@
     file_metadata
 }
 
-fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn basic_type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                  t: Ty<'tcx>) -> DIType {
 
     debug!("basic_type_metadata: {:?}", t);
@@ -759,7 +758,7 @@
     return ty_metadata;
 }
 
-fn foreign_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn foreign_type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                    t: Ty<'tcx>,
                                    unique_type_id: UniqueTypeId) -> DIType {
     debug!("foreign_type_metadata: {:?}", t);
@@ -768,7 +767,7 @@
     create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA)
 }
 
-fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn pointer_type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                    pointer_type: Ty<'tcx>,
                                    pointee_type_metadata: DIType)
                                    -> DIType {
@@ -785,21 +784,20 @@
     }
 }
 
-pub fn compile_unit_metadata(scc: &SharedCrateContext,
+pub fn compile_unit_metadata(tcx: TyCtxt,
                              codegen_unit_name: &str,
-                             debug_context: &CrateDebugContext,
-                             sess: &Session)
+                             debug_context: &CrateDebugContext)
                              -> DIDescriptor {
-    let mut name_in_debuginfo = match sess.local_crate_source_file {
+    let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
         Some(ref path) => path.clone(),
-        None => PathBuf::from(&*scc.tcx().crate_name(LOCAL_CRATE).as_str()),
+        None => PathBuf::from(&*tcx.crate_name(LOCAL_CRATE).as_str()),
     };
 
     // The OSX linker has an idiosyncrasy where it will ignore some debuginfo
     // if multiple object files with the same DW_AT_name are linked together.
     // As a workaround we generate unique names for each object file. Those do
     // not correspond to an actual source file but that should be harmless.
-    if scc.sess().target.target.options.is_like_osx {
+    if tcx.sess.target.target.options.is_like_osx {
         name_in_debuginfo.push("@");
         name_in_debuginfo.push(codegen_unit_name);
     }
@@ -811,7 +809,7 @@
 
     let name_in_debuginfo = name_in_debuginfo.to_string_lossy().into_owned();
     let name_in_debuginfo = CString::new(name_in_debuginfo).unwrap();
-    let work_dir = CString::new(&sess.working_dir.0.to_string_lossy()[..]).unwrap();
+    let work_dir = CString::new(&tcx.sess.working_dir.0.to_string_lossy()[..]).unwrap();
     let producer = CString::new(producer).unwrap();
     let flags = "\0";
     let split_name = "\0";
@@ -825,20 +823,20 @@
             DW_LANG_RUST,
             file_metadata,
             producer.as_ptr(),
-            sess.opts.optimize != config::OptLevel::No,
+            tcx.sess.opts.optimize != config::OptLevel::No,
             flags.as_ptr() as *const _,
             0,
             split_name.as_ptr() as *const _);
 
-        if sess.opts.debugging_opts.profile {
+        if tcx.sess.opts.debugging_opts.profile {
             let cu_desc_metadata = llvm::LLVMRustMetadataAsValue(debug_context.llcontext,
                                                                  unit_metadata);
 
             let gcov_cu_info = [
                 path_to_mdstring(debug_context.llcontext,
-                                 &scc.tcx().output_filenames(LOCAL_CRATE).with_extension("gcno")),
+                                 &tcx.output_filenames(LOCAL_CRATE).with_extension("gcno")),
                 path_to_mdstring(debug_context.llcontext,
-                                 &scc.tcx().output_filenames(LOCAL_CRATE).with_extension("gcda")),
+                                 &tcx.output_filenames(LOCAL_CRATE).with_extension("gcda")),
                 cu_desc_metadata,
             ];
             let gcov_metadata = llvm::LLVMMDNodeInContext(debug_context.llcontext,
@@ -903,7 +901,7 @@
 }
 
 impl<'tcx> MemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         match *self {
             StructMDF(ref this) => {
@@ -937,7 +935,7 @@
 }
 
 impl<'tcx> StructMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let layout = cx.layout_of(self.ty);
         self.variant.fields.iter().enumerate().map(|(i, f)| {
@@ -961,7 +959,7 @@
 }
 
 
-fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn prepare_struct_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                      struct_type: Ty<'tcx>,
                                      unique_type_id: UniqueTypeId,
                                      span: Span)
@@ -1006,7 +1004,7 @@
 }
 
 impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let layout = cx.layout_of(self.ty);
         self.component_types.iter().enumerate().map(|(i, &component_type)| {
@@ -1023,7 +1021,7 @@
     }
 }
 
-fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn prepare_tuple_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                     tuple_type: Ty<'tcx>,
                                     component_types: &[Ty<'tcx>],
                                     unique_type_id: UniqueTypeId,
@@ -1059,7 +1057,7 @@
 }
 
 impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         self.variant.fields.iter().enumerate().map(|(i, f)| {
             let field = self.layout.field(cx, i);
@@ -1076,7 +1074,7 @@
     }
 }
 
-fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn prepare_union_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                     union_type: Ty<'tcx>,
                                     unique_type_id: UniqueTypeId,
                                     span: Span)
@@ -1127,7 +1125,7 @@
 }
 
 impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let adt = &self.enum_type.ty_adt_def().unwrap();
         match self.layout.variants {
@@ -1212,7 +1210,7 @@
                 // of discriminant instead of us having to recover its path.
                 // Right now it's not even going to work for `niche_start > 0`,
                 // and for multiple niche variants it only supports the first.
-                fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                                 name: &mut String,
                                                 layout: TyLayout<'tcx>,
                                                 offset: Size,
@@ -1223,10 +1221,10 @@
                             continue;
                         }
                         let inner_offset = offset - field_offset;
-                        let field = layout.field(ccx, i);
+                        let field = layout.field(cx, i);
                         if inner_offset + size <= field.size {
                             write!(name, "{}$", i).unwrap();
-                            compute_field_path(ccx, name, field, inner_offset, size);
+                            compute_field_path(cx, name, field, inner_offset, size);
                         }
                     }
                 }
@@ -1262,7 +1260,7 @@
 }
 
 impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
-    fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
+    fn create_member_descriptions<'a>(&self, cx: &CodegenCx<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
             let (size, align) = cx.size_and_align_of(ty);
@@ -1292,7 +1290,7 @@
 // of the variant, and (3) a MemberDescriptionFactory for producing the
 // descriptions of the fields of the variant. This is a rudimentary version of a
 // full RecursiveTypeDescription.
-fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn describe_enum_variant<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                    layout: layout::TyLayout<'tcx>,
                                    variant: &'tcx ty::VariantDef,
                                    discriminant_info: EnumDiscriminantInfo,
@@ -1352,7 +1350,7 @@
     (metadata_stub, member_description_factory)
 }
 
-fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn prepare_enum_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                    enum_type: Ty<'tcx>,
                                    enum_def_id: DefId,
                                    unique_type_id: UniqueTypeId,
@@ -1370,7 +1368,7 @@
     let file_metadata = unknown_file_metadata(cx);
 
     let def = enum_type.ty_adt_def().unwrap();
-    let enumerators_metadata: Vec<DIDescriptor> = def.discriminants(cx.tcx())
+    let enumerators_metadata: Vec<DIDescriptor> = def.discriminants(cx.tcx)
         .zip(&def.variants)
         .map(|(discr, v)| {
             let token = v.name.as_str();
@@ -1396,7 +1394,7 @@
                 let (discriminant_size, discriminant_align) =
                     (discr.size(cx), discr.align(cx));
                 let discriminant_base_type_metadata =
-                    type_metadata(cx, discr.to_ty(cx.tcx()), syntax_pos::DUMMY_SP);
+                    type_metadata(cx, discr.to_ty(cx.tcx), syntax_pos::DUMMY_SP);
                 let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
 
                 let name = CString::new(discriminant_name.as_bytes()).unwrap();
@@ -1472,10 +1470,10 @@
         }),
     );
 
-    fn get_enum_discriminant_name(cx: &CrateContext,
+    fn get_enum_discriminant_name(cx: &CodegenCx,
                                   def_id: DefId)
                                   -> InternedString {
-        cx.tcx().item_name(def_id)
+        cx.tcx.item_name(def_id)
     }
 }
 
@@ -1483,7 +1481,7 @@
 /// results in a LLVM struct.
 ///
 /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
-fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn composite_type_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                      composite_type: Ty<'tcx>,
                                      composite_type_name: &str,
                                      composite_type_unique_id: UniqueTypeId,
@@ -1509,7 +1507,7 @@
     return composite_type_metadata;
 }
 
-fn set_members_of_composite_type(cx: &CrateContext,
+fn set_members_of_composite_type(cx: &CodegenCx,
                                  composite_type_metadata: DICompositeType,
                                  member_descriptions: &[MemberDescription]) {
     // In some rare cases LLVM metadata uniquing would lead to an existing type
@@ -1560,7 +1558,7 @@
 // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do
 // any caching, does not add any fields to the struct. This can be done later
 // with set_members_of_composite_type().
-fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn create_struct_stub<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                 struct_type: Ty<'tcx>,
                                 struct_type_name: &str,
                                 unique_type_id: UniqueTypeId,
@@ -1597,7 +1595,7 @@
     return metadata_stub;
 }
 
-fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+fn create_union_stub<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                union_type: Ty<'tcx>,
                                union_type_name: &str,
                                unique_type_id: UniqueTypeId,
@@ -1635,20 +1633,20 @@
 /// Creates debug information for the given global variable.
 ///
 /// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_global_var_metadata(cx: &CrateContext,
+pub fn create_global_var_metadata(cx: &CodegenCx,
                                   node_id: ast::NodeId,
                                   global: ValueRef) {
-    if cx.dbg_cx().is_none() {
+    if cx.dbg_cx.is_none() {
         return;
     }
 
-    let tcx = cx.tcx();
+    let tcx = cx.tcx;
     let node_def_id = tcx.hir.local_def_id(node_id);
     let no_mangle = attr::contains_name(&tcx.get_attrs(node_def_id), "no_mangle");
     // We may want to remove the namespace scope if we're in an extern block, see:
     // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952
     let var_scope = get_namespace_for_item(cx, node_def_id);
-    let span = cx.tcx().def_span(node_def_id);
+    let span = cx.tcx.def_span(node_def_id);
 
     let (file_metadata, line_number) = if span != syntax_pos::DUMMY_SP {
         let loc = span_start(cx, span);
@@ -1658,7 +1656,7 @@
     };
 
     let is_local_to_unit = is_node_local_to_unit(cx, node_id);
-    let variable_type = Instance::mono(cx.tcx(), node_def_id).ty(cx.tcx());
+    let variable_type = Instance::mono(cx.tcx, node_def_id).ty(cx.tcx);
     let type_metadata = type_metadata(cx, variable_type, span);
     let var_name = tcx.item_name(node_def_id).to_string();
     let var_name = CString::new(var_name).unwrap();
@@ -1691,15 +1689,15 @@
 }
 
 // Creates an "extension" of an existing DIScope into another file.
-pub fn extend_scope_to_file(ccx: &CrateContext,
+pub fn extend_scope_to_file(cx: &CodegenCx,
                             scope_metadata: DIScope,
                             file: &syntax_pos::FileMap,
                             defining_crate: CrateNum)
                             -> DILexicalBlock {
-    let file_metadata = file_metadata(ccx, &file.name, defining_crate);
+    let file_metadata = file_metadata(cx, &file.name, defining_crate);
     unsafe {
         llvm::LLVMRustDIBuilderCreateLexicalBlockFile(
-            DIB(ccx),
+            DIB(cx),
             scope_metadata,
             file_metadata)
     }
@@ -1709,10 +1707,10 @@
 /// given type.
 ///
 /// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn create_vtable_metadata<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                         ty: ty::Ty<'tcx>,
                                         vtable: ValueRef) {
-    if cx.dbg_cx().is_none() {
+    if cx.dbg_cx.is_none() {
         return;
     }
 
@@ -1736,7 +1734,7 @@
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
             Size::from_bytes(0).bits(),
-            cx.tcx().data_layout.pointer_align.abi_bits() as u32,
+            cx.tcx.data_layout.pointer_align.abi_bits() as u32,
             DIFlags::FlagArtificial,
             ptr::null_mut(),
             empty_array,
diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs
index 3f9ace1..b46e12d 100644
--- a/src/librustc_trans/debuginfo/mod.rs
+++ b/src/librustc_trans/debuginfo/mod.rs
@@ -27,7 +27,7 @@
 use rustc::ty::subst::Substs;
 
 use abi::Abi;
-use common::CrateContext;
+use common::CodegenCx;
 use builder::Builder;
 use monomorphize::Instance;
 use rustc::ty::{self, Ty};
@@ -150,8 +150,8 @@
 }
 
 /// Create any deferred debug metadata nodes
-pub fn finalize(cx: &CrateContext) {
-    if cx.dbg_cx().is_none() {
+pub fn finalize(cx: &CodegenCx) {
+    if cx.dbg_cx.is_none() {
         return;
     }
 
@@ -176,21 +176,21 @@
         // Android has the same issue (#22398)
         if cx.sess().target.target.options.is_like_osx ||
            cx.sess().target.target.options.is_like_android {
-            llvm::LLVMRustAddModuleFlag(cx.llmod(),
+            llvm::LLVMRustAddModuleFlag(cx.llmod,
                                         "Dwarf Version\0".as_ptr() as *const _,
                                         2)
         }
 
         // Indicate that we want CodeView debug information on MSVC
         if cx.sess().target.target.options.is_like_msvc {
-            llvm::LLVMRustAddModuleFlag(cx.llmod(),
+            llvm::LLVMRustAddModuleFlag(cx.llmod,
                                         "CodeView\0".as_ptr() as *const _,
                                         1)
         }
 
         // Prevent bitcode readers from deleting the debug info.
         let ptr = "Debug Info Version\0".as_ptr();
-        llvm::LLVMRustAddModuleFlag(cx.llmod(), ptr as *const _,
+        llvm::LLVMRustAddModuleFlag(cx.llmod, ptr as *const _,
                                     llvm::LLVMRustDebugMetadataVersion());
     };
 }
@@ -201,7 +201,7 @@
 /// for debug info creation. The function may also return another variant of the
 /// FunctionDebugContext enum which indicates why no debuginfo should be created
 /// for the function.
-pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn create_function_debug_context<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                                instance: Instance<'tcx>,
                                                sig: ty::FnSig<'tcx>,
                                                llfn: ValueRef,
@@ -210,7 +210,7 @@
         return FunctionDebugContext::DebugInfoDisabled;
     }
 
-    for attr in instance.def.attrs(cx.tcx()).iter() {
+    for attr in instance.def.attrs(cx.tcx).iter() {
         if attr.check_name("no_debug") {
             return FunctionDebugContext::FunctionWithoutDebugInfo;
         }
@@ -235,15 +235,15 @@
     };
 
     // Find the enclosing function, in case this is a closure.
-    let def_key = cx.tcx().def_key(def_id);
+    let def_key = cx.tcx.def_key(def_id);
     let mut name = def_key.disambiguated_data.data.to_string();
 
-    let enclosing_fn_def_id = cx.tcx().closure_base_def_id(def_id);
+    let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id);
 
     // get_template_parameters() will append a `<...>` clause to the function
     // name if necessary.
-    let generics = cx.tcx().generics_of(enclosing_fn_def_id);
-    let substs = instance.substs.truncate_to(cx.tcx(), generics);
+    let generics = cx.tcx.generics_of(enclosing_fn_def_id);
+    let substs = instance.substs.truncate_to(cx.tcx, generics);
     let template_parameters = get_template_parameters(cx,
                                                       &generics,
                                                       substs,
@@ -255,7 +255,7 @@
 
     let scope_line = span_start(cx, span).line;
 
-    let local_id = cx.tcx().hir.as_local_node_id(instance.def_id());
+    let local_id = cx.tcx.hir.as_local_node_id(instance.def_id());
     let is_local_to_unit = local_id.map_or(false, |id| is_node_local_to_unit(cx, id));
 
     let function_name = CString::new(name).unwrap();
@@ -299,7 +299,7 @@
 
     return FunctionDebugContext::RegularContext(fn_debug_context);
 
-    fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+    fn get_function_signature<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                         sig: ty::FnSig<'tcx>) -> DIArray {
         if cx.sess().opts.debuginfo == LimitedDebugInfo {
             return create_DIArray(DIB(cx), &[]);
@@ -334,8 +334,8 @@
             signature.extend(inputs.iter().map(|&t| {
                 let t = match t.sty {
                     ty::TyArray(ct, _)
-                        if (ct == cx.tcx().types.u8) || cx.layout_of(ct).is_zst() => {
-                        cx.tcx().mk_imm_ptr(ct)
+                        if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => {
+                        cx.tcx.mk_imm_ptr(ct)
                     }
                     _ => t
                 };
@@ -358,7 +358,7 @@
         return create_DIArray(DIB(cx), &signature[..]);
     }
 
-    fn get_template_parameters<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+    fn get_template_parameters<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                          generics: &ty::Generics,
                                          substs: &Substs<'tcx>,
                                          file_metadata: DIFile,
@@ -375,7 +375,7 @@
                 name_to_append_suffix_to.push_str(",");
             }
 
-            let actual_type = cx.tcx().fully_normalize_associated_types_in(&actual_type);
+            let actual_type = cx.tcx.fully_normalize_associated_types_in(&actual_type);
             // Add actual type name to <...> clause of function name
             let actual_type_name = compute_debuginfo_type_name(cx,
                                                                actual_type,
@@ -388,7 +388,7 @@
         let template_params: Vec<_> = if cx.sess().opts.debuginfo == FullDebugInfo {
             let names = get_type_parameter_names(cx, generics);
             substs.types().zip(names).map(|(ty, name)| {
-                let actual_type = cx.tcx().fully_normalize_associated_types_in(&ty);
+                let actual_type = cx.tcx.fully_normalize_associated_types_in(&ty);
                 let actual_type_metadata = type_metadata(cx, actual_type, syntax_pos::DUMMY_SP);
                 let name = CString::new(name.as_str().as_bytes()).unwrap();
                 unsafe {
@@ -409,24 +409,24 @@
         return create_DIArray(DIB(cx), &template_params[..]);
     }
 
-    fn get_type_parameter_names(cx: &CrateContext, generics: &ty::Generics) -> Vec<ast::Name> {
+    fn get_type_parameter_names(cx: &CodegenCx, generics: &ty::Generics) -> Vec<ast::Name> {
         let mut names = generics.parent.map_or(vec![], |def_id| {
-            get_type_parameter_names(cx, cx.tcx().generics_of(def_id))
+            get_type_parameter_names(cx, cx.tcx.generics_of(def_id))
         });
         names.extend(generics.types.iter().map(|param| param.name));
         names
     }
 
-    fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
+    fn get_containing_scope<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
                                         instance: Instance<'tcx>)
                                         -> DIScope {
         // First, let's see if this is a method within an inherent impl; if
         // so, we want to make the resulting subroutine DIE a child of the
         // subroutine's self-type.
-        let self_type = cx.tcx().impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+        let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
             // If the method does *not* belong to a trait, proceed
-            if cx.tcx().trait_id_of_impl(impl_def_id).is_none() {
-                let impl_self_ty = cx.tcx().trans_impl_self_ty(impl_def_id, instance.substs);
+            if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+                let impl_self_ty = cx.tcx.trans_impl_self_ty(impl_def_id, instance.substs);
 
                 // Only "class" methods are generally understood by LLVM,
                 // so avoid methods on other types (e.g. `<*mut T>::null`).
@@ -446,7 +446,7 @@
         self_type.unwrap_or_else(|| {
             namespace::item_namespace(cx, DefId {
                 krate: instance.def_id().krate,
-                index: cx.tcx()
+                index: cx.tcx
                          .def_key(instance.def_id())
                          .parent
                          .expect("get_containing_scope: missing parent?")
@@ -455,7 +455,7 @@
     }
 }
 
-pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn declare_local<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                dbg_context: &FunctionDebugContext,
                                variable_name: ast::Name,
                                variable_type: Ty<'tcx>,
@@ -463,7 +463,7 @@
                                variable_access: VariableAccess,
                                variable_kind: VariableKind,
                                span: Span) {
-    let cx = bcx.ccx;
+    let cx = bx.cx;
 
     let file = span_start(cx, span).file;
     let file_metadata = file_metadata(cx,
@@ -499,10 +499,10 @@
                     align.abi() as u32,
                 )
             };
-            source_loc::set_debug_location(bcx,
+            source_loc::set_debug_location(bx,
                 InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize()));
             unsafe {
-                let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder);
+                let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder);
                 let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
                     DIB(cx),
                     alloca,
@@ -510,9 +510,9 @@
                     address_operations.as_ptr(),
                     address_operations.len() as c_uint,
                     debug_loc,
-                    bcx.llbb());
+                    bx.llbb());
 
-                llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr);
+                llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr);
             }
         }
     }
@@ -520,7 +520,7 @@
     match variable_kind {
         ArgumentVariable(_) | CapturedVariable => {
             assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
-            source_loc::set_debug_location(bcx, UnknownLocation);
+            source_loc::set_debug_location(bx, UnknownLocation);
         }
         _ => { /* nothing to do */ }
     }
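
The module-flag calls in finalize() above rely on string literals that embed their own NUL terminator ("Dwarf Version\0"), so a pointer to the literal is already a valid C string and no CString allocation is needed. A minimal sketch of that pattern against a hypothetical FFI binding (ExampleAddModuleFlag is illustrative, not the rustc/LLVM API):

    use std::os::raw::c_char;

    extern "C" {
        // Hypothetical stand-in with the same name/value tail as
        // LLVMRustAddModuleFlag.
        fn ExampleAddModuleFlag(name: *const c_char, value: u32);
    }

    fn add_example_flag() {
        // The literal ends in `\0`, so its first-byte pointer is a valid
        // NUL-terminated C string; string literals are 'static, so the bytes
        // outlive the call.
        unsafe {
            ExampleAddModuleFlag("Example Flag\0".as_ptr() as *const c_char, 1);
        }
    }
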
diff --git a/src/librustc_trans/debuginfo/namespace.rs b/src/librustc_trans/debuginfo/namespace.rs
index 47e2b8c..46067a4 100644
--- a/src/librustc_trans/debuginfo/namespace.rs
+++ b/src/librustc_trans/debuginfo/namespace.rs
@@ -20,44 +20,44 @@
 use llvm::debuginfo::DIScope;
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
-use common::CrateContext;
+use common::CodegenCx;
 
 use std::ffi::CString;
 use std::ptr;
 
 pub fn mangled_name_of_instance<'a, 'tcx>(
-    ccx: &CrateContext<'a, 'tcx>,
+    cx: &CodegenCx<'a, 'tcx>,
     instance: Instance<'tcx>,
 ) -> ty::SymbolName {
-     let tcx = ccx.tcx();
+     let tcx = cx.tcx;
      tcx.symbol_name(instance)
 }
 
 pub fn mangled_name_of_item<'a, 'tcx>(
-    ccx: &CrateContext<'a, 'tcx>,
+    cx: &CodegenCx<'a, 'tcx>,
     node_id: ast::NodeId,
 ) -> ty::SymbolName {
-    let tcx = ccx.tcx();
+    let tcx = cx.tcx;
     let node_def_id = tcx.hir.local_def_id(node_id);
     let instance = Instance::mono(tcx, node_def_id);
     tcx.symbol_name(instance)
 }
 
-pub fn item_namespace(ccx: &CrateContext, def_id: DefId) -> DIScope {
-    if let Some(&scope) = debug_context(ccx).namespace_map.borrow().get(&def_id) {
+pub fn item_namespace(cx: &CodegenCx, def_id: DefId) -> DIScope {
+    if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
         return scope;
     }
 
-    let def_key = ccx.tcx().def_key(def_id);
+    let def_key = cx.tcx.def_key(def_id);
     let parent_scope = def_key.parent.map_or(ptr::null_mut(), |parent| {
-        item_namespace(ccx, DefId {
+        item_namespace(cx, DefId {
             krate: def_id.krate,
             index: parent
         })
     });
 
     let namespace_name = match def_key.disambiguated_data.data {
-        DefPathData::CrateRoot => ccx.tcx().crate_name(def_id.krate).as_str(),
+        DefPathData::CrateRoot => cx.tcx.crate_name(def_id.krate).as_str(),
         data => data.as_interned_str()
     };
 
@@ -65,13 +65,13 @@
 
     let scope = unsafe {
         llvm::LLVMRustDIBuilderCreateNameSpace(
-            DIB(ccx),
+            DIB(cx),
             parent_scope,
             namespace_name.as_ptr(),
-            unknown_file_metadata(ccx),
+            unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER)
     };
 
-    debug_context(ccx).namespace_map.borrow_mut().insert(def_id, scope);
+    debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
     scope
 }
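
item_namespace above builds DIScopes recursively, parent before child, and memoizes each result in namespace_map so a given DefId's namespace node is created exactly once. A toy sketch of that memoized-recursion shape, with integer ids and strings standing in for DefIds and DIScopes (all names here are illustrative):

    use std::cell::RefCell;
    use std::collections::HashMap;

    struct DefTree {
        parents: Vec<Option<usize>>,            // parent node per id, None at the root
        cache: RefCell<HashMap<usize, String>>, // memoized "scopes"
    }

    impl DefTree {
        fn namespace(&self, id: usize) -> String {
            // Short-lived borrow for the cache probe, like
            // namespace_map.borrow().get(..) in the real code.
            if let Some(scope) = self.cache.borrow().get(&id) {
                return scope.clone();
            }
            // Build the parent scope first, then this node beneath it.
            let scope = match self.parents[id].map(|p| self.namespace(p)) {
                Some(parent) => format!("{}::ns{}", parent, id),
                None => format!("ns{}", id),
            };
            self.cache.borrow_mut().insert(id, scope.clone());
            scope
        }
    }

Dropping the RefCell borrow before recursing is what lets the later borrow_mut() succeed; holding it across the recursive call would panic at runtime.
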
diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs
index 7393802..7440296 100644
--- a/src/librustc_trans/debuginfo/source_loc.rs
+++ b/src/librustc_trans/debuginfo/source_loc.rs
@@ -26,25 +26,25 @@
 ///
 /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...).
 pub fn set_source_location(
-    debug_context: &FunctionDebugContext, builder: &Builder, scope: DIScope, span: Span
+    debug_context: &FunctionDebugContext, bx: &Builder, scope: DIScope, span: Span
 ) {
     let function_debug_context = match *debug_context {
         FunctionDebugContext::DebugInfoDisabled => return,
         FunctionDebugContext::FunctionWithoutDebugInfo => {
-            set_debug_location(builder, UnknownLocation);
+            set_debug_location(bx, UnknownLocation);
             return;
         }
         FunctionDebugContext::RegularContext(ref data) => data
     };
 
     let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-        debug!("set_source_location: {}", builder.sess().codemap().span_to_string(span));
-        let loc = span_start(builder.ccx, span);
+        debug!("set_source_location: {}", bx.sess().codemap().span_to_string(span));
+        let loc = span_start(bx.cx, span);
         InternalDebugLocation::new(scope, loc.line, loc.col.to_usize())
     } else {
         UnknownLocation
     };
-    set_debug_location(builder, dbg_loc);
+    set_debug_location(bx, dbg_loc);
 }
 
 /// Enables emitting source locations for the given functions.
@@ -79,7 +79,7 @@
     }
 }
 
-pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocation) {
+pub fn set_debug_location(bx: &Builder, debug_location: InternalDebugLocation) {
     let metadata_node = match debug_location {
         KnownLocation { scope, line, .. } => {
             // Always set the column to zero like Clang and GCC
@@ -88,7 +88,7 @@
 
             unsafe {
                 llvm::LLVMRustDIBuilderCreateDebugLocation(
-                    debug_context(builder.ccx).llcontext,
+                    debug_context(bx.cx).llcontext,
                     line as c_uint,
                     col as c_uint,
                     scope,
@@ -102,6 +102,6 @@
     };
 
     unsafe {
-        llvm::LLVMSetCurrentDebugLocation(builder.llbuilder, metadata_node);
+        llvm::LLVMSetCurrentDebugLocation(bx.llbuilder, metadata_node);
     }
 }
diff --git a/src/librustc_trans/debuginfo/type_names.rs b/src/librustc_trans/debuginfo/type_names.rs
index 85467f5..0aec92b 100644
--- a/src/librustc_trans/debuginfo/type_names.rs
+++ b/src/librustc_trans/debuginfo/type_names.rs
@@ -10,7 +10,7 @@
 
 // Type Names for Debug Info.
 
-use common::CrateContext;
+use common::CodegenCx;
 use rustc::hir::def_id::DefId;
 use rustc::ty::subst::Substs;
 use rustc::ty::{self, Ty};
@@ -21,7 +21,7 @@
 // any caching, i.e. calling the function twice with the same type will also do
 // the work twice. The `qualified` parameter only affects the first level of the
 // type name, further levels (i.e. type parameters) are always fully qualified.
-pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                              t: Ty<'tcx>,
                                              qualified: bool)
                                              -> String {
@@ -32,7 +32,7 @@
 
 // Pushes the name of the type as it should be stored in debuginfo on the
 // `output` String. See also compute_debuginfo_type_name().
-pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                           t: Ty<'tcx>,
                                           qualified: bool,
                                           output: &mut String) {
@@ -117,14 +117,14 @@
         },
         ty::TyDynamic(ref trait_data, ..) => {
             if let Some(principal) = trait_data.principal() {
-                let principal = cx.tcx().erase_late_bound_regions_and_normalize(
+                let principal = cx.tcx.erase_late_bound_regions_and_normalize(
                     &principal);
                 push_item_name(cx, principal.def_id, false, output);
                 push_type_params(cx, principal.substs, output);
             }
         },
         ty::TyFnDef(..) | ty::TyFnPtr(_) => {
-            let sig = t.fn_sig(cx.tcx());
+            let sig = t.fn_sig(cx.tcx);
             if sig.unsafety() == hir::Unsafety::Unsafe {
                 output.push_str("unsafe ");
             }
@@ -138,7 +138,7 @@
 
             output.push_str("fn(");
 
-            let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
+            let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
             if !sig.inputs().is_empty() {
                 for &parameter_type in sig.inputs() {
                     push_debuginfo_type_name(cx, parameter_type, true, output);
@@ -179,18 +179,18 @@
         }
     }
 
-    fn push_item_name(cx: &CrateContext,
+    fn push_item_name(cx: &CodegenCx,
                       def_id: DefId,
                       qualified: bool,
                       output: &mut String) {
         if qualified {
-            output.push_str(&cx.tcx().crate_name(def_id.krate).as_str());
-            for path_element in cx.tcx().def_path(def_id).data {
+            output.push_str(&cx.tcx.crate_name(def_id.krate).as_str());
+            for path_element in cx.tcx.def_path(def_id).data {
                 output.push_str("::");
                 output.push_str(&path_element.data.as_interned_str());
             }
         } else {
-            output.push_str(&cx.tcx().item_name(def_id));
+            output.push_str(&cx.tcx.item_name(def_id));
         }
     }
 
@@ -199,7 +199,7 @@
     // reconstructed for items from non-local crates. For local crates, this
     // would be possible but with inlining and LTO we have to use the least
     // common denominator - otherwise we would run into conflicts.
-    fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+    fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                   substs: &Substs<'tcx>,
                                   output: &mut String) {
         if substs.types().next().is_none() {
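
push_item_name in the hunk above assembles qualified names by emitting the crate name and then each def-path element joined with "::". A rough standalone sketch of that string building, with plain slices standing in for the tcx queries (all names here are illustrative):

    // `krate` plays the role of tcx.crate_name(..) and `path` the role of
    // tcx.def_path(..).data.
    fn push_name(krate: &str, path: &[&str], qualified: bool, output: &mut String) {
        if qualified {
            output.push_str(krate);
            for element in path {
                output.push_str("::");
                output.push_str(element);
            }
        } else {
            // Unqualified: only the final item name (the crate name for the
            // crate root).
            output.push_str(path.last().unwrap_or(&krate));
        }
    }

    // push_name("mycrate", &["module", "Type"], true, &mut s)
    // leaves s == "mycrate::module::Type".
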
diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs
index 95427d9..c571b84 100644
--- a/src/librustc_trans/debuginfo/utils.rs
+++ b/src/librustc_trans/debuginfo/utils.rs
@@ -18,12 +18,12 @@
 
 use llvm;
 use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
-use common::{CrateContext};
+use common::{CodegenCx};
 
 use syntax_pos::{self, Span};
 use syntax::ast;
 
-pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
+pub fn is_node_local_to_unit(cx: &CodegenCx, node_id: ast::NodeId) -> bool
 {
     // The is_local_to_unit flag indicates whether a function is local to the
     // current compilation unit (i.e. if it is *static* in the C-sense). The
@@ -33,8 +33,8 @@
     // visible). It might be better to use the `exported_items` set from
     // `driver::CrateAnalysis` in the future, but (atm) this set is not
     // available in the translation pass.
-    let def_id = cx.tcx().hir.local_def_id(node_id);
-    !cx.tcx().is_exported_symbol(def_id)
+    let def_id = cx.tcx.hir.local_def_id(node_id);
+    !cx.tcx.is_exported_symbol(def_id)
 }
 
 #[allow(non_snake_case)]
@@ -45,23 +45,23 @@
 }
 
 /// Returns the syntax_pos::Loc corresponding to the beginning of the span
-pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc {
+pub fn span_start(cx: &CodegenCx, span: Span) -> syntax_pos::Loc {
     cx.sess().codemap().lookup_char_pos(span.lo())
 }
 
 #[inline]
-pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>)
+pub fn debug_context<'a, 'tcx>(cx: &'a CodegenCx<'a, 'tcx>)
                            -> &'a CrateDebugContext<'tcx> {
-    cx.dbg_cx().as_ref().unwrap()
+    cx.dbg_cx.as_ref().unwrap()
 }
 
 #[inline]
 #[allow(non_snake_case)]
-pub fn DIB(cx: &CrateContext) -> DIBuilderRef {
-    cx.dbg_cx().as_ref().unwrap().builder
+pub fn DIB(cx: &CodegenCx) -> DIBuilderRef {
+    cx.dbg_cx.as_ref().unwrap().builder
 }
 
-pub fn get_namespace_for_item(cx: &CrateContext, def_id: DefId) -> DIScope {
-    item_namespace(cx, cx.tcx().parent(def_id)
+pub fn get_namespace_for_item(cx: &CodegenCx, def_id: DefId) -> DIScope {
+    item_namespace(cx, cx.tcx.parent(def_id)
         .expect("get_namespace_for_item: missing parent?"))
 }
diff --git a/src/librustc_trans/declare.rs b/src/librustc_trans/declare.rs
index f894bdf..aa1cd0c 100644
--- a/src/librustc_trans/declare.rs
+++ b/src/librustc_trans/declare.rs
@@ -27,7 +27,7 @@
 use rustc_back::PanicStrategy;
 use abi::{Abi, FnType};
 use attributes;
-use context::CrateContext;
+use context::CodegenCx;
 use common;
 use type_::Type;
 use value::Value;
@@ -39,13 +39,13 @@
 ///
 /// If there’s a value with the same name already declared, the function will
 /// return its ValueRef instead.
-pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRef {
+pub fn declare_global(cx: &CodegenCx, name: &str, ty: Type) -> llvm::ValueRef {
     debug!("declare_global(name={:?})", name);
     let namebuf = CString::new(name).unwrap_or_else(|_|{
         bug!("name {:?} contains an interior null byte", name)
     });
     unsafe {
-        llvm::LLVMRustGetOrInsertGlobal(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
+        llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty.to_ref())
     }
 }
 
@@ -54,13 +54,13 @@
 ///
 /// If there’s a value with the same name already declared, the function will
 /// update the declaration and return the existing ValueRef instead.
-fn declare_raw_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef {
+fn declare_raw_fn(cx: &CodegenCx, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef {
     debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
     let namebuf = CString::new(name).unwrap_or_else(|_|{
         bug!("name {:?} contains an interior null byte", name)
     });
     let llfn = unsafe {
-        llvm::LLVMRustGetOrInsertFunction(ccx.llmod(), namebuf.as_ptr(), ty.to_ref())
+        llvm::LLVMRustGetOrInsertFunction(cx.llmod, namebuf.as_ptr(), ty.to_ref())
     };
 
     llvm::SetFunctionCallConv(llfn, callconv);
@@ -68,12 +68,12 @@
     // be merged.
     llvm::SetUnnamedAddr(llfn, true);
 
-    if ccx.tcx().sess.opts.cg.no_redzone
-        .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) {
+    if cx.tcx.sess.opts.cg.no_redzone
+        .unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
         llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
     }
 
-    if let Some(ref sanitizer) = ccx.tcx().sess.opts.debugging_opts.sanitizer {
+    if let Some(ref sanitizer) = cx.tcx.sess.opts.debugging_opts.sanitizer {
         match *sanitizer {
             Sanitizer::Address => {
                 llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn);
@@ -88,7 +88,7 @@
         }
     }
 
-    match ccx.tcx().sess.opts.cg.opt_level.as_ref().map(String::as_ref) {
+    match cx.tcx.sess.opts.cg.opt_level.as_ref().map(String::as_ref) {
         Some("s") => {
             llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
         },
@@ -99,7 +99,7 @@
         _ => {},
     }
 
-    if ccx.tcx().sess.panic_strategy() != PanicStrategy::Unwind {
+    if cx.tcx.sess.panic_strategy() != PanicStrategy::Unwind {
         attributes::unwind(llfn, false);
     }
 
@@ -114,8 +114,8 @@
 ///
 /// If there’s a value with the same name already declared, the function will
 /// update the declaration and return the existing ValueRef instead.
-pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef {
-    declare_raw_fn(ccx, name, llvm::CCallConv, fn_type)
+pub fn declare_cfn(cx: &CodegenCx, name: &str, fn_type: Type) -> ValueRef {
+    declare_raw_fn(cx, name, llvm::CCallConv, fn_type)
 }
 
 
@@ -123,15 +123,15 @@
 ///
 /// If there’s a value with the same name already declared, the function will
 /// update the declaration and return the existing ValueRef instead.
-pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
+pub fn declare_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, name: &str,
                             fn_type: Ty<'tcx>) -> ValueRef {
     debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
-    let sig = common::ty_fn_sig(ccx, fn_type);
-    let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
+    let sig = common::ty_fn_sig(cx, fn_type);
+    let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
     debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
 
-    let fty = FnType::new(ccx, sig, &[]);
-    let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx));
+    let fty = FnType::new(cx, sig, &[]);
+    let llfn = declare_raw_fn(cx, name, fty.cconv, fty.llvm_type(cx));
 
     // FIXME(canndrew): This is_never should really be an is_uninhabited
     if sig.output().is_never() {
@@ -154,11 +154,11 @@
 /// return None if the name already has a definition associated with it. In that
 /// case an error should be reported to the user, because it usually happens due
 /// to a user error (e.g. misuse of #[no_mangle] or #[export_name] attributes).
-pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option<ValueRef> {
-    if get_defined_value(ccx, name).is_some() {
+pub fn define_global(cx: &CodegenCx, name: &str, ty: Type) -> Option<ValueRef> {
+    if get_defined_value(cx, name).is_some() {
         None
     } else {
-        Some(declare_global(ccx, name, ty))
+        Some(declare_global(cx, name, ty))
     }
 }
 
@@ -167,13 +167,13 @@
 /// Use this function when you intend to define a function. This function will
 /// panic if the name already has a definition associated with it. This
 /// can happen with #[no_mangle] or #[export_name], for example.
-pub fn define_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn define_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                            name: &str,
                            fn_type: Ty<'tcx>) -> ValueRef {
-    if get_defined_value(ccx, name).is_some() {
-        ccx.sess().fatal(&format!("symbol `{}` already defined", name))
+    if get_defined_value(cx, name).is_some() {
+        cx.sess().fatal(&format!("symbol `{}` already defined", name))
     } else {
-        declare_fn(ccx, name, fn_type)
+        declare_fn(cx, name, fn_type)
     }
 }
 
@@ -182,22 +182,22 @@
 /// Use this function when you intend to define a function. This function will
 /// panic if the name already has a definition associated with it. This
 /// can happen with #[no_mangle] or #[export_name], for example.
-pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn define_internal_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                     name: &str,
                                     fn_type: Ty<'tcx>) -> ValueRef {
-    let llfn = define_fn(ccx, name, fn_type);
+    let llfn = define_fn(cx, name, fn_type);
     unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
     llfn
 }
 
 
 /// Get declared value by name.
-pub fn get_declared_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
+pub fn get_declared_value(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
     debug!("get_declared_value(name={:?})", name);
     let namebuf = CString::new(name).unwrap_or_else(|_|{
         bug!("name {:?} contains an interior null byte", name)
     });
-    let val = unsafe { llvm::LLVMRustGetNamedValue(ccx.llmod(), namebuf.as_ptr()) };
+    let val = unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) };
     if val.is_null() {
         debug!("get_declared_value: {:?} value is null", name);
         None
@@ -209,8 +209,8 @@
 
 /// Get defined or externally defined (AvailableExternally linkage) value by
 /// name.
-pub fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
-    get_declared_value(ccx, name).and_then(|val|{
+pub fn get_defined_value(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
+    get_declared_value(cx, name).and_then(|val|{
         let declaration = unsafe {
             llvm::LLVMIsDeclaration(val) != 0
         };
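
Taken together, the declare_*/define_* helpers encode a small protocol: declaring is idempotent (an existing value with the same name is returned or updated), while defining treats a pre-existing definition as an error the caller must surface. A toy sketch of that contract over a plain symbol table (illustrative names, with u32 ids standing in for ValueRefs):

    use std::collections::HashMap;

    #[derive(Default)]
    struct SymbolTable {
        symbols: HashMap<String, u32>,
        next_id: u32,
    }

    impl SymbolTable {
        /// Idempotent, like declare_global: reuses an existing entry.
        fn declare(&mut self, name: &str) -> u32 {
            if let Some(&id) = self.symbols.get(name) {
                return id;
            }
            let id = self.next_id;
            self.next_id += 1;
            self.symbols.insert(name.to_string(), id);
            id
        }

        /// Like define_global: refuses to define the same name twice, so the
        /// caller can report a duplicate-symbol error (e.g. #[no_mangle] misuse).
        fn define(&mut self, name: &str) -> Option<u32> {
            if self.symbols.contains_key(name) {
                None
            } else {
                Some(self.declare(name))
            }
        }
    }
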
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 9477adc..c7275d0 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -23,39 +23,39 @@
 use rustc::ty::{self, Ty};
 use value::Value;
 
-pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
+pub fn size_and_align_of_dst<'a, 'tcx>(bx: &Builder<'a, 'tcx>, t: Ty<'tcx>, info: ValueRef)
                                        -> (ValueRef, ValueRef) {
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
-    if bcx.ccx.shared().type_is_sized(t) {
-        let (size, align) = bcx.ccx.size_and_align_of(t);
+    if bx.cx.type_is_sized(t) {
+        let (size, align) = bx.cx.size_and_align_of(t);
         debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
                t, Value(info), size, align);
-        let size = C_usize(bcx.ccx, size.bytes());
-        let align = C_usize(bcx.ccx, align.abi());
+        let size = C_usize(bx.cx, size.bytes());
+        let align = C_usize(bx.cx, align.abi());
         return (size, align);
     }
     assert!(!info.is_null());
     match t.sty {
         ty::TyDynamic(..) => {
             // load size/align from vtable
-            (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
+            (meth::SIZE.get_usize(bx, info), meth::ALIGN.get_usize(bx, info))
         }
         ty::TySlice(_) | ty::TyStr => {
-            let unit = t.sequence_element_type(bcx.tcx());
+            let unit = t.sequence_element_type(bx.tcx());
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            let (size, align) = bcx.ccx.size_and_align_of(unit);
-            (bcx.mul(info, C_usize(bcx.ccx, size.bytes())),
-             C_usize(bcx.ccx, align.abi()))
+            let (size, align) = bx.cx.size_and_align_of(unit);
+            (bx.mul(info, C_usize(bx.cx, size.bytes())),
+             C_usize(bx.cx, align.abi()))
         }
         _ => {
-            let ccx = bcx.ccx;
+            let cx = bx.cx;
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
             // want to avoid, as the unsized field's alignment could be smaller.
             assert!(!t.is_simd());
-            let layout = ccx.layout_of(t);
+            let layout = cx.layout_of(t);
             debug!("DST {} layout: {:?}", t, layout);
 
             let i = layout.fields.count() - 1;
@@ -63,13 +63,13 @@
             let sized_align = layout.align.abi();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
-            let sized_size = C_usize(ccx, sized_size);
-            let sized_align = C_usize(ccx, sized_align);
+            let sized_size = C_usize(cx, sized_size);
+            let sized_align = C_usize(cx, sized_align);
 
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let field_ty = layout.field(ccx, i).ty;
-            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
+            let field_ty = layout.field(cx, i).ty;
+            let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
 
             // FIXME (#26403, #27023): We should be adding padding
             // to `sized_size` (to accommodate the `unsized_align`
@@ -79,7 +79,7 @@
             // here. But this is where the add would go.)
 
             // Return the sum of sizes and max of aligns.
-            let size = bcx.add(sized_size, unsized_size);
+            let size = bx.add(sized_size, unsized_size);
 
             // Packed types ignore the alignment of their fields.
             if let ty::TyAdt(def, _) = t.sty {
@@ -95,9 +95,9 @@
                 (Some(sized_align), Some(unsized_align)) => {
                     // If both alignments are constant, (the sized_align should always be), then
                     // pick the correct alignment statically.
-                    C_usize(ccx, std::cmp::max(sized_align, unsized_align) as u64)
+                    C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64)
                 }
-                _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
+                _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align),
                                 sized_align,
                                 unsized_align)
             };
@@ -113,8 +113,8 @@
             //
             //   `(size + (align-1)) & -align`
 
-            let addend = bcx.sub(align, C_usize(bcx.ccx, 1));
-            let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
+            let addend = bx.sub(align, C_usize(bx.cx, 1));
+            let size = bx.and(bx.add(size, addend), bx.neg(align));
 
             (size, align)
         }
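
The closing arithmetic above rounds the computed size up to the chosen alignment with `(size + (align-1)) & -align`; on unsigned integers that negation is two's-complement, i.e. a mask equal to `!(align - 1)`. A small sketch of the same computation on plain integers, assuming (as the code does) a power-of-two alignment:

    /// Round `size` up to the next multiple of `align` (a power of two).
    fn align_up(size: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        // wrapping_neg() gives the two's-complement `-align` from the comment:
        // a mask with the low log2(align) bits clear.
        (size + (align - 1)) & align.wrapping_neg()
    }

    // align_up(10, 8) == 16; align_up(16, 8) == 16.
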
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index 23f7d47..b1f1fb5 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -35,7 +35,7 @@
 use std::cmp::Ordering;
 use std::iter;
 
-fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
+fn get_simple_intrinsic(cx: &CodegenCx, name: &str) -> Option<ValueRef> {
     let llvm_name = match name {
         "sqrtf32" => "llvm.sqrt.f32",
         "sqrtf64" => "llvm.sqrt.f64",
@@ -79,20 +79,20 @@
         "abort" => "llvm.trap",
         _ => return None
     };
-    Some(ccx.get_intrinsic(&llvm_name))
+    Some(cx.get_intrinsic(&llvm_name))
 }
 
 /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
 /// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
 /// add them to librustc_trans/trans/context.rs
-pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+pub fn trans_intrinsic_call<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                       callee_ty: Ty<'tcx>,
                                       fn_ty: &FnType<'tcx>,
                                       args: &[OperandRef<'tcx>],
                                       llresult: ValueRef,
                                       span: Span) {
-    let ccx = bcx.ccx;
-    let tcx = ccx.tcx();
+    let cx = bx.cx;
+    let tcx = cx.tcx;
 
     let (def_id, substs) = match callee_ty.sty {
         ty::TyFnDef(def_id, substs) => (def_id, substs),
@@ -105,13 +105,13 @@
     let ret_ty = sig.output();
     let name = &*tcx.item_name(def_id);
 
-    let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx);
+    let llret_ty = cx.layout_of(ret_ty).llvm_type(cx);
     let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
 
-    let simple = get_simple_intrinsic(ccx, name);
+    let simple = get_simple_intrinsic(cx, name);
     let llval = match name {
         _ if simple.is_some() => {
-            bcx.call(simple.unwrap(),
+            bx.call(simple.unwrap(),
                      &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                      None)
         }
@@ -119,15 +119,15 @@
             return;
         },
         "likely" => {
-            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, true)], None)
+            let expect = cx.get_intrinsic(&("llvm.expect.i1"));
+            bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
         }
         "unlikely" => {
-            let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, false)], None)
+            let expect = cx.get_intrinsic(&("llvm.expect.i1"));
+            bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
         }
         "try" => {
-            try_intrinsic(bcx, ccx,
+            try_intrinsic(bx, cx,
                           args[0].immediate(),
                           args[1].immediate(),
                           args[2].immediate(),
@@ -135,57 +135,57 @@
             return;
         }
         "breakpoint" => {
-            let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
-            bcx.call(llfn, &[], None)
+            let llfn = cx.get_intrinsic(&("llvm.debugtrap"));
+            bx.call(llfn, &[], None)
         }
         "size_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.size_of(tp_ty).bytes())
+            C_usize(cx, cx.size_of(tp_ty).bytes())
         }
         "size_of_val" => {
             let tp_ty = substs.type_at(0);
             if let OperandValue::Pair(_, meta) = args[0].val {
                 let (llsize, _) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                 llsize
             } else {
-                C_usize(ccx, ccx.size_of(tp_ty).bytes())
+                C_usize(cx, cx.size_of(tp_ty).bytes())
             }
         }
         "min_align_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.align_of(tp_ty).abi())
+            C_usize(cx, cx.align_of(tp_ty).abi())
         }
         "min_align_of_val" => {
             let tp_ty = substs.type_at(0);
             if let OperandValue::Pair(_, meta) = args[0].val {
                 let (_, llalign) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bx, tp_ty, meta);
                 llalign
             } else {
-                C_usize(ccx, ccx.align_of(tp_ty).abi())
+                C_usize(cx, cx.align_of(tp_ty).abi())
             }
         }
         "pref_align_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.align_of(tp_ty).pref())
+            C_usize(cx, cx.align_of(tp_ty).pref())
         }
         "type_name" => {
             let tp_ty = substs.type_at(0);
             let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
-            C_str_slice(ccx, ty_name)
+            C_str_slice(cx, ty_name)
         }
         "type_id" => {
-            C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0)))
+            C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
         }
         "init" => {
             let ty = substs.type_at(0);
-            if !ccx.layout_of(ty).is_zst() {
+            if !cx.layout_of(ty).is_zst() {
                 // Just zero out the stack slot.
                 // If we store a zero constant, LLVM will drown in vreg allocation for large data
                 // structures, and the generated code will be awful. (A telltale sign of this is
                 // large quantities of `mov [byte ptr foo],0` in the generated code.)
-                memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1));
+                memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
             }
             return;
         }
@@ -196,82 +196,82 @@
         "needs_drop" => {
             let tp_ty = substs.type_at(0);
 
-            C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty))
+            C_bool(cx, bx.cx.type_needs_drop(tp_ty))
         }
         "offset" => {
             let ptr = args[0].immediate();
             let offset = args[1].immediate();
-            bcx.inbounds_gep(ptr, &[offset])
+            bx.inbounds_gep(ptr, &[offset])
         }
         "arith_offset" => {
             let ptr = args[0].immediate();
             let offset = args[1].immediate();
-            bcx.gep(ptr, &[offset])
+            bx.gep(ptr, &[offset])
         }
 
         "copy_nonoverlapping" => {
-            copy_intrinsic(bcx, false, false, substs.type_at(0),
+            copy_intrinsic(bx, false, false, substs.type_at(0),
                            args[1].immediate(), args[0].immediate(), args[2].immediate())
         }
         "copy" => {
-            copy_intrinsic(bcx, true, false, substs.type_at(0),
+            copy_intrinsic(bx, true, false, substs.type_at(0),
                            args[1].immediate(), args[0].immediate(), args[2].immediate())
         }
         "write_bytes" => {
-            memset_intrinsic(bcx, false, substs.type_at(0),
+            memset_intrinsic(bx, false, substs.type_at(0),
                              args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
 
         "volatile_copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, true, substs.type_at(0),
+            copy_intrinsic(bx, false, true, substs.type_at(0),
                            args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_copy_memory" => {
-            copy_intrinsic(bcx, true, true, substs.type_at(0),
+            copy_intrinsic(bx, true, true, substs.type_at(0),
                            args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_set_memory" => {
-            memset_intrinsic(bcx, true, substs.type_at(0),
+            memset_intrinsic(bx, true, substs.type_at(0),
                              args[0].immediate(), args[1].immediate(), args[2].immediate())
         }
         "volatile_load" => {
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to());
+                ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to());
             }
-            let load = bcx.volatile_load(ptr);
+            let load = bx.volatile_load(ptr);
             unsafe {
-                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32);
+                llvm::LLVMSetAlignment(load, cx.align_of(tp_ty).abi() as u32);
             }
-            to_immediate(bcx, load, ccx.layout_of(tp_ty))
+            to_immediate(bx, load, cx.layout_of(tp_ty))
         },
         "volatile_store" => {
             let tp_ty = substs.type_at(0);
-            let dst = args[0].deref(bcx.ccx);
+            let dst = args[0].deref(bx.cx);
             if let OperandValue::Pair(a, b) = args[1].val {
-                bcx.volatile_store(a, dst.project_field(bcx, 0).llval);
-                bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
+                bx.volatile_store(a, dst.project_field(bx, 0).llval);
+                bx.volatile_store(b, dst.project_field(bx, 1).llval);
             } else {
                 let val = if let OperandValue::Ref(ptr, align) = args[1].val {
-                    bcx.load(ptr, align)
+                    bx.load(ptr, align)
                 } else {
                     if dst.layout.is_zst() {
                         return;
                     }
-                    from_immediate(bcx, args[1].immediate())
+                    from_immediate(bx, args[1].immediate())
                 };
-                let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
-                let store = bcx.volatile_store(val, ptr);
+                let ptr = bx.pointercast(dst.llval, val_ty(val).ptr_to());
+                let store = bx.volatile_store(val, ptr);
                 unsafe {
-                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);
+                    llvm::LLVMSetAlignment(store, cx.align_of(tp_ty).abi() as u32);
                 }
             }
             return;
         },
         "prefetch_read_data" | "prefetch_write_data" |
         "prefetch_read_instruction" | "prefetch_write_instruction" => {
-            let expect = ccx.get_intrinsic(&("llvm.prefetch"));
+            let expect = cx.get_intrinsic(&("llvm.prefetch"));
             let (rw, cache_type) = match name {
                 "prefetch_read_data" => (0, 1),
                 "prefetch_write_data" => (1, 1),
@@ -279,11 +279,11 @@
                 "prefetch_write_instruction" => (1, 0),
                 _ => bug!()
             };
-            bcx.call(expect, &[
+            bx.call(expect, &[
                 args[0].immediate(),
-                C_i32(ccx, rw),
+                C_i32(cx, rw),
                 args[1].immediate(),
-                C_i32(ccx, cache_type)
+                C_i32(cx, cache_type)
             ], None)
         },
         "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
@@ -291,27 +291,27 @@
         "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
         "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
             let ty = arg_tys[0];
-            match int_type_width_signed(ty, ccx) {
+            match int_type_width_signed(ty, cx) {
                 Some((width, signed)) =>
                     match name {
                         "ctlz" | "cttz" => {
-                            let y = C_bool(bcx.ccx, false);
-                            let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            let y = C_bool(bx.cx, false);
+                            let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
+                            bx.call(llfn, &[args[0].immediate(), y], None)
                         }
                         "ctlz_nonzero" | "cttz_nonzero" => {
-                            let y = C_bool(bcx.ccx, true);
+                            let y = C_bool(bx.cx, true);
                             let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
-                            let llfn = ccx.get_intrinsic(llvm_name);
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            let llfn = cx.get_intrinsic(llvm_name);
+                            bx.call(llfn, &[args[0].immediate(), y], None)
                         }
-                        "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+                        "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                                         &[args[0].immediate()], None),
                         "bswap" => {
                             if width == 8 {
                                 args[0].immediate() // byte-swapping a u8/i8 is just a no-op
                             } else {
-                                bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
+                                bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                         &[args[0].immediate()], None)
                             }
                         }
@@ -319,44 +319,44 @@
                             let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
                                                     if signed { 's' } else { 'u' },
                                                     &name[..3], width);
-                            let llfn = bcx.ccx.get_intrinsic(&intrinsic);
+                            let llfn = bx.cx.get_intrinsic(&intrinsic);
 
                             // Convert `i1` to a `bool`, and write it to the out parameter
-                            let pair = bcx.call(llfn, &[
+                            let pair = bx.call(llfn, &[
                                 args[0].immediate(),
                                 args[1].immediate()
                             ], None);
-                            let val = bcx.extract_value(pair, 0);
-                            let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
+                            let val = bx.extract_value(pair, 0);
+                            let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
 
-                            let dest = result.project_field(bcx, 0);
-                            bcx.store(val, dest.llval, dest.align);
-                            let dest = result.project_field(bcx, 1);
-                            bcx.store(overflow, dest.llval, dest.align);
+                            let dest = result.project_field(bx, 0);
+                            bx.store(val, dest.llval, dest.align);
+                            let dest = result.project_field(bx, 1);
+                            bx.store(overflow, dest.llval, dest.align);
 
                             return;
                         },
-                        "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()),
-                        "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()),
-                        "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()),
+                        "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()),
+                        "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()),
+                        "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()),
                         "unchecked_div" =>
                             if signed {
-                                bcx.sdiv(args[0].immediate(), args[1].immediate())
+                                bx.sdiv(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.udiv(args[0].immediate(), args[1].immediate())
+                                bx.udiv(args[0].immediate(), args[1].immediate())
                             },
                         "unchecked_rem" =>
                             if signed {
-                                bcx.srem(args[0].immediate(), args[1].immediate())
+                                bx.srem(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.urem(args[0].immediate(), args[1].immediate())
+                                bx.urem(args[0].immediate(), args[1].immediate())
                             },
-                        "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()),
+                        "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()),
                         "unchecked_shr" =>
                             if signed {
-                                bcx.ashr(args[0].immediate(), args[1].immediate())
+                                bx.ashr(args[0].immediate(), args[1].immediate())
                             } else {
-                                bcx.lshr(args[0].immediate(), args[1].immediate())
+                                bx.lshr(args[0].immediate(), args[1].immediate())
                             },
                         _ => bug!(),
                     },
@@ -375,11 +375,11 @@
             match float_type_width(sty) {
                 Some(_width) =>
                     match name {
-                        "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()),
-                        "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()),
-                        "fmul_fast" => bcx.fmul_fast(args[0].immediate(), args[1].immediate()),
-                        "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()),
-                        "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()),
+                        "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
+                        "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
+                        "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
+                        "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
+                        "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                         _ => bug!(),
                     },
                 None => {
@@ -394,23 +394,23 @@
         },
 
         "discriminant_value" => {
-            args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty)
+            args[0].deref(bx.cx).trans_get_discr(bx, ret_ty)
         }
 
         "align_offset" => {
             // `ptr as usize`
-            let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.ccx.isize_ty());
+            let ptr_val = bx.ptrtoint(args[0].immediate(), bx.cx.isize_ty);
             // `ptr_val % align`
             let align = args[1].immediate();
-            let offset = bcx.urem(ptr_val, align);
-            let zero = C_null(bcx.ccx.isize_ty());
+            let offset = bx.urem(ptr_val, align);
+            let zero = C_null(bx.cx.isize_ty);
             // `offset == 0`
-            let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
+            let is_zero = bx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
             // `if offset == 0 { 0 } else { align - offset }`
-            bcx.select(is_zero, zero, bcx.sub(align, offset))
+            bx.select(is_zero, zero, bx.sub(align, offset))
         }
         name if name.starts_with("simd_") => {
-            match generic_simd_intrinsic(bcx, name,
+            match generic_simd_intrinsic(bx, name,
                                          callee_ty,
                                          args,
                                          ret_ty, llret_ty,
@@ -439,16 +439,16 @@
                         (SequentiallyConsistent, Monotonic),
                     "failacq" if is_cxchg =>
                         (SequentiallyConsistent, Acquire),
-                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+                    _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
                 },
                 4 => match (split[2], split[3]) {
                     ("acq", "failrelaxed") if is_cxchg =>
                         (Acquire, Monotonic),
                     ("acqrel", "failrelaxed") if is_cxchg =>
                         (AcquireRelease, Monotonic),
-                    _ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
+                    _ => cx.sess().fatal("unknown ordering in atomic intrinsic")
                 },
-                _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
+                _ => cx.sess().fatal("Atomic intrinsic not in correct format"),
             };
 
             let invalid_monomorphization = |ty| {
@@ -460,22 +460,22 @@
             match split[1] {
                 "cxchg" | "cxchgweak" => {
                     let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
+                    if int_type_width_signed(ty, cx).is_some() {
                         let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
-                        let pair = bcx.atomic_cmpxchg(
+                        let pair = bx.atomic_cmpxchg(
                             args[0].immediate(),
                             args[1].immediate(),
                             args[2].immediate(),
                             order,
                             failorder,
                             weak);
-                        let val = bcx.extract_value(pair, 0);
-                        let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));
+                        let val = bx.extract_value(pair, 0);
+                        let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
 
-                        let dest = result.project_field(bcx, 0);
-                        bcx.store(val, dest.llval, dest.align);
-                        let dest = result.project_field(bcx, 1);
-                        bcx.store(success, dest.llval, dest.align);
+                        let dest = result.project_field(bx, 0);
+                        bx.store(val, dest.llval, dest.align);
+                        let dest = result.project_field(bx, 1);
+                        bx.store(success, dest.llval, dest.align);
                         return;
                     } else {
                         return invalid_monomorphization(ty);
@@ -484,9 +484,9 @@
 
                 "load" => {
                     let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        let align = ccx.align_of(ty);
-                        bcx.atomic_load(args[0].immediate(), order, align)
+                    if int_type_width_signed(ty, cx).is_some() {
+                        let align = cx.align_of(ty);
+                        bx.atomic_load(args[0].immediate(), order, align)
                     } else {
                         return invalid_monomorphization(ty);
                     }
@@ -494,9 +494,9 @@
 
                 "store" => {
                     let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        let align = ccx.align_of(ty);
-                        bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
+                    if int_type_width_signed(ty, cx).is_some() {
+                        let align = cx.align_of(ty);
+                        bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
                         return;
                     } else {
                         return invalid_monomorphization(ty);
@@ -504,12 +504,12 @@
                 }
 
                 "fence" => {
-                    bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
+                    bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
                     return;
                 }
 
                 "singlethreadfence" => {
-                    bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
+                    bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
                     return;
                 }
 
@@ -527,12 +527,12 @@
                         "min"   => llvm::AtomicMin,
                         "umax"  => llvm::AtomicUMax,
                         "umin"  => llvm::AtomicUMin,
-                        _ => ccx.sess().fatal("unknown atomic operation")
+                        _ => cx.sess().fatal("unknown atomic operation")
                     };
 
                     let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
+                    if int_type_width_signed(ty, cx).is_some() {
+                        bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
                     } else {
                         return invalid_monomorphization(ty);
                     }
@@ -542,16 +542,16 @@
 
         "nontemporal_store" => {
             let tp_ty = substs.type_at(0);
-            let dst = args[0].deref(bcx.ccx);
+            let dst = args[0].deref(bx.cx);
             let val = if let OperandValue::Ref(ptr, align) = args[1].val {
-                bcx.load(ptr, align)
+                bx.load(ptr, align)
             } else {
-                from_immediate(bcx, args[1].immediate())
+                from_immediate(bx, args[1].immediate())
             };
-            let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
-            let store = bcx.nontemporal_store(val, ptr);
+            let ptr = bx.pointercast(dst.llval, val_ty(val).ptr_to());
+            let store = bx.nontemporal_store(val, ptr);
             unsafe {
-                llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);
+                llvm::LLVMSetAlignment(store, cx.align_of(tp_ty).abi() as u32);
             }
             return
         }
@@ -565,39 +565,39 @@
                 assert_eq!(x.len(), 1);
                 x.into_iter().next().unwrap()
             }
-            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type) -> Vec<Type> {
+            fn ty_to_type(cx: &CodegenCx, t: &intrinsics::Type) -> Vec<Type> {
                 use intrinsics::Type::*;
                 match *t {
-                    Void => vec![Type::void(ccx)],
+                    Void => vec![Type::void(cx)],
                     Integer(_signed, _width, llvm_width) => {
-                        vec![Type::ix(ccx, llvm_width as u64)]
+                        vec![Type::ix(cx, llvm_width as u64)]
                     }
                     Float(x) => {
                         match x {
-                            32 => vec![Type::f32(ccx)],
-                            64 => vec![Type::f64(ccx)],
+                            32 => vec![Type::f32(cx)],
+                            64 => vec![Type::f64(cx)],
                             _ => bug!()
                         }
                     }
                     Pointer(ref t, ref llvm_elem, _const) => {
                         let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t));
+                        let elem = one(ty_to_type(cx, t));
                         vec![elem.ptr_to()]
                     }
                     Vector(ref t, ref llvm_elem, length) => {
                         let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t));
+                        let elem = one(ty_to_type(cx, t));
                         vec![Type::vector(&elem, length as u64)]
                     }
                     Aggregate(false, ref contents) => {
                         let elems = contents.iter()
-                                            .map(|t| one(ty_to_type(ccx, t)))
+                                            .map(|t| one(ty_to_type(cx, t)))
                                             .collect::<Vec<_>>();
-                        vec![Type::struct_(ccx, &elems, false)]
+                        vec![Type::struct_(cx, &elems, false)]
                     }
                     Aggregate(true, ref contents) => {
                         contents.iter()
-                                .flat_map(|t| ty_to_type(ccx, t))
+                                .flat_map(|t| ty_to_type(cx, t))
                                 .collect()
                     }
                 }
@@ -607,7 +607,7 @@
             // qux` to be converted into `foo, bar, baz, qux`, integer
             // arguments to be truncated as needed and pointers to be
             // cast.
-            fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+            fn modify_as_needed<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                           t: &intrinsics::Type,
                                           arg: &OperandRef<'tcx>)
                                           -> Vec<ValueRef>
@@ -620,29 +620,29 @@
                         // This assumes the type is "simple", i.e. no
                         // destructors, and the contents are SIMD
                         // etc.
-                        assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty));
+                        assert!(!bx.cx.type_needs_drop(arg.layout.ty));
                         let (ptr, align) = match arg.val {
                             OperandValue::Ref(ptr, align) => (ptr, align),
                             _ => bug!()
                         };
                         let arg = PlaceRef::new_sized(ptr, arg.layout, align);
                         (0..contents.len()).map(|i| {
-                            arg.project_field(bcx, i).load(bcx).immediate()
+                            arg.project_field(bx, i).load(bx).immediate()
                         }).collect()
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
-                        vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
+                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
                     }
                     intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
-                        vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
+                        let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+                        vec![bx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
                     }
                     intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                         // the LLVM intrinsic uses a smaller integer
                         // size than the C intrinsic's signature, so
                         // we have to trim it down here.
-                        vec![bcx.trunc(arg.immediate(), Type::ix(bcx.ccx, llvm_width as u64))]
+                        vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
                     }
                     _ => vec![arg.immediate()],
                 }
@@ -650,22 +650,22 @@
 
 
             let inputs = intr.inputs.iter()
-                                    .flat_map(|t| ty_to_type(ccx, t))
+                                    .flat_map(|t| ty_to_type(cx, t))
                                     .collect::<Vec<_>>();
 
-            let outputs = one(ty_to_type(ccx, &intr.output));
+            let outputs = one(ty_to_type(cx, &intr.output));
 
             let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
-                modify_as_needed(bcx, t, arg)
+                modify_as_needed(bx, t, arg)
             }).collect();
             assert_eq!(inputs.len(), llargs.len());
 
             let val = match intr.definition {
                 intrinsics::IntrinsicDef::Named(name) => {
-                    let f = declare::declare_cfn(ccx,
+                    let f = declare::declare_cfn(cx,
                                                  name,
                                                  Type::func(&inputs, &outputs));
-                    bcx.call(f, &llargs, None)
+                    bx.call(f, &llargs, None)
                 }
             };
 
@@ -675,9 +675,9 @@
                     assert!(!flatten);
 
                     for i in 0..elems.len() {
-                        let dest = result.project_field(bcx, i);
-                        let val = bcx.extract_value(val, i as u64);
-                        bcx.store(val, dest.llval, dest.align);
+                        let dest = result.project_field(bx, i);
+                        let val = bx.extract_value(val, i as u64);
+                        bx.store(val, dest.llval, dest.align);
                     }
                     return;
                 }
@@ -688,16 +688,16 @@
 
     if !fn_ty.ret.is_ignore() {
         if let PassMode::Cast(ty) = fn_ty.ret.mode {
-            let ptr = bcx.pointercast(result.llval, ty.llvm_type(ccx).ptr_to());
-            bcx.store(llval, ptr, result.align);
+            let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to());
+            bx.store(llval, ptr, result.align);
         } else {
-            OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
-                .val.store(bcx, result);
+            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                .val.store(bx, result);
         }
     }
 }
 
-fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
+fn copy_intrinsic<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                             allow_overlap: bool,
                             volatile: bool,
                             ty: Ty<'tcx>,
@@ -705,10 +705,10 @@
                             src: ValueRef,
                             count: ValueRef)
                             -> ValueRef {
-    let ccx = bcx.ccx;
-    let (size, align) = ccx.size_and_align_of(ty);
-    let size = C_usize(ccx, size.bytes());
-    let align = C_i32(ccx, align.abi() as i32);
+    let cx = bx.cx;
+    let (size, align) = cx.size_and_align_of(ty);
+    let size = C_usize(cx, size.bytes());
+    let align = C_i32(cx, align.abi() as i32);
 
     let operation = if allow_overlap {
         "memmove"
@@ -717,53 +717,53 @@
     };
 
     let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
-                       ccx.data_layout().pointer_size.bits());
+                       cx.data_layout().pointer_size.bits());
 
-    let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
-    let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
-    let llfn = ccx.get_intrinsic(&name);
+    let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
+    let src_ptr = bx.pointercast(src, Type::i8p(cx));
+    let llfn = cx.get_intrinsic(&name);
 
-    bcx.call(llfn,
+    bx.call(llfn,
         &[dst_ptr,
         src_ptr,
-        bcx.mul(size, count),
+        bx.mul(size, count),
         align,
-        C_bool(ccx, volatile)],
+        C_bool(cx, volatile)],
         None)
 }
 
 fn memset_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     volatile: bool,
     ty: Ty<'tcx>,
     dst: ValueRef,
     val: ValueRef,
     count: ValueRef
 ) -> ValueRef {
-    let ccx = bcx.ccx;
-    let (size, align) = ccx.size_and_align_of(ty);
-    let size = C_usize(ccx, size.bytes());
-    let align = C_i32(ccx, align.abi() as i32);
-    let dst = bcx.pointercast(dst, Type::i8p(ccx));
-    call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
+    let cx = bx.cx;
+    let (size, align) = cx.size_and_align_of(ty);
+    let size = C_usize(cx, size.bytes());
+    let align = C_i32(cx, align.abi() as i32);
+    let dst = bx.pointercast(dst, Type::i8p(cx));
+    call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
 }
 
 fn try_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
-    ccx: &CrateContext,
+    bx: &Builder<'a, 'tcx>,
+    cx: &CodegenCx,
     func: ValueRef,
     data: ValueRef,
     local_ptr: ValueRef,
     dest: ValueRef,
 ) {
-    if bcx.sess().no_landing_pads() {
-        bcx.call(func, &[data], None);
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, ptr_align);
-    } else if wants_msvc_seh(bcx.sess()) {
-        trans_msvc_try(bcx, ccx, func, data, local_ptr, dest);
+    if bx.sess().no_landing_pads() {
+        bx.call(func, &[data], None);
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
+    } else if wants_msvc_seh(bx.sess()) {
+        trans_msvc_try(bx, cx, func, data, local_ptr, dest);
     } else {
-        trans_gnu_try(bcx, ccx, func, data, local_ptr, dest);
+        trans_gnu_try(bx, cx, func, data, local_ptr, dest);
     }
 }
 
@@ -774,25 +774,25 @@
 // instructions are meant to work for all targets; as of the time of this
 // writing, however, LLVM does not recommend using these new instructions,
 // as the old ones are still better optimized.
-fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
-                            ccx: &CrateContext,
+fn trans_msvc_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
+                            cx: &CodegenCx,
                             func: ValueRef,
                             data: ValueRef,
                             local_ptr: ValueRef,
                             dest: ValueRef) {
-    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
-        let ccx = bcx.ccx;
+    let llfn = get_rust_try_fn(cx, &mut |bx| {
+        let cx = bx.cx;
 
-        bcx.set_personality_fn(bcx.ccx.eh_personality());
+        bx.set_personality_fn(bx.cx.eh_personality());
 
-        let normal = bcx.build_sibling_block("normal");
-        let catchswitch = bcx.build_sibling_block("catchswitch");
-        let catchpad = bcx.build_sibling_block("catchpad");
-        let caught = bcx.build_sibling_block("caught");
+        let normal = bx.build_sibling_block("normal");
+        let catchswitch = bx.build_sibling_block("catchswitch");
+        let catchpad = bx.build_sibling_block("catchpad");
+        let caught = bx.build_sibling_block("caught");
 
-        let func = llvm::get_param(bcx.llfn(), 0);
-        let data = llvm::get_param(bcx.llfn(), 1);
-        let local_ptr = llvm::get_param(bcx.llfn(), 2);
+        let func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let local_ptr = llvm::get_param(bx.llfn(), 2);
 
         // We're generating an IR snippet that looks like:
         //
@@ -833,42 +833,42 @@
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
-        let i64p = Type::i64(ccx).ptr_to();
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        let slot = bcx.alloca(i64p, "slot", ptr_align);
-        bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
+        let i64p = Type::i64(cx).ptr_to();
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let slot = bx.alloca(i64p, "slot", ptr_align);
+        bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
             None);
 
-        normal.ret(C_i32(ccx, 0));
+        normal.ret(C_i32(cx, 0));
 
         let cs = catchswitch.catch_switch(None, None, 1);
         catchswitch.add_handler(cs, catchpad.llbb());
 
-        let tcx = ccx.tcx();
+        let tcx = cx.tcx;
         let tydesc = match tcx.lang_items().msvc_try_filter() {
-            Some(did) => ::consts::get_static(ccx, did),
+            Some(did) => ::consts::get_static(cx, did),
             None => bug!("msvc_try_filter not defined"),
         };
-        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
+        let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
         let addr = catchpad.load(slot, ptr_align);
 
-        let i64_align = bcx.tcx().data_layout.i64_align;
+        let i64_align = bx.tcx().data_layout.i64_align;
         let arg1 = catchpad.load(addr, i64_align);
-        let val1 = C_i32(ccx, 1);
+        let val1 = C_i32(cx, 1);
         let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
         let local_ptr = catchpad.bitcast(local_ptr, i64p);
         catchpad.store(arg1, local_ptr, i64_align);
         catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
         catchpad.catch_ret(tok, caught.llbb());
 
-        caught.ret(C_i32(ccx, 1));
+        caught.ret(C_i32(cx, 1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bcx.tcx().data_layout.i32_align;
-    bcx.store(ret, dest, i32_align);
+    let ret = bx.call(llfn, &[func, data, local_ptr], None);
+    let i32_align = bx.tcx().data_layout.i32_align;
+    bx.store(ret, dest, i32_align);
 }
 
 // Definition of the standard "try" function for Rust using the GNU-like model
@@ -882,18 +882,18 @@
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
-                           ccx: &CrateContext,
+fn trans_gnu_try<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
+                           cx: &CodegenCx,
                            func: ValueRef,
                            data: ValueRef,
                            local_ptr: ValueRef,
                            dest: ValueRef) {
-    let llfn = get_rust_try_fn(ccx, &mut |bcx| {
-        let ccx = bcx.ccx;
+    let llfn = get_rust_try_fn(cx, &mut |bx| {
+        let cx = bx.cx;
 
         // Translates the shims described above:
         //
-        //   bcx:
+        //   bx:
         //      invoke %func(%args...) normal %normal unwind %catch
         //
         //   normal:
@@ -908,14 +908,14 @@
         // expected to be `*mut *mut u8` for this to actually work, but that's
         // managed by the standard library.
 
-        let then = bcx.build_sibling_block("then");
-        let catch = bcx.build_sibling_block("catch");
+        let then = bx.build_sibling_block("then");
+        let catch = bx.build_sibling_block("catch");
 
-        let func = llvm::get_param(bcx.llfn(), 0);
-        let data = llvm::get_param(bcx.llfn(), 1);
-        let local_ptr = llvm::get_param(bcx.llfn(), 2);
-        bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
-        then.ret(C_i32(ccx, 0));
+        let func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let local_ptr = llvm::get_param(bx.llfn(), 2);
+        bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
+        then.ret(C_i32(cx, 0));
 
         // Type indicator for the exception being thrown.
         //
@@ -923,41 +923,41 @@
         // being thrown.  The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
-        let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
+        let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)],
                                     false);
-        let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1);
-        catch.add_clause(vals, C_null(Type::i8p(ccx)));
+        let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
+        catch.add_clause(vals, C_null(Type::i8p(cx)));
         let ptr = catch.extract_value(vals, 0);
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), ptr_align);
-        catch.ret(C_i32(ccx, 1));
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
+        catch.ret(C_i32(cx, 1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bcx.tcx().data_layout.i32_align;
-    bcx.store(ret, dest, i32_align);
+    let ret = bx.call(llfn, &[func, data, local_ptr], None);
+    let i32_align = bx.tcx().data_layout.i32_align;
+    bx.store(ret, dest, i32_align);
 }
 
 // Helper function that hands a Builder for a fresh entry block to a closure
 // which translates a shim function. This is currently used primarily for the
 // `try` intrinsic functions above.
-fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn gen_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                     name: &str,
                     inputs: Vec<Ty<'tcx>>,
                     output: Ty<'tcx>,
                     trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                     -> ValueRef {
-    let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::Binder(ccx.tcx().mk_fn_sig(
+    let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder(cx.tcx.mk_fn_sig(
         inputs.into_iter(),
         output,
         false,
         hir::Unsafety::Unsafe,
         Abi::Rust
     )));
-    let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
-    let bcx = Builder::new_block(ccx, llfn, "entry-block");
-    trans(bcx);
+    let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
+    let bx = Builder::new_block(cx, llfn, "entry-block");
+    trans(bx);
     llfn
 }
 
@@ -965,15 +965,15 @@
 // catch exceptions.
 //
 // This function is only generated once and is then cached.
-fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn get_rust_try_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
                              -> ValueRef {
-    if let Some(llfn) = ccx.rust_try_fn().get() {
+    if let Some(llfn) = cx.rust_try_fn.get() {
         return llfn;
     }
 
     // Define the type up front for the signature of the rust_try function.
-    let tcx = ccx.tcx();
+    let tcx = cx.tcx;
     let i8p = tcx.mk_mut_ptr(tcx.types.i8);
     let fn_ty = tcx.mk_fn_ptr(ty::Binder(tcx.mk_fn_sig(
         iter::once(i8p),
@@ -983,8 +983,8 @@
         Abi::Rust
     )));
     let output = tcx.types.i32;
-    let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
-    ccx.rust_try_fn().set(Some(rust_try));
+    let rust_try = gen_fn(cx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
+    cx.rust_try_fn.set(Some(rust_try));
     return rust_try
 }
 
@@ -993,7 +993,7 @@
 }
 
 fn generic_simd_intrinsic<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     name: &str,
     callee_ty: Ty<'tcx>,
     args: &[OperandRef<'tcx>],
@@ -1008,7 +1008,7 @@
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bcx.sess(), span,
+                bx.sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
                                  $msg),
                          name, $($fmt)*));
@@ -1030,7 +1030,7 @@
 
 
 
-    let tcx = bcx.tcx();
+    let tcx = bx.tcx();
     let sig = tcx.erase_late_bound_regions_and_normalize(&callee_ty.fn_sig(tcx));
     let arg_tys = sig.inputs();
 
@@ -1064,7 +1064,7 @@
                  ret_ty,
                  ret_ty.simd_type(tcx));
 
-        return Ok(compare_simd_types(bcx,
+        return Ok(compare_simd_types(bx,
                                      args[0].immediate(),
                                      args[1].immediate(),
                                      in_elem,
@@ -1109,7 +1109,7 @@
                                     arg_idx, total_len);
                         None
                     }
-                    Some(idx) => Some(C_i32(bcx.ccx, idx as i32)),
+                    Some(idx) => Some(C_i32(bx.cx, idx as i32)),
                 }
             })
             .collect();
@@ -1118,7 +1118,7 @@
             None => return Ok(C_null(llret_ty))
         };
 
-        return Ok(bcx.shuffle_vector(args[0].immediate(),
+        return Ok(bx.shuffle_vector(args[0].immediate(),
                                      args[1].immediate(),
                                      C_vector(&indices)))
     }
@@ -1127,7 +1127,7 @@
         require!(in_elem == arg_tys[2],
                  "expected inserted type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, arg_tys[2]);
-        return Ok(bcx.insert_element(args[0].immediate(),
+        return Ok(bx.insert_element(args[0].immediate(),
                                      args[2].immediate(),
                                      args[1].immediate()))
     }
@@ -1135,7 +1135,7 @@
         require!(ret_ty == in_elem,
                  "expected return type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, ret_ty);
-        return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate()))
+        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()))
     }
 
     if name == "simd_cast" {
@@ -1171,34 +1171,34 @@
         match (in_style, out_style) {
             (Style::Int(in_is_signed), Style::Int(_)) => {
                 return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty),
+                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                     Ordering::Equal => args[0].immediate(),
                     Ordering::Less => if in_is_signed {
-                        bcx.sext(args[0].immediate(), llret_ty)
+                        bx.sext(args[0].immediate(), llret_ty)
                     } else {
-                        bcx.zext(args[0].immediate(), llret_ty)
+                        bx.zext(args[0].immediate(), llret_ty)
                     }
                 })
             }
             (Style::Int(in_is_signed), Style::Float) => {
                 return Ok(if in_is_signed {
-                    bcx.sitofp(args[0].immediate(), llret_ty)
+                    bx.sitofp(args[0].immediate(), llret_ty)
                 } else {
-                    bcx.uitofp(args[0].immediate(), llret_ty)
+                    bx.uitofp(args[0].immediate(), llret_ty)
                 })
             }
             (Style::Float, Style::Int(out_is_signed)) => {
                 return Ok(if out_is_signed {
-                    bcx.fptosi(args[0].immediate(), llret_ty)
+                    bx.fptosi(args[0].immediate(), llret_ty)
                 } else {
-                    bcx.fptoui(args[0].immediate(), llret_ty)
+                    bx.fptoui(args[0].immediate(), llret_ty)
                 })
             }
             (Style::Float, Style::Float) => {
                 return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty),
+                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                     Ordering::Equal => args[0].immediate(),
-                    Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty)
+                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty)
                 })
             }
             _ => {/* Unsupported. Fallthrough. */}
@@ -1213,7 +1213,7 @@
             $(if name == stringify!($name) {
                 match in_elem.sty {
                     $($(ty::$p(_))|* => {
-                        return Ok(bcx.$call(args[0].immediate(), args[1].immediate()))
+                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                     })*
                     _ => {},
                 }
@@ -1243,11 +1243,11 @@
 // Returns None if the type is not an integer
 // FIXME: there are multiple functions like this; investigate reusing some of
 // the existing ones.
-fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> {
+fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> {
     match ty.sty {
         ty::TyInt(t) => Some((match t {
             ast::IntTy::Isize => {
-                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+                match &cx.tcx.sess.target.target.target_pointer_width[..] {
                     "16" => 16,
                     "32" => 32,
                     "64" => 64,
@@ -1262,7 +1262,7 @@
         }, true)),
         ty::TyUint(t) => Some((match t {
             ast::UintTy::Usize => {
-                match &ccx.tcx().sess.target.target.target_pointer_width[..] {
+                match &cx.tcx.sess.target.target.target_pointer_width[..] {
                     "16" => 16,
                     "32" => 32,
                     "64" => 64,
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 4be2774..6b542ae 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -33,30 +33,30 @@
         VirtualIndex(index as u64 + 3)
     }
 
-    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
+    pub fn get_fn(self, bx: &Builder<'a, 'tcx>,
                   llvtable: ValueRef,
                   fn_ty: &FnType<'tcx>) -> ValueRef {
         // Load the function pointer from the vtable.
         debug!("get_fn({:?}, {:?})", Value(llvtable), self);
 
-        let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
-        let ptr_align = bcx.tcx().data_layout.pointer_align;
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), ptr_align);
-        bcx.nonnull_metadata(ptr);
+        let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to());
+        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align);
+        bx.nonnull_metadata(ptr);
         // Vtable loads are invariant
-        bcx.set_invariant_load(ptr);
+        bx.set_invariant_load(ptr);
         ptr
     }
 
-    pub fn get_usize(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
+    pub fn get_usize(self, bx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
         // Load the usize value (size or alignment) from the vtable.
         debug!("get_usize({:?}, {:?})", Value(llvtable), self);
 
-        let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to());
-        let usize_align = bcx.tcx().data_layout.pointer_align;
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), usize_align);
+        let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
+        let usize_align = bx.tcx().data_layout.pointer_align;
+        let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align);
         // Vtable loads are invariant
-        bcx.set_invariant_load(ptr);
+        bx.set_invariant_load(ptr);
         ptr
     }
 }
@@ -69,28 +69,28 @@
 /// The `trait_ref` encodes the erased self type. Hence if we are
 /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
 /// `trait_ref` would map `T:Trait`.
-pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+pub fn get_vtable<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                             ty: Ty<'tcx>,
                             trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>)
                             -> ValueRef
 {
-    let tcx = ccx.tcx();
+    let tcx = cx.tcx;
 
     debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
 
     // Check the cache.
-    if let Some(&val) = ccx.vtables().borrow().get(&(ty, trait_ref)) {
+    if let Some(&val) = cx.vtables.borrow().get(&(ty, trait_ref)) {
         return val;
     }
 
     // Not in the cache. Build it.
-    let nullptr = C_null(Type::i8p(ccx));
+    let nullptr = C_null(Type::i8p(cx));
 
-    let (size, align) = ccx.size_and_align_of(ty);
+    let (size, align) = cx.size_and_align_of(ty);
     let mut components: Vec<_> = [
-        callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)),
-        C_usize(ccx, size.bytes()),
-        C_usize(ccx, align.abi())
+        callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)),
+        C_usize(cx, size.bytes()),
+        C_usize(cx, align.abi())
     ].iter().cloned().collect();
 
     if let Some(trait_ref) = trait_ref {
@@ -98,18 +98,18 @@
         let methods = tcx.vtable_methods(trait_ref);
         let methods = methods.iter().cloned().map(|opt_mth| {
             opt_mth.map_or(nullptr, |(def_id, substs)| {
-                callee::resolve_and_get_fn(ccx, def_id, substs)
+                callee::resolve_and_get_fn(cx, def_id, substs)
             })
         });
         components.extend(methods);
     }
 
-    let vtable_const = C_struct(ccx, &components, false);
-    let align = ccx.data_layout().pointer_align;
-    let vtable = consts::addr_of(ccx, vtable_const, align, "vtable");
+    let vtable_const = C_struct(cx, &components, false);
+    let align = cx.data_layout().pointer_align;
+    let vtable = consts::addr_of(cx, vtable_const, align, "vtable");
 
-    debuginfo::create_vtable_metadata(ccx, ty, vtable);
+    debuginfo::create_vtable_metadata(cx, ty, vtable);
 
-    ccx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
+    cx.vtables.borrow_mut().insert((ty, trait_ref), vtable);
     vtable
 }
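
For orientation: the constant that `get_vtable` above assembles has three
fixed leading slots followed by the trait's methods, which is why the
`VirtualIndex(index as u64 + 3)` constructor earlier in this file adds 3. A
conceptual sketch only (the real value is an LLVM constant struct; the field
names here are invented for illustration):

    // Mirrors the `components` vector built in get_vtable.
    struct VtableLayout {
        drop_in_place: usize, // slot 0: pointer to the drop glue
        size: usize,          // slot 1: size of the concrete type
        align: usize,         // slot 2: alignment of the concrete type
        // slots 3..: one entry per trait method, in order; entries for
        // methods that are never instantiated are null pointers.
    }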
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index 31cbb07..bf82e1d 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -20,18 +20,18 @@
 use rustc::ty;
 use rustc::ty::layout::LayoutOf;
 use type_of::LayoutLlvmExt;
-use super::MirContext;
+use super::FunctionCx;
 
-pub fn memory_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
-    let mir = mircx.mir;
-    let mut analyzer = LocalAnalyzer::new(mircx);
+pub fn memory_locals<'a, 'tcx>(fx: &FunctionCx<'a, 'tcx>) -> BitVector {
+    let mir = fx.mir;
+    let mut analyzer = LocalAnalyzer::new(fx);
 
     analyzer.visit_mir(mir);
 
     for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
-        let ty = mircx.monomorphize(&ty);
+        let ty = fx.monomorphize(&ty);
         debug!("local {} has type {:?}", index, ty);
-        let layout = mircx.ccx.layout_of(ty);
+        let layout = fx.cx.layout_of(ty);
         if layout.is_llvm_immediate() {
             // These sorts of types are immediates that we can store
             // in a ValueRef without an alloca.
@@ -52,21 +52,21 @@
 }
 
 struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> {
-    cx: &'mir MirContext<'a, 'tcx>,
+    fx: &'mir FunctionCx<'a, 'tcx>,
     memory_locals: BitVector,
     seen_assigned: BitVector
 }
 
 impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> {
-    fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
+    fn new(fx: &'mir FunctionCx<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> {
         let mut analyzer = LocalAnalyzer {
-            cx: mircx,
-            memory_locals: BitVector::new(mircx.mir.local_decls.len()),
-            seen_assigned: BitVector::new(mircx.mir.local_decls.len())
+            fx,
+            memory_locals: BitVector::new(fx.mir.local_decls.len()),
+            seen_assigned: BitVector::new(fx.mir.local_decls.len())
         };
 
         // Arguments get assigned to by means of the function being called
-        for idx in 0..mircx.mir.arg_count {
+        for idx in 0..fx.mir.arg_count {
             analyzer.seen_assigned.insert(idx + 1);
         }
 
@@ -95,7 +95,7 @@
 
         if let mir::Place::Local(index) = *place {
             self.mark_assigned(index);
-            if !self.cx.rvalue_creates_operand(rvalue) {
+            if !self.fx.rvalue_creates_operand(rvalue) {
                 self.mark_as_memory(index);
             }
         } else {
@@ -117,7 +117,7 @@
                     }, ..
                 }),
                 ref args, ..
-            } if Some(def_id) == self.cx.ccx.tcx().lang_items().box_free_fn() => {
+            } if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() => {
                 // box_free(x) shares with `drop x` the property that it
                 // is not guaranteed to be statically dominated by the
                 // definition of x, so x must always be in an alloca.
@@ -136,7 +136,7 @@
                     context: PlaceContext<'tcx>,
                     location: Location) {
         debug!("visit_place(place={:?}, context={:?})", place, context);
-        let ccx = self.cx.ccx;
+        let cx = self.fx.cx;
 
         if let mir::Place::Projection(ref proj) = *place {
             // Allow uses of projections that are ZSTs or from scalar fields.
@@ -145,18 +145,18 @@
                 _ => false
             };
             if is_consume {
-                let base_ty = proj.base.ty(self.cx.mir, ccx.tcx());
-                let base_ty = self.cx.monomorphize(&base_ty);
+                let base_ty = proj.base.ty(self.fx.mir, cx.tcx);
+                let base_ty = self.fx.monomorphize(&base_ty);
 
                 // ZSTs don't require any actual memory access.
-                let elem_ty = base_ty.projection_ty(ccx.tcx(), &proj.elem).to_ty(ccx.tcx());
-                let elem_ty = self.cx.monomorphize(&elem_ty);
-                if ccx.layout_of(elem_ty).is_zst() {
+                let elem_ty = base_ty.projection_ty(cx.tcx, &proj.elem).to_ty(cx.tcx);
+                let elem_ty = self.fx.monomorphize(&elem_ty);
+                if cx.layout_of(elem_ty).is_zst() {
                     return;
                 }
 
                 if let mir::ProjectionElem::Field(..) = proj.elem {
-                    let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx()));
+                    let layout = cx.layout_of(base_ty.to_ty(cx.tcx));
                     if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
                         // Recurse with the same context, instead of `Projection`,
                         // potentially stopping at non-operand projections,
@@ -200,11 +200,11 @@
             }
 
             PlaceContext::Drop => {
-                let ty = mir::Place::Local(index).ty(self.cx.mir, self.cx.ccx.tcx());
-                let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
+                let ty = mir::Place::Local(index).ty(self.fx.mir, self.fx.cx.tcx);
+                let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx));
 
                 // Only need the place if we're actually dropping it.
-                if self.cx.ccx.shared().type_needs_drop(ty) {
+                if self.fx.cx.type_needs_drop(ty) {
                     self.mark_as_memory(index);
                 }
             }
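
The analysis in `memory_locals` above reduces to a per-local predicate: a
local must live in an alloca if its layout is not an LLVM immediate, if its
address is observed (e.g. borrowed, or dropped when the type needs drop), or
if it is assigned more than once (tracked by `seen_assigned`). A simplified
restatement, assuming those three facts have already been computed:

    // Not the real analysis (which walks the MIR via a visitor); a sketch
    // of the per-local decision it encodes.
    fn needs_alloca(is_llvm_immediate: bool,
                    address_observed: bool,
                    assigned_more_than_once: bool) -> bool {
        !is_llvm_immediate || address_observed || assigned_more_than_once
    }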
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 8c9fb03..af1e30a 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -29,35 +29,35 @@
 use syntax::symbol::Symbol;
 use syntax_pos::Pos;
 
-use super::{MirContext, LocalRef};
+use super::{FunctionCx, LocalRef};
 use super::constant::Const;
 use super::place::PlaceRef;
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Pair, Ref, Immediate};
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn trans_block(&mut self, bb: mir::BasicBlock) {
-        let mut bcx = self.get_builder(bb);
+        let mut bx = self.build_block(bb);
         let data = &self.mir[bb];
 
         debug!("trans_block({:?}={:?})", bb, data);
 
         for statement in &data.statements {
-            bcx = self.trans_statement(bcx, statement);
+            bx = self.trans_statement(bx, statement);
         }
 
-        self.trans_terminator(bcx, bb, data.terminator());
+        self.trans_terminator(bx, bb, data.terminator());
     }
 
     fn trans_terminator(&mut self,
-                        mut bcx: Builder<'a, 'tcx>,
+                        mut bx: Builder<'a, 'tcx>,
                         bb: mir::BasicBlock,
                         terminator: &mir::Terminator<'tcx>)
     {
         debug!("trans_terminator: {:?}", terminator);
 
         // Create the cleanup bundle, if needed.
-        let tcx = bcx.tcx();
+        let tcx = bx.tcx();
         let span = terminator.source_info.span;
         let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
         let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref());
@@ -99,20 +99,20 @@
             }
         };
 
-        let funclet_br = |this: &mut Self, bcx: Builder, target: mir::BasicBlock| {
+        let funclet_br = |this: &mut Self, bx: Builder, target: mir::BasicBlock| {
             let (lltarget, is_cleanupret) = lltarget(this, target);
             if is_cleanupret {
                 // micro-optimization: generate a `ret` rather than a jump
                 // to a trampoline.
-                bcx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
+                bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
             } else {
-                bcx.br(lltarget);
+                bx.br(lltarget);
             }
         };
 
         let do_call = |
             this: &mut Self,
-            bcx: Builder<'a, 'tcx>,
+            bx: Builder<'a, 'tcx>,
             fn_ty: FnType<'tcx>,
             fn_ptr: ValueRef,
             llargs: &[ValueRef],
@@ -120,25 +120,25 @@
             cleanup: Option<mir::BasicBlock>
         | {
             if let Some(cleanup) = cleanup {
-                let ret_bcx = if let Some((_, target)) = destination {
+                let ret_bx = if let Some((_, target)) = destination {
                     this.blocks[target]
                 } else {
                     this.unreachable_block()
                 };
-                let invokeret = bcx.invoke(fn_ptr,
+                let invokeret = bx.invoke(fn_ptr,
                                            &llargs,
-                                           ret_bcx,
+                                           ret_bx,
                                            llblock(this, cleanup),
                                            cleanup_bundle);
                 fn_ty.apply_attrs_callsite(invokeret);
 
                 if let Some((ret_dest, target)) = destination {
-                    let ret_bcx = this.get_builder(target);
-                    this.set_debug_loc(&ret_bcx, terminator.source_info);
-                    this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, invokeret);
+                    let ret_bx = this.build_block(target);
+                    this.set_debug_loc(&ret_bx, terminator.source_info);
+                    this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
                 }
             } else {
-                let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
+                let llret = bx.call(fn_ptr, &llargs, cleanup_bundle);
                 fn_ty.apply_attrs_callsite(llret);
                 if this.mir[bb].is_cleanup {
                     // Cleanup is always the cold path. Don't inline
@@ -149,66 +149,66 @@
                 }
 
                 if let Some((ret_dest, target)) = destination {
-                    this.store_return(&bcx, ret_dest, &fn_ty.ret, llret);
-                    funclet_br(this, bcx, target);
+                    this.store_return(&bx, ret_dest, &fn_ty.ret, llret);
+                    funclet_br(this, bx, target);
                 } else {
-                    bcx.unreachable();
+                    bx.unreachable();
                 }
             }
         };
 
-        self.set_debug_loc(&bcx, terminator.source_info);
+        self.set_debug_loc(&bx, terminator.source_info);
         match terminator.kind {
             mir::TerminatorKind::Resume => {
                 if let Some(cleanup_pad) = cleanup_pad {
-                    bcx.cleanup_ret(cleanup_pad, None);
+                    bx.cleanup_ret(cleanup_pad, None);
                 } else {
-                    let slot = self.get_personality_slot(&bcx);
-                    let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate();
-                    let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate();
-                    slot.storage_dead(&bcx);
+                    let slot = self.get_personality_slot(&bx);
+                    let lp0 = slot.project_field(&bx, 0).load(&bx).immediate();
+                    let lp1 = slot.project_field(&bx, 1).load(&bx).immediate();
+                    slot.storage_dead(&bx);
 
-                    if !bcx.sess().target.target.options.custom_unwind_resume {
+                    if !bx.sess().target.target.options.custom_unwind_resume {
                         let mut lp = C_undef(self.landing_pad_type());
-                        lp = bcx.insert_value(lp, lp0, 0);
-                        lp = bcx.insert_value(lp, lp1, 1);
-                        bcx.resume(lp);
+                        lp = bx.insert_value(lp, lp0, 0);
+                        lp = bx.insert_value(lp, lp1, 1);
+                        bx.resume(lp);
                     } else {
-                        bcx.call(bcx.ccx.eh_unwind_resume(), &[lp0], cleanup_bundle);
-                        bcx.unreachable();
+                        bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle);
+                        bx.unreachable();
                     }
                 }
             }
 
             mir::TerminatorKind::Abort => {
                 // Call core::intrinsics::abort()
-                let fnname = bcx.ccx.get_intrinsic(&("llvm.trap"));
-                bcx.call(fnname, &[], None);
-                bcx.unreachable();
+                let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+                bx.call(fnname, &[], None);
+                bx.unreachable();
             }
 
             mir::TerminatorKind::Goto { target } => {
-                funclet_br(self, bcx, target);
+                funclet_br(self, bx, target);
             }
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
-                let discr = self.trans_operand(&bcx, discr);
-                if switch_ty == bcx.tcx().types.bool {
+                let discr = self.trans_operand(&bx, discr);
+                if switch_ty == bx.tcx().types.bool {
                     let lltrue = llblock(self, targets[0]);
                     let llfalse = llblock(self, targets[1]);
                     if let [ConstInt::U8(0)] = values[..] {
-                        bcx.cond_br(discr.immediate(), llfalse, lltrue);
+                        bx.cond_br(discr.immediate(), llfalse, lltrue);
                     } else {
-                        bcx.cond_br(discr.immediate(), lltrue, llfalse);
+                        bx.cond_br(discr.immediate(), lltrue, llfalse);
                     }
                 } else {
                     let (otherwise, targets) = targets.split_last().unwrap();
-                    let switch = bcx.switch(discr.immediate(),
+                    let switch = bx.switch(discr.immediate(),
                                             llblock(self, *otherwise), values.len());
                     for (value, target) in values.iter().zip(targets) {
-                        let val = Const::from_constint(bcx.ccx, value);
+                        let val = Const::from_constint(bx.cx, value);
                         let llbb = llblock(self, *target);
-                        bcx.add_case(switch, val.llval, llbb)
+                        bx.add_case(switch, val.llval, llbb)
                     }
                 }
             }
@@ -216,16 +216,16 @@
             mir::TerminatorKind::Return => {
                 let llval = match self.fn_ty.ret.mode {
                     PassMode::Ignore | PassMode::Indirect(_) => {
-                        bcx.ret_void();
+                        bx.ret_void();
                         return;
                     }
 
                     PassMode::Direct(_) | PassMode::Pair(..) => {
-                        let op = self.trans_consume(&bcx, &mir::Place::Local(mir::RETURN_PLACE));
+                        let op = self.trans_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE));
                         if let Ref(llval, align) = op.val {
-                            bcx.load(llval, align)
+                            bx.load(llval, align)
                         } else {
-                            op.immediate_or_packed_pair(&bcx)
+                            op.immediate_or_packed_pair(&bx)
                         }
                     }
 
@@ -242,8 +242,8 @@
                         };
                         let llslot = match op.val {
                             Immediate(_) | Pair(..) => {
-                                let scratch = PlaceRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
-                                op.val.store(&bcx, scratch);
+                                let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret");
+                                op.val.store(&bx, scratch);
                                 scratch.llval
                             }
                             Ref(llval, align) => {
@@ -252,53 +252,53 @@
                                 llval
                             }
                         };
-                        bcx.load(
-                            bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
+                        bx.load(
+                            bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()),
                             self.fn_ty.ret.layout.align)
                     }
                 };
-                bcx.ret(llval);
+                bx.ret(llval);
             }
 
             mir::TerminatorKind::Unreachable => {
-                bcx.unreachable();
+                bx.unreachable();
             }
 
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
-                let ty = location.ty(self.mir, bcx.tcx()).to_ty(bcx.tcx());
+                let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
                 let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bcx.ccx.tcx(), ty);
+                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty);
 
                 if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                     // we don't actually need to drop anything.
-                    funclet_br(self, bcx, target);
+                    funclet_br(self, bx, target);
                     return
                 }
 
-                let place = self.trans_place(&bcx, location);
+                let place = self.trans_place(&bx, location);
                 let mut args: &[_] = &[place.llval, place.llextra];
                 args = &args[..1 + place.has_extra() as usize];
                 let (drop_fn, fn_ty) = match ty.sty {
                     ty::TyDynamic(..) => {
-                        let fn_ty = drop_fn.ty(bcx.ccx.tcx());
-                        let sig = common::ty_fn_sig(bcx.ccx, fn_ty);
-                        let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
-                        let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
+                        let fn_ty = drop_fn.ty(bx.cx.tcx);
+                        let sig = common::ty_fn_sig(bx.cx, fn_ty);
+                        let sig = bx.tcx().erase_late_bound_regions_and_normalize(&sig);
+                        let fn_ty = FnType::new_vtable(bx.cx, sig, &[]);
                         args = &args[..1];
-                        (meth::DESTRUCTOR.get_fn(&bcx, place.llextra, &fn_ty), fn_ty)
+                        (meth::DESTRUCTOR.get_fn(&bx, place.llextra, &fn_ty), fn_ty)
                     }
                     _ => {
-                        (callee::get_fn(bcx.ccx, drop_fn),
-                         FnType::of_instance(bcx.ccx, &drop_fn))
+                        (callee::get_fn(bx.cx, drop_fn),
+                         FnType::of_instance(bx.cx, &drop_fn))
                     }
                 };
-                do_call(self, bcx, fn_ty, drop_fn, args,
+                do_call(self, bx, fn_ty, drop_fn, args,
                         Some((ReturnDest::Nothing, target)),
                         unwind);
             }
 
             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
-                let cond = self.trans_operand(&bcx, cond).immediate();
+                let cond = self.trans_operand(&bx, cond).immediate();
                 let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1);
 
                 // This case can currently arise only from functions marked
@@ -308,7 +308,7 @@
                 // NOTE: Unlike binops, negation doesn't have its own
                 // checked operation, just a comparison with the minimum
                 // value, so we have to check for the assert message.
-                if !bcx.ccx.check_overflow() {
+                if !bx.cx.check_overflow {
                     use rustc_const_math::ConstMathErr::Overflow;
                     use rustc_const_math::Op::Neg;
 
@@ -319,33 +319,33 @@
 
                 // Don't translate the panic block if success is known.
                 if const_cond == Some(expected) {
-                    funclet_br(self, bcx, target);
+                    funclet_br(self, bx, target);
                     return;
                 }
 
                 // Pass the condition through llvm.expect for branch hinting.
-                let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1");
-                let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None);
+                let expect = bx.cx.get_intrinsic(&"llvm.expect.i1");
+                let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None);
 
                 // Create the failure block and the conditional branch to it.
                 let lltarget = llblock(self, target);
                 let panic_block = self.new_block("panic");
                 if expected {
-                    bcx.cond_br(cond, lltarget, panic_block.llbb());
+                    bx.cond_br(cond, lltarget, panic_block.llbb());
                 } else {
-                    bcx.cond_br(cond, panic_block.llbb(), lltarget);
+                    bx.cond_br(cond, panic_block.llbb(), lltarget);
                 }
 
-                // After this point, bcx is the block for the call to panic.
-                bcx = panic_block;
-                self.set_debug_loc(&bcx, terminator.source_info);
+                // After this point, bx is the block for the call to panic.
+                bx = panic_block;
+                self.set_debug_loc(&bx, terminator.source_info);
 
                 // Get the location information.
-                let loc = bcx.sess().codemap().lookup_char_pos(span.lo());
+                let loc = bx.sess().codemap().lookup_char_pos(span.lo());
                 let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = C_str_slice(bcx.ccx, filename);
-                let line = C_u32(bcx.ccx, loc.line as u32);
-                let col = C_u32(bcx.ccx, loc.col.to_usize() as u32 + 1);
+                let filename = C_str_slice(bx.cx, filename);
+                let line = C_u32(bx.cx, loc.line as u32);
+                let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1);
                 let align = tcx.data_layout.aggregate_align
                     .max(tcx.data_layout.i32_align)
                     .max(tcx.data_layout.pointer_align);
@@ -353,8 +353,8 @@
                 // Put together the arguments to the panic entry point.
                 let (lang_item, args, const_err) = match *msg {
                     mir::AssertMessage::BoundsCheck { ref len, ref index } => {
-                        let len = self.trans_operand(&mut bcx, len).immediate();
-                        let index = self.trans_operand(&mut bcx, index).immediate();
+                        let len = self.trans_operand(&mut bx, len).immediate();
+                        let index = self.trans_operand(&mut bx, index).immediate();
 
                         let const_err = common::const_to_opt_u128(len, false)
                             .and_then(|len| common::const_to_opt_u128(index, false)
@@ -363,8 +363,8 @@
                                     index: index as u64
                                 }));
 
-                        let file_line_col = C_struct(bcx.ccx, &[filename, line, col], false);
-                        let file_line_col = consts::addr_of(bcx.ccx,
+                        let file_line_col = C_struct(bx.cx, &[filename, line, col], false);
+                        let file_line_col = consts::addr_of(bx.cx,
                                                             file_line_col,
                                                             align,
                                                             "panic_bounds_check_loc");
@@ -374,11 +374,11 @@
                     }
                     mir::AssertMessage::Math(ref err) => {
                         let msg_str = Symbol::intern(err.description()).as_str();
-                        let msg_str = C_str_slice(bcx.ccx, msg_str);
-                        let msg_file_line_col = C_struct(bcx.ccx,
+                        let msg_str = C_str_slice(bx.cx, msg_str);
+                        let msg_file_line_col = C_struct(bx.cx,
                                                      &[msg_str, filename, line, col],
                                                      false);
-                        let msg_file_line_col = consts::addr_of(bcx.ccx,
+                        let msg_file_line_col = consts::addr_of(bx.cx,
                                                                 msg_file_line_col,
                                                                 align,
                                                                 "panic_loc");
@@ -394,11 +394,11 @@
                             "generator resumed after panicking"
                         };
                         let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = C_str_slice(bcx.ccx, msg_str);
-                        let msg_file_line_col = C_struct(bcx.ccx,
+                        let msg_str = C_str_slice(bx.cx, msg_str);
+                        let msg_file_line_col = C_struct(bx.cx,
                                                      &[msg_str, filename, line, col],
                                                      false);
-                        let msg_file_line_col = consts::addr_of(bcx.ccx,
+                        let msg_file_line_col = consts::addr_of(bx.cx,
                                                                 msg_file_line_col,
                                                                 align,
                                                                 "panic_loc");
@@ -413,21 +413,21 @@
                 if const_cond == Some(!expected) {
                     if let Some(err) = const_err {
                         let err = ConstEvalErr{ span: span, kind: err };
-                        let mut diag = bcx.tcx().sess.struct_span_warn(
+                        let mut diag = bx.tcx().sess.struct_span_warn(
                             span, "this expression will panic at run-time");
-                        err.note(bcx.tcx(), span, "expression", &mut diag);
+                        err.note(bx.tcx(), span, "expression", &mut diag);
                         diag.emit();
                     }
                 }
 
                 // Obtain the panic entry point.
-                let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
-                let instance = ty::Instance::mono(bcx.tcx(), def_id);
-                let fn_ty = FnType::of_instance(bcx.ccx, &instance);
-                let llfn = callee::get_fn(bcx.ccx, instance);
+                let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let fn_ty = FnType::of_instance(bx.cx, &instance);
+                let llfn = callee::get_fn(bx.cx, instance);
 
                 // Translate the actual panic invoke/call.
-                do_call(self, bcx, fn_ty, llfn, &args, None, cleanup);
+                do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
             }
 
             mir::TerminatorKind::DropAndReplace { .. } => {
@@ -436,11 +436,11 @@
 
             mir::TerminatorKind::Call { ref func, ref args, ref destination, cleanup } => {
                 // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-                let callee = self.trans_operand(&bcx, func);
+                let callee = self.trans_operand(&bx, func);
 
                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::TyFnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bcx.ccx.tcx(),
+                        (Some(ty::Instance::resolve(bx.cx.tcx,
                                                     ty::ParamEnv::empty(traits::Reveal::All),
                                                     def_id,
                                                     substs).unwrap()),
@@ -452,42 +452,42 @@
                     _ => bug!("{} is not callable", callee.layout.ty)
                 };
                 let def = instance.map(|i| i.def);
-                let sig = callee.layout.ty.fn_sig(bcx.tcx());
-                let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
+                let sig = callee.layout.ty.fn_sig(bx.tcx());
+                let sig = bx.tcx().erase_late_bound_regions_and_normalize(&sig);
                 let abi = sig.abi;
 
                 // Handle intrinsics that old trans wants Exprs for, ourselves.
                 let intrinsic = match def {
                     Some(ty::InstanceDef::Intrinsic(def_id))
-                        => Some(bcx.tcx().item_name(def_id)),
+                        => Some(bx.tcx().item_name(def_id)),
                     _ => None
                 };
                 let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
 
                 if intrinsic == Some("transmute") {
                     let &(ref dest, target) = destination.as_ref().unwrap();
-                    self.trans_transmute(&bcx, &args[0], dest);
-                    funclet_br(self, bcx, target);
+                    self.trans_transmute(&bx, &args[0], dest);
+                    funclet_br(self, bx, target);
                     return;
                 }
 
                 let extra_args = &args[sig.inputs().len()..];
                 let extra_args = extra_args.iter().map(|op_arg| {
-                    let op_ty = op_arg.ty(self.mir, bcx.tcx());
+                    let op_ty = op_arg.ty(self.mir, bx.tcx());
                     self.monomorphize(&op_ty)
                 }).collect::<Vec<_>>();
 
                 let fn_ty = match def {
                     Some(ty::InstanceDef::Virtual(..)) => {
-                        FnType::new_vtable(bcx.ccx, sig, &extra_args)
+                        FnType::new_vtable(bx.cx, sig, &extra_args)
                     }
                     Some(ty::InstanceDef::DropGlue(_, None)) => {
                         // empty drop glue - a nop.
                         let &(_, target) = destination.as_ref().unwrap();
-                        funclet_br(self, bcx, target);
+                        funclet_br(self, bx, target);
                         return;
                     }
-                    _ => FnType::new(bcx.ccx, sig, &extra_args)
+                    _ => FnType::new(bx.cx, sig, &extra_args)
                 };
 
                 // The arguments we'll be passing. Plus one to account for outptr, if used.
@@ -497,7 +497,7 @@
                 // Prepare the return value destination
                 let ret_dest = if let Some((ref dest, _)) = *destination {
                     let is_intrinsic = intrinsic.is_some();
-                    self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs,
+                    self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs,
                                           is_intrinsic)
                 } else {
                     ReturnDest::Nothing
@@ -509,7 +509,7 @@
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to())
+                            C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,
@@ -529,31 +529,31 @@
                                     span_bug!(span, "shuffle indices must be constant");
                                 }
                                 mir::Operand::Constant(ref constant) => {
-                                    let val = self.trans_constant(&bcx, constant);
+                                    let val = self.trans_constant(&bx, constant);
                                     return OperandRef {
                                         val: Immediate(val.llval),
-                                        layout: bcx.ccx.layout_of(val.ty)
+                                        layout: bx.cx.layout_of(val.ty)
                                     };
                                 }
                             }
                         }
 
-                        self.trans_operand(&bcx, arg)
+                        self.trans_operand(&bx, arg)
                     }).collect();
 
 
-                    let callee_ty = instance.as_ref().unwrap().ty(bcx.ccx.tcx());
-                    trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &args, dest,
+                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx);
+                    trans_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
                                          terminator.source_info.span);
 
                     if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(&bcx, ret_dest, &fn_ty.ret, dst.llval);
+                        self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval);
                     }
 
                     if let Some((_, target)) = *destination {
-                        funclet_br(self, bcx, target);
+                        funclet_br(self, bx, target);
                     } else {
-                        bcx.unreachable();
+                        bx.unreachable();
                     }
 
                     return;
@@ -568,11 +568,11 @@
                 };
 
                 for (i, arg) in first_args.iter().enumerate() {
-                    let mut op = self.trans_operand(&bcx, arg);
+                    let mut op = self.trans_operand(&bx, arg);
                     if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                         if let Pair(data_ptr, meta) = op.val {
                             llfn = Some(meth::VirtualIndex::from_index(idx)
-                                .get_fn(&bcx, meta, &fn_ty));
+                                .get_fn(&bx, meta, &fn_ty));
                             llargs.push(data_ptr);
                             continue;
                         }
@@ -583,27 +583,27 @@
                     match (arg, op.val) {
                         (&mir::Operand::Copy(_), Ref(..)) |
                         (&mir::Operand::Constant(_), Ref(..)) => {
-                            let tmp = PlaceRef::alloca(&bcx, op.layout, "const");
-                            op.val.store(&bcx, tmp);
+                            let tmp = PlaceRef::alloca(&bx, op.layout, "const");
+                            op.val.store(&bx, tmp);
                             op.val = Ref(tmp.llval, tmp.align);
                         }
                         _ => {}
                     }
 
-                    self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
+                    self.trans_argument(&bx, op, &mut llargs, &fn_ty.args[i]);
                 }
                 if let Some(tup) = untuple {
-                    self.trans_arguments_untupled(&bcx, tup, &mut llargs,
+                    self.trans_arguments_untupled(&bx, tup, &mut llargs,
                         &fn_ty.args[first_args.len()..])
                 }
 
                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => callee::get_fn(bcx.ccx, instance),
+                    (None, Some(instance)) => callee::get_fn(bx.cx, instance),
                     _ => span_bug!(span, "no llfn for call"),
                 };
 
-                do_call(self, bcx, fn_ty, fn_ptr, &llargs,
+                do_call(self, bx, fn_ty, fn_ptr, &llargs,
                         destination.as_ref().map(|&(_, target)| (ret_dest, target)),
                         cleanup);
             }
@@ -614,13 +614,13 @@
     }
 
     fn trans_argument(&mut self,
-                      bcx: &Builder<'a, 'tcx>,
+                      bx: &Builder<'a, 'tcx>,
                       op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
                       arg: &ArgType<'tcx>) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(C_undef(ty.llvm_type(bcx.ccx)));
+            llargs.push(C_undef(ty.llvm_type(bx.cx)));
         }
 
         if arg.is_ignore() {
@@ -643,12 +643,12 @@
             Immediate(_) | Pair(..) => {
                 match arg.mode {
                     PassMode::Indirect(_) | PassMode::Cast(_) => {
-                        let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
-                        op.val.store(bcx, scratch);
+                        let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
+                        op.val.store(bx, scratch);
                         (scratch.llval, scratch.align, true)
                     }
                     _ => {
-                        (op.immediate_or_packed_pair(bcx), arg.layout.align, false)
+                        (op.immediate_or_packed_pair(bx), arg.layout.align, false)
                     }
                 }
             }
@@ -658,8 +658,8 @@
                     // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                     // have scary latent bugs around.
 
-                    let scratch = PlaceRef::alloca(bcx, arg.layout, "arg");
-                    base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align);
+                    let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
+                    base::memcpy_ty(bx, scratch.llval, llval, op.layout, align);
                     (scratch.llval, scratch.align, true)
                 } else {
                     (llval, align, true)
@@ -670,7 +670,7 @@
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
+                llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()),
                                  align.min(arg.layout.align));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
@@ -678,14 +678,14 @@
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bcx.load(llval, align);
+                llval = bx.load(llval, align);
                 if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
-                        bcx.range_metadata(llval, 0..2);
+                        bx.range_metadata(llval, 0..2);
                     }
                 }
                 // We store bools as i8 so we need to truncate to i1.
-                llval = base::to_immediate(bcx, llval, arg.layout);
+                llval = base::to_immediate(bx, llval, arg.layout);
             }
         }
 
@@ -693,38 +693,38 @@
     }
 
     fn trans_arguments_untupled(&mut self,
-                                bcx: &Builder<'a, 'tcx>,
+                                bx: &Builder<'a, 'tcx>,
                                 operand: &mir::Operand<'tcx>,
                                 llargs: &mut Vec<ValueRef>,
                                 args: &[ArgType<'tcx>]) {
-        let tuple = self.trans_operand(bcx, operand);
+        let tuple = self.trans_operand(bx, operand);
 
         // Handle both by-ref and immediate tuples.
         if let Ref(llval, align) = tuple.val {
             let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
             for i in 0..tuple.layout.fields.count() {
-                let field_ptr = tuple_ptr.project_field(bcx, i);
-                self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]);
+                let field_ptr = tuple_ptr.project_field(bx, i);
+                self.trans_argument(bx, field_ptr.load(bx), llargs, &args[i]);
             }
         } else {
             // If the tuple is immediate, the elements are as well.
             for i in 0..tuple.layout.fields.count() {
-                let op = tuple.extract_field(bcx, i);
-                self.trans_argument(bcx, op, llargs, &args[i]);
+                let op = tuple.extract_field(bx, i);
+                self.trans_argument(bx, op, llargs, &args[i]);
             }
         }
     }
 
-    fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> PlaceRef<'tcx> {
-        let ccx = bcx.ccx;
+    fn get_personality_slot(&mut self, bx: &Builder<'a, 'tcx>) -> PlaceRef<'tcx> {
+        let cx = bx.cx;
         if let Some(slot) = self.personality_slot {
             slot
         } else {
-            let layout = ccx.layout_of(ccx.tcx().intern_tup(&[
-                ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8),
-                ccx.tcx().types.i32
+            let layout = cx.layout_of(cx.tcx.intern_tup(&[
+                cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
+                cx.tcx.types.i32
             ], false));
-            let slot = PlaceRef::alloca(bcx, layout, "personalityslot");
+            let slot = PlaceRef::alloca(bx, layout, "personalityslot");
             self.personality_slot = Some(slot);
             slot
         }
@@ -745,28 +745,28 @@
     }
 
     fn landing_pad_uncached(&mut self, target_bb: BasicBlockRef) -> BasicBlockRef {
-        if base::wants_msvc_seh(self.ccx.sess()) {
+        if base::wants_msvc_seh(self.cx.sess()) {
             span_bug!(self.mir.span, "landing pad was not inserted?")
         }
 
-        let bcx = self.new_block("cleanup");
+        let bx = self.new_block("cleanup");
 
-        let llpersonality = self.ccx.eh_personality();
+        let llpersonality = self.cx.eh_personality();
         let llretty = self.landing_pad_type();
-        let lp = bcx.landing_pad(llretty, llpersonality, 1);
-        bcx.set_cleanup(lp);
+        let lp = bx.landing_pad(llretty, llpersonality, 1);
+        bx.set_cleanup(lp);
 
-        let slot = self.get_personality_slot(&bcx);
-        slot.storage_live(&bcx);
-        Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot);
+        let slot = self.get_personality_slot(&bx);
+        slot.storage_live(&bx);
+        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot);
 
-        bcx.br(target_bb);
-        bcx.llbb()
+        bx.br(target_bb);
+        bx.llbb()
     }
 
     fn landing_pad_type(&self) -> Type {
-        let ccx = self.ccx;
-        Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false)
+        let cx = self.cx;
+        Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false)
     }
 
     fn unreachable_block(&mut self) -> BasicBlockRef {
@@ -779,16 +779,16 @@
     }
 
     pub fn new_block(&self, name: &str) -> Builder<'a, 'tcx> {
-        Builder::new_block(self.ccx, self.llfn, name)
+        Builder::new_block(self.cx, self.llfn, name)
     }
 
-    pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
-        let builder = Builder::with_ccx(self.ccx);
-        builder.position_at_end(self.blocks[bb]);
-        builder
+    pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
+        let bx = Builder::with_cx(self.cx);
+        bx.position_at_end(self.blocks[bb]);
+        bx
     }
 
-    fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn make_return_dest(&mut self, bx: &Builder<'a, 'tcx>,
                         dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx>,
                         llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
                         -> ReturnDest<'tcx> {
@@ -805,16 +805,16 @@
                     return if fn_ret.is_indirect() {
                         // Odd, but possible, case: we have an operand temporary,
                         // but the calling convention has an indirect return.
-                        let tmp = PlaceRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
+                        tmp.storage_live(bx);
                         llargs.push(tmp.llval);
                         ReturnDest::IndirectOperand(tmp, index)
                     } else if is_intrinsic {
                         // Currently, intrinsics always need a location to store
                         // the result, so we create a temporary alloca for the
                         // result.
-                        let tmp = PlaceRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
+                        let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret");
+                        tmp.storage_live(bx);
                         ReturnDest::IndirectOperand(tmp, index)
                     } else {
                         ReturnDest::DirectOperand(index)
@@ -825,7 +825,7 @@
                 }
             }
         } else {
-            self.trans_place(bcx, dest)
+            self.trans_place(bx, dest)
         };
         if fn_ret.is_indirect() {
             if dest.align.abi() < dest.layout.align.abi() {
@@ -844,20 +844,20 @@
         }
     }
 
-    fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn trans_transmute(&mut self, bx: &Builder<'a, 'tcx>,
                        src: &mir::Operand<'tcx>,
                        dst: &mir::Place<'tcx>) {
         if let mir::Place::Local(index) = *dst {
             match self.locals[index] {
-                LocalRef::Place(place) => self.trans_transmute_into(bcx, src, place),
+                LocalRef::Place(place) => self.trans_transmute_into(bx, src, place),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bcx.ccx.layout_of(self.monomorphized_place_ty(dst));
+                    let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst));
                     assert!(!dst_layout.ty.has_erasable_regions());
-                    let place = PlaceRef::alloca(bcx, dst_layout, "transmute_temp");
-                    place.storage_live(bcx);
-                    self.trans_transmute_into(bcx, src, place);
-                    let op = place.load(bcx);
-                    place.storage_dead(bcx);
+                    let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
+                    place.storage_live(bx);
+                    self.trans_transmute_into(bx, src, place);
+                    let op = place.load(bx);
+                    place.storage_dead(bx);
                     self.locals[index] = LocalRef::Operand(Some(op));
                 }
                 LocalRef::Operand(Some(op)) => {
@@ -866,25 +866,25 @@
                 }
             }
         } else {
-            let dst = self.trans_place(bcx, dst);
-            self.trans_transmute_into(bcx, src, dst);
+            let dst = self.trans_place(bx, dst);
+            self.trans_transmute_into(bx, src, dst);
         }
     }
 
-    fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
+    fn trans_transmute_into(&mut self, bx: &Builder<'a, 'tcx>,
                             src: &mir::Operand<'tcx>,
                             dst: PlaceRef<'tcx>) {
-        let src = self.trans_operand(bcx, src);
-        let llty = src.layout.llvm_type(bcx.ccx);
-        let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
+        let src = self.trans_operand(bx, src);
+        let llty = src.layout.llvm_type(bx.cx);
+        let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
         let align = src.layout.align.min(dst.layout.align);
-        src.val.store(bcx, PlaceRef::new_sized(cast_ptr, src.layout, align));
+        src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
     }
 
 
     // Stores the return value of a function call into its final location.
     fn store_return(&mut self,
-                    bcx: &Builder<'a, 'tcx>,
+                    bx: &Builder<'a, 'tcx>,
                     dest: ReturnDest<'tcx>,
                     ret_ty: &ArgType<'tcx>,
                     llval: ValueRef) {
@@ -892,23 +892,23 @@
 
         match dest {
             Nothing => (),
-            Store(dst) => ret_ty.store(bcx, llval, dst),
+            Store(dst) => ret_ty.store(bx, llval, dst),
             IndirectOperand(tmp, index) => {
-                let op = tmp.load(bcx);
-                tmp.storage_dead(bcx);
+                let op = tmp.load(bx);
+                tmp.storage_dead(bx);
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
                 let op = if let PassMode::Cast(_) = ret_ty.mode {
-                    let tmp = PlaceRef::alloca(bcx, ret_ty.layout, "tmp_ret");
-                    tmp.storage_live(bcx);
-                    ret_ty.store(bcx, llval, tmp);
-                    let op = tmp.load(bcx);
-                    tmp.storage_dead(bcx);
+                    let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
+                    tmp.storage_live(bx);
+                    ret_ty.store(bx, llval, tmp);
+                    let op = tmp.load(bx);
+                    tmp.storage_dead(bx);
                     op
                 } else {
-                    OperandRef::from_immediate_or_packed_pair(bcx, llval, ret_ty.layout)
+                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout)
                 };
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index aa640d3..71ce0aa 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -27,7 +27,7 @@
 use abi::{self, Abi};
 use callee;
 use builder::Builder;
-use common::{self, CrateContext, const_get_elt, val_ty};
+use common::{self, CodegenCx, const_get_elt, val_ty};
 use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64};
 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
 use common::const_to_opt_u128;
@@ -43,7 +43,7 @@
 use std::ptr;
 
 use super::operand::{OperandRef, OperandValue};
-use super::MirContext;
+use super::FunctionCx;
 
 /// A sized constant rvalue.
 /// The LLVM type might not be the same for a single Rust type,
@@ -62,46 +62,46 @@
         }
     }
 
-    pub fn from_constint(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> {
-        let tcx = ccx.tcx();
+    pub fn from_constint(cx: &CodegenCx<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> {
+        let tcx = cx.tcx;
         let (llval, ty) = match *ci {
-            I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8),
-            I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16),
-            I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32),
-            I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64),
-            I128(v) => (C_uint_big(Type::i128(ccx), v as u128), tcx.types.i128),
-            Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize),
-            U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8),
-            U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16),
-            U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32),
-            U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64),
-            U128(v) => (C_uint_big(Type::i128(ccx), v), tcx.types.u128),
-            Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize),
+            I8(v) => (C_int(Type::i8(cx), v as i64), tcx.types.i8),
+            I16(v) => (C_int(Type::i16(cx), v as i64), tcx.types.i16),
+            I32(v) => (C_int(Type::i32(cx), v as i64), tcx.types.i32),
+            I64(v) => (C_int(Type::i64(cx), v as i64), tcx.types.i64),
+            I128(v) => (C_uint_big(Type::i128(cx), v as u128), tcx.types.i128),
+            Isize(v) => (C_int(Type::isize(cx), v.as_i64()), tcx.types.isize),
+            U8(v) => (C_uint(Type::i8(cx), v as u64), tcx.types.u8),
+            U16(v) => (C_uint(Type::i16(cx), v as u64), tcx.types.u16),
+            U32(v) => (C_uint(Type::i32(cx), v as u64), tcx.types.u32),
+            U64(v) => (C_uint(Type::i64(cx), v), tcx.types.u64),
+            U128(v) => (C_uint_big(Type::i128(cx), v), tcx.types.u128),
+            Usize(v) => (C_uint(Type::isize(cx), v.as_u64()), tcx.types.usize),
         };
         Const { llval: llval, ty: ty }
     }
 
     /// Translate ConstVal into an LLVM constant value.
-    pub fn from_constval(ccx: &CrateContext<'a, 'tcx>,
+    pub fn from_constval(cx: &CodegenCx<'a, 'tcx>,
                          cv: &ConstVal,
                          ty: Ty<'tcx>)
                          -> Const<'tcx> {
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
+        let llty = cx.layout_of(ty).llvm_type(cx);
         let val = match *cv {
             ConstVal::Float(v) => {
                 let bits = match v.ty {
-                    ast::FloatTy::F32 => C_u32(ccx, v.bits as u32),
-                    ast::FloatTy::F64 => C_u64(ccx, v.bits as u64)
+                    ast::FloatTy::F32 => C_u32(cx, v.bits as u32),
+                    ast::FloatTy::F64 => C_u64(cx, v.bits as u64)
                 };
                 consts::bitcast(bits, llty)
             }
-            ConstVal::Bool(v) => C_bool(ccx, v),
-            ConstVal::Integral(ref i) => return Const::from_constint(ccx, i),
-            ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
+            ConstVal::Bool(v) => C_bool(cx, v),
+            ConstVal::Integral(ref i) => return Const::from_constint(cx, i),
+            ConstVal::Str(ref v) => C_str_slice(cx, v.clone()),
             ConstVal::ByteStr(v) => {
-                consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str")
+                consts::addr_of(cx, C_bytes(cx, v.data), cx.align_of(ty), "byte_str")
             }
-            ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64),
+            ConstVal::Char(c) => C_uint(Type::char(cx), c as u64),
             ConstVal::Function(..) => C_undef(llty),
             ConstVal::Variant(_) |
             ConstVal::Aggregate(..) |
@@ -115,11 +115,11 @@
         Const::new(val, ty)
     }
 
-    fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
-        let layout = ccx.layout_of(self.ty);
-        let field = layout.field(ccx, i);
+    fn get_field(&self, cx: &CodegenCx<'a, 'tcx>, i: usize) -> ValueRef {
+        let layout = cx.layout_of(self.ty);
+        let field = layout.field(cx, i);
         if field.is_zst() {
-            return C_undef(field.immediate_llvm_type(ccx));
+            return C_undef(field.immediate_llvm_type(cx));
         }
         let offset = layout.fields.offset(i);
         match layout.abi {
@@ -130,12 +130,12 @@
 
             layout::Abi::ScalarPair(ref a, ref b) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(ccx));
+                    assert_eq!(field.size, a.value.size(cx));
                     const_get_elt(self.llval, 0)
                 } else {
-                    assert_eq!(offset, a.value.size(ccx)
-                        .abi_align(b.value.align(ccx)));
-                    assert_eq!(field.size, b.value.size(ccx));
+                    assert_eq!(offset, a.value.size(cx)
+                        .abi_align(b.value.align(cx)));
+                    assert_eq!(field.size, b.value.size(cx));
                     const_get_elt(self.llval, 1)
                 }
             }
@@ -145,14 +145,14 @@
         }
     }
 
-    fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
-        (self.get_field(ccx, 0), self.get_field(ccx, 1))
+    fn get_pair(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
+        (self.get_field(cx, 0), self.get_field(cx, 1))
     }
 
-    fn get_fat_ptr(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
+    fn get_fat_ptr(&self, cx: &CodegenCx<'a, 'tcx>) -> (ValueRef, ValueRef) {
         assert_eq!(abi::FAT_PTR_ADDR, 0);
         assert_eq!(abi::FAT_PTR_EXTRA, 1);
-        self.get_pair(ccx)
+        self.get_pair(cx)
     }
 
     fn as_place(&self) -> ConstPlace<'tcx> {
@@ -163,9 +163,9 @@
         }
     }
 
-    pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
-        let layout = ccx.layout_of(self.ty);
-        let llty = layout.immediate_llvm_type(ccx);
+    pub fn to_operand(&self, cx: &CodegenCx<'a, 'tcx>) -> OperandRef<'tcx> {
+        let layout = cx.layout_of(self.ty);
+        let llty = layout.immediate_llvm_type(cx);
         let llvalty = val_ty(self.llval);
 
         let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
@@ -178,9 +178,9 @@
         } else {
             // Otherwise, or if the value is not immediate, we create
             // a constant LLVM global and cast its address if necessary.
-            let align = ccx.align_of(self.ty);
-            let ptr = consts::addr_of(ccx, self.llval, align, "const");
-            OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()),
+            let align = cx.align_of(self.ty);
+            let ptr = consts::addr_of(cx, self.llval, align, "const");
+            OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(cx).ptr_to()),
                               layout.align)
         };
 
@@ -232,10 +232,10 @@
         }
     }
 
-    pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
+    pub fn len<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> ValueRef {
         match self.ty.sty {
             ty::TyArray(_, n) => {
-                C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap())
+                C_usize(cx, n.val.to_const_int().unwrap().to_u64().unwrap())
             }
             ty::TySlice(_) | ty::TyStr => {
                 assert!(self.llextra != ptr::null_mut());
@@ -249,7 +249,7 @@
 /// Machinery for translating a constant's MIR to LLVM values.
 /// FIXME(eddyb) use miri and lower its allocations to LLVM.
 struct MirConstContext<'a, 'tcx: 'a> {
-    ccx: &'a CrateContext<'a, 'tcx>,
+    cx: &'a CodegenCx<'a, 'tcx>,
     mir: &'a mir::Mir<'tcx>,
 
     /// Type parameters for const fn and associated constants.
@@ -270,13 +270,13 @@
 }
 
 impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
-    fn new(ccx: &'a CrateContext<'a, 'tcx>,
+    fn new(cx: &'a CodegenCx<'a, 'tcx>,
            mir: &'a mir::Mir<'tcx>,
            substs: &'tcx Substs<'tcx>,
            args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
            -> MirConstContext<'a, 'tcx> {
         let mut context = MirConstContext {
-            ccx,
+            cx,
             mir,
             substs,
             locals: (0..mir.local_decls.len()).map(|_| None).collect(),
@@ -289,27 +289,27 @@
         context
     }
 
-    fn trans_def(ccx: &'a CrateContext<'a, 'tcx>,
+    fn trans_def(cx: &'a CodegenCx<'a, 'tcx>,
                  def_id: DefId,
                  substs: &'tcx Substs<'tcx>,
                  args: IndexVec<mir::Local, Result<Const<'tcx>, ConstEvalErr<'tcx>>>)
                  -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
-        let instance = ty::Instance::resolve(ccx.tcx(),
+        let instance = ty::Instance::resolve(cx.tcx,
                                              ty::ParamEnv::empty(traits::Reveal::All),
                                              def_id,
                                              substs).unwrap();
-        let mir = ccx.tcx().instance_mir(instance.def);
-        MirConstContext::new(ccx, &mir, instance.substs, args).trans()
+        let mir = cx.tcx.instance_mir(instance.def);
+        MirConstContext::new(cx, &mir, instance.substs, args).trans()
     }
 
     fn monomorphize<T>(&self, value: &T) -> T
         where T: TransNormalize<'tcx>
     {
-        self.ccx.tcx().trans_apply_param_substs(self.substs, value)
+        self.cx.tcx.trans_apply_param_substs(self.substs, value)
     }
 
     fn trans(&mut self) -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
-        let tcx = self.ccx.tcx();
+        let tcx = self.cx.tcx;
         let mut bb = mir::START_BLOCK;
 
         // Make sure to evaluate all statements to
@@ -399,13 +399,13 @@
                         let result = if fn_ty.fn_sig(tcx).abi() == Abi::RustIntrinsic {
                             match &tcx.item_name(def_id)[..] {
                                 "size_of" => {
-                                    let llval = C_usize(self.ccx,
-                                        self.ccx.size_of(substs.type_at(0)).bytes());
+                                    let llval = C_usize(self.cx,
+                                        self.cx.size_of(substs.type_at(0)).bytes());
                                     Ok(Const::new(llval, tcx.types.usize))
                                 }
                                 "min_align_of" => {
-                                    let llval = C_usize(self.ccx,
-                                        self.ccx.align_of(substs.type_at(0)).abi());
+                                    let llval = C_usize(self.cx,
+                                        self.cx.align_of(substs.type_at(0)).abi());
                                     Ok(Const::new(llval, tcx.types.usize))
                                 }
                                 _ => span_bug!(span, "{:?} in constant", terminator.kind)
@@ -430,12 +430,12 @@
                                     match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
                                         Some((llval, of)) => {
                                             Ok(trans_const_adt(
-                                                self.ccx,
+                                                self.cx,
                                                 binop_ty,
                                                 &mir::AggregateKind::Tuple,
                                                 &[
                                                     Const::new(llval, val_ty),
-                                                    Const::new(C_bool(self.ccx, of), tcx.types.bool)
+                                                    Const::new(C_bool(self.cx, of), tcx.types.bool)
                                                 ]))
                                         }
                                         None => {
@@ -447,7 +447,7 @@
                                 }
                             })()
                         } else {
-                            MirConstContext::trans_def(self.ccx, def_id, substs, arg_vals)
+                            MirConstContext::trans_def(self.cx, def_id, substs, arg_vals)
                         };
                         add_err(&mut failure, &result);
                         self.store(dest, result, span);
@@ -462,7 +462,7 @@
     }
 
     fn is_binop_lang_item(&mut self, def_id: DefId) -> Option<(mir::BinOp, bool)> {
-        let tcx = self.ccx.tcx();
+        let tcx = self.cx.tcx;
         let items = tcx.lang_items();
         let def_id = Some(def_id);
         if items.i128_add_fn() == def_id { Some((mir::BinOp::Add, false)) }
@@ -505,7 +505,7 @@
 
     fn const_place(&self, place: &mir::Place<'tcx>, span: Span)
                     -> Result<ConstPlace<'tcx>, ConstEvalErr<'tcx>> {
-        let tcx = self.ccx.tcx();
+        let tcx = self.cx.tcx;
 
         if let mir::Place::Local(index) = *place {
             return self.locals[index].clone().unwrap_or_else(|| {
@@ -517,7 +517,7 @@
             mir::Place::Local(_)  => bug!(), // handled above
             mir::Place::Static(box mir::Static { def_id, ty }) => {
                 ConstPlace {
-                    base: Base::Static(consts::get_static(self.ccx, def_id)),
+                    base: Base::Static(consts::get_static(self.cx, def_id)),
                     llextra: ptr::null_mut(),
                     ty: self.monomorphize(&ty),
                 }
@@ -528,30 +528,30 @@
                     .projection_ty(tcx, &projection.elem);
                 let base = tr_base.to_const(span);
                 let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx);
-                let has_metadata = self.ccx.shared().type_has_metadata(projected_ty);
+                let has_metadata = self.cx.type_has_metadata(projected_ty);
 
                 let (projected, llextra) = match projection.elem {
                     mir::ProjectionElem::Deref => {
                         let (base, extra) = if !has_metadata {
                             (base.llval, ptr::null_mut())
                         } else {
-                            base.get_fat_ptr(self.ccx)
+                            base.get_fat_ptr(self.cx)
                         };
-                        if self.ccx.statics().borrow().contains_key(&base) {
+                        if self.cx.statics.borrow().contains_key(&base) {
                             (Base::Static(base), extra)
                         } else if let ty::TyStr = projected_ty.sty {
                             (Base::Str(base), extra)
                         } else {
                             let v = base;
-                            let v = self.ccx.const_unsized().borrow().get(&v).map_or(v, |&v| v);
+                            let v = self.cx.const_unsized.borrow().get(&v).map_or(v, |&v| v);
                             let mut val = unsafe { llvm::LLVMGetInitializer(v) };
                             if val.is_null() {
                                 span_bug!(span, "dereference of non-constant pointer `{:?}`",
                                           Value(base));
                             }
-                            let layout = self.ccx.layout_of(projected_ty);
+                            let layout = self.cx.layout_of(projected_ty);
                             if let layout::Abi::Scalar(ref scalar) = layout.abi {
-                                let i1_type = Type::i1(self.ccx);
+                                let i1_type = Type::i1(self.cx);
                                 if scalar.is_bool() && val_ty(val) != i1_type {
                                     unsafe {
                                         val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
@@ -562,7 +562,7 @@
                         }
                     }
                     mir::ProjectionElem::Field(ref field, _) => {
-                        let llprojected = base.get_field(self.ccx, field.index());
+                        let llprojected = base.get_field(self.cx, field.index());
                         let llextra = if !has_metadata {
                             ptr::null_mut()
                         } else {
@@ -581,11 +581,11 @@
                         };
 
                         // Produce an undef instead of an LLVM assertion on OOB.
-                        let len = common::const_to_uint(tr_base.len(self.ccx));
+                        let len = common::const_to_uint(tr_base.len(self.cx));
                         let llelem = if iv < len as u128 {
                             const_get_elt(base.llval, iv as u64)
                         } else {
-                            C_undef(self.ccx.layout_of(projected_ty).llvm_type(self.ccx))
+                            C_undef(self.cx.layout_of(projected_ty).llvm_type(self.cx))
                         };
 
                         (Base::Value(llelem), ptr::null_mut())
@@ -616,14 +616,14 @@
                 match constant.literal.clone() {
                     mir::Literal::Promoted { index } => {
                         let mir = &self.mir.promoted[index];
-                        MirConstContext::new(self.ccx, mir, self.substs, IndexVec::new()).trans()
+                        MirConstContext::new(self.cx, mir, self.substs, IndexVec::new()).trans()
                     }
                     mir::Literal::Value { value } => {
                         if let ConstVal::Unevaluated(def_id, substs) = value.val {
                             let substs = self.monomorphize(&substs);
-                            MirConstContext::trans_def(self.ccx, def_id, substs, IndexVec::new())
+                            MirConstContext::trans_def(self.cx, def_id, substs, IndexVec::new())
                         } else {
-                            Ok(Const::from_constval(self.ccx, &value.val, ty))
+                            Ok(Const::from_constval(self.cx, &value.val, ty))
                         }
                     }
                 }
@@ -640,12 +640,12 @@
         let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
             bug!("bad array type {:?}", array_ty)
         });
-        let llunitty = self.ccx.layout_of(elem_ty).llvm_type(self.ccx);
+        let llunitty = self.cx.layout_of(elem_ty).llvm_type(self.cx);
         // If the array contains enums, an LLVM array won't work.
         let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
             C_array(llunitty, fields)
         } else {
-            C_struct(self.ccx, fields, false)
+            C_struct(self.cx, fields, false)
         };
         Const::new(val, array_ty)
     }
@@ -653,7 +653,7 @@
     fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
                     dest_ty: Ty<'tcx>, span: Span)
                     -> Result<Const<'tcx>, ConstEvalErr<'tcx>> {
-        let tcx = self.ccx.tcx();
+        let tcx = self.cx.tcx;
         debug!("const_rvalue({:?}: {:?} @ {:?})", rvalue, dest_ty, span);
         let val = match *rvalue {
             mir::Rvalue::Use(ref operand) => self.const_operand(operand, span)?,
@@ -695,7 +695,7 @@
                 }
                 failure?;
 
-                trans_const_adt(self.ccx, dest_ty, kind, &fields)
+                trans_const_adt(self.cx, dest_ty, kind, &fields)
             }
 
             mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
@@ -706,7 +706,7 @@
                     mir::CastKind::ReifyFnPointer => {
                         match operand.ty.sty {
                             ty::TyFnDef(def_id, substs) => {
-                                callee::resolve_and_get_fn(self.ccx, def_id, substs)
+                                callee::resolve_and_get_fn(self.cx, def_id, substs)
                             }
                             _ => {
                                 span_bug!(span, "{} cannot be reified to a fn ptr",
@@ -728,7 +728,7 @@
                                 let input = tcx.erase_late_bound_regions_and_normalize(&input);
                                 let substs = tcx.mk_substs([operand.ty, input]
                                     .iter().cloned().map(Kind::from));
-                                callee::resolve_and_get_fn(self.ccx, call_once, substs)
+                                callee::resolve_and_get_fn(self.cx, call_once, substs)
                             }
                             _ => {
                                 bug!("{} cannot be cast to a fn ptr", operand.ty)
@@ -742,14 +742,14 @@
                     mir::CastKind::Unsize => {
                         let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
                             .expect("consts: unsizing got non-pointer type").ty;
-                        let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
+                        let (base, old_info) = if !self.cx.type_is_sized(pointee_ty) {
                             // Normally, the source is a thin pointer and we are
                             // adding extra info to make a fat pointer. The exception
                             // is when we are upcasting an existing object fat pointer
                             // to use a different vtable. In that case, we want to
                             // load out the original data pointer so we can repackage
                             // it.
-                            let (base, extra) = operand.get_fat_ptr(self.ccx);
+                            let (base, extra) = operand.get_fat_ptr(self.cx);
                             (base, Some(extra))
                         } else {
                             (operand.llval, None)
@@ -757,28 +757,28 @@
 
                         let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference)
                             .expect("consts: unsizing got non-pointer target type").ty;
-                        let ptr_ty = self.ccx.layout_of(unsized_ty).llvm_type(self.ccx).ptr_to();
+                        let ptr_ty = self.cx.layout_of(unsized_ty).llvm_type(self.cx).ptr_to();
                         let base = consts::ptrcast(base, ptr_ty);
-                        let info = base::unsized_info(self.ccx, pointee_ty,
+                        let info = base::unsized_info(self.cx, pointee_ty,
                                                       unsized_ty, old_info);
 
                         if old_info.is_none() {
-                            let prev_const = self.ccx.const_unsized().borrow_mut()
+                            let prev_const = self.cx.const_unsized.borrow_mut()
                                                      .insert(base, operand.llval);
                             assert!(prev_const.is_none() || prev_const == Some(operand.llval));
                         }
-                        C_fat_ptr(self.ccx, base, info)
+                        C_fat_ptr(self.cx, base, info)
                     }
-                    mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => {
+                    mir::CastKind::Misc if self.cx.layout_of(operand.ty).is_llvm_immediate() => {
                         let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
-                        let cast_layout = self.ccx.layout_of(cast_ty);
+                        let cast_layout = self.cx.layout_of(cast_ty);
                         assert!(cast_layout.is_llvm_immediate());
-                        let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
+                        let ll_t_out = cast_layout.immediate_llvm_type(self.cx);
                         let llval = operand.llval;
 
                         let mut signed = false;
-                        let l = self.ccx.layout_of(operand.ty);
+                        let l = self.cx.layout_of(operand.ty);
                         if let layout::Abi::Scalar(ref scalar) = l.abi {
                             if let layout::Int(_, true) = scalar.value {
                                 signed = true;
@@ -792,17 +792,17 @@
                                     llvm::LLVMConstIntCast(llval, ll_t_out.to_ref(), s)
                                 }
                                 (CastTy::Int(_), CastTy::Float) => {
-                                    cast_const_int_to_float(self.ccx, llval, signed, ll_t_out)
+                                    cast_const_int_to_float(self.cx, llval, signed, ll_t_out)
                                 }
                                 (CastTy::Float, CastTy::Float) => {
                                     llvm::LLVMConstFPCast(llval, ll_t_out.to_ref())
                                 }
                                 (CastTy::Float, CastTy::Int(IntTy::I)) => {
-                                    cast_const_float_to_int(self.ccx, &operand,
+                                    cast_const_float_to_int(self.cx, &operand,
                                                             true, ll_t_out, span)
                                 }
                                 (CastTy::Float, CastTy::Int(_)) => {
-                                    cast_const_float_to_int(self.ccx, &operand,
+                                    cast_const_float_to_int(self.cx, &operand,
                                                             false, ll_t_out, span)
                                 }
                                 (CastTy::Ptr(_), CastTy::Ptr(_)) |
@@ -813,7 +813,7 @@
                                 (CastTy::Int(_), CastTy::Ptr(_)) => {
                                     let s = signed as llvm::Bool;
                                     let usize_llval = llvm::LLVMConstIntCast(llval,
-                                        self.ccx.isize_ty().to_ref(), s);
+                                        self.cx.isize_ty.to_ref(), s);
                                     llvm::LLVMConstIntToPtr(usize_llval, ll_t_out.to_ref())
                                 }
                                 (CastTy::Ptr(_), CastTy::Int(_)) |
@@ -825,18 +825,18 @@
                         }
                     }
                     mir::CastKind::Misc => { // Casts from a fat-ptr.
-                        let l = self.ccx.layout_of(operand.ty);
-                        let cast = self.ccx.layout_of(cast_ty);
+                        let l = self.cx.layout_of(operand.ty);
+                        let cast = self.cx.layout_of(cast_ty);
                         if l.is_llvm_scalar_pair() {
-                            let (data_ptr, meta) = operand.get_fat_ptr(self.ccx);
+                            let (data_ptr, meta) = operand.get_fat_ptr(self.cx);
                             if cast.is_llvm_scalar_pair() {
                                 let data_cast = consts::ptrcast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(self.ccx, 0));
-                                C_fat_ptr(self.ccx, data_cast, meta)
+                                    cast.scalar_pair_element_llvm_type(self.cx, 0));
+                                C_fat_ptr(self.cx, data_cast, meta)
                             } else { // cast to thin-ptr
                                 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                 // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(self.ccx);
+                                let llcast_ty = cast.immediate_llvm_type(self.cx);
                                 consts::ptrcast(data_ptr, llcast_ty)
                             }
                         } else {
@@ -857,32 +857,32 @@
                 let base = match tr_place.base {
                     Base::Value(llval) => {
                         // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
-                        let align = if self.ccx.shared().type_is_sized(ty) {
-                            self.ccx.align_of(ty)
+                        let align = if self.cx.type_is_sized(ty) {
+                            self.cx.align_of(ty)
                         } else {
-                            self.ccx.tcx().data_layout.pointer_align
+                            self.cx.tcx.data_layout.pointer_align
                         };
                         if bk == mir::BorrowKind::Mut {
-                            consts::addr_of_mut(self.ccx, llval, align, "ref_mut")
+                            consts::addr_of_mut(self.cx, llval, align, "ref_mut")
                         } else {
-                            consts::addr_of(self.ccx, llval, align, "ref")
+                            consts::addr_of(self.cx, llval, align, "ref")
                         }
                     }
                     Base::Str(llval) |
                     Base::Static(llval) => llval
                 };
 
-                let ptr = if self.ccx.shared().type_is_sized(ty) {
+                let ptr = if self.cx.type_is_sized(ty) {
                     base
                 } else {
-                    C_fat_ptr(self.ccx, base, tr_place.llextra)
+                    C_fat_ptr(self.cx, base, tr_place.llextra)
                 };
                 Const::new(ptr, ref_ty)
             }
 
             mir::Rvalue::Len(ref place) => {
                 let tr_place = self.const_place(place, span)?;
-                Const::new(tr_place.len(self.ccx), tcx.types.usize)
+                Const::new(tr_place.len(self.cx), tcx.types.usize)
             }
 
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
@@ -905,9 +905,9 @@
 
                 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
                     Some((llval, of)) => {
-                        trans_const_adt(self.ccx, binop_ty, &mir::AggregateKind::Tuple, &[
+                        trans_const_adt(self.cx, binop_ty, &mir::AggregateKind::Tuple, &[
                             Const::new(llval, val_ty),
-                            Const::new(C_bool(self.ccx, of), tcx.types.bool)
+                            Const::new(C_bool(self.cx, of), tcx.types.bool)
                         ])
                     }
                     None => {
@@ -941,8 +941,8 @@
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(self.ccx.shared().type_is_sized(ty));
-                let llval = C_usize(self.ccx, self.ccx.size_of(ty).bytes());
+                assert!(self.cx.type_is_sized(ty));
+                let llval = C_usize(self.cx, self.cx.size_of(ty).bytes());
                 Const::new(llval, tcx.types.usize)
             }
 
@@ -1060,7 +1060,7 @@
     }
 }
 
-unsafe fn cast_const_float_to_int(ccx: &CrateContext,
+unsafe fn cast_const_float_to_int(cx: &CodegenCx,
                                   operand: &Const,
                                   signed: bool,
                                   int_ty: Type,
@@ -1074,7 +1074,7 @@
     // One way that might happen would be if addresses could be turned into integers in constant
     // expressions, but that doesn't appear to be possible?
     // In any case, an ICE is better than producing undef.
-    let llval_bits = consts::bitcast(llval, Type::ix(ccx, float_bits as u64));
+    let llval_bits = consts::bitcast(llval, Type::ix(cx, float_bits as u64));
     let bits = const_to_opt_u128(llval_bits, false).unwrap_or_else(|| {
         panic!("could not get bits of constant float {:?}",
                Value(llval));
@@ -1090,12 +1090,12 @@
     };
     if cast_result.status.contains(Status::INVALID_OP) {
         let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
-        err.report(ccx.tcx(), span, "expression");
+        err.report(cx.tcx, span, "expression");
     }
     C_uint_big(int_ty, cast_result.value)
 }
 
-unsafe fn cast_const_int_to_float(ccx: &CrateContext,
+unsafe fn cast_const_int_to_float(cx: &CodegenCx,
                                   llval: ValueRef,
                                   signed: bool,
                                   float_ty: Type) -> ValueRef {
@@ -1111,16 +1111,16 @@
         llvm::LLVMConstSIToFP(llval, float_ty.to_ref())
     } else if float_ty.float_width() == 32 && value >= MAX_F32_PLUS_HALF_ULP {
         // We're casting to f32 and the value is > f32::MAX + 0.5 ULP -> round up to infinity.
-        let infinity_bits = C_u32(ccx, ieee::Single::INFINITY.to_bits() as u32);
+        let infinity_bits = C_u32(cx, ieee::Single::INFINITY.to_bits() as u32);
         consts::bitcast(infinity_bits, float_ty)
     } else {
         llvm::LLVMConstUIToFP(llval, float_ty.to_ref())
     }
 }
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn trans_constant(&mut self,
-                          bcx: &Builder<'a, 'tcx>,
+                          bx: &Builder<'a, 'tcx>,
                           constant: &mir::Constant<'tcx>)
                           -> Const<'tcx>
     {
@@ -1129,21 +1129,21 @@
         let result = match constant.literal.clone() {
             mir::Literal::Promoted { index } => {
                 let mir = &self.mir.promoted[index];
-                MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans()
+                MirConstContext::new(bx.cx, mir, self.param_substs, IndexVec::new()).trans()
             }
             mir::Literal::Value { value } => {
                 if let ConstVal::Unevaluated(def_id, substs) = value.val {
                     let substs = self.monomorphize(&substs);
-                    MirConstContext::trans_def(bcx.ccx, def_id, substs, IndexVec::new())
+                    MirConstContext::trans_def(bx.cx, def_id, substs, IndexVec::new())
                 } else {
-                    Ok(Const::from_constval(bcx.ccx, &value.val, ty))
+                    Ok(Const::from_constval(bx.cx, &value.val, ty))
                 }
             }
         };
 
         let result = result.unwrap_or_else(|_| {
             // We've errored, so we don't have to produce working code.
-            let llty = bcx.ccx.layout_of(ty).llvm_type(bcx.ccx);
+            let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
             Const::new(C_undef(llty), ty)
         });
 
@@ -1154,11 +1154,11 @@
 
 
 pub fn trans_static_initializer<'a, 'tcx>(
-    ccx: &CrateContext<'a, 'tcx>,
+    cx: &CodegenCx<'a, 'tcx>,
     def_id: DefId)
     -> Result<ValueRef, ConstEvalErr<'tcx>>
 {
-    MirConstContext::trans_def(ccx, def_id, Substs::empty(), IndexVec::new())
+    MirConstContext::trans_def(cx, def_id, Substs::empty(), IndexVec::new())
         .map(|c| c.llval)
 }
 
@@ -1182,19 +1182,19 @@
 /// this could be changed in the future to avoid allocating unnecessary
 /// space after values of shorter-than-maximum cases.
 fn trans_const_adt<'a, 'tcx>(
-    ccx: &CrateContext<'a, 'tcx>,
+    cx: &CodegenCx<'a, 'tcx>,
     t: Ty<'tcx>,
     kind: &mir::AggregateKind,
     vals: &[Const<'tcx>]
 ) -> Const<'tcx> {
-    let l = ccx.layout_of(t);
+    let l = cx.layout_of(t);
     let variant_index = match *kind {
         mir::AggregateKind::Adt(_, index, _, _) => index,
         _ => 0,
     };
 
     if let layout::Abi::Uninhabited = l.abi {
-        return Const::new(C_undef(l.llvm_type(ccx)), t);
+        return Const::new(C_undef(l.llvm_type(cx)), t);
     }
 
     match l.variants {
@@ -1203,14 +1203,14 @@
             if let layout::FieldPlacement::Union(_) = l.fields {
                 assert_eq!(variant_index, 0);
                 assert_eq!(vals.len(), 1);
-                let (field_size, field_align) = ccx.size_and_align_of(vals[0].ty);
+                let (field_size, field_align) = cx.size_and_align_of(vals[0].ty);
                 let contents = [
                     vals[0].llval,
-                    padding(ccx, l.size - field_size)
+                    padding(cx, l.size - field_size)
                 ];
 
                 let packed = l.align.abi() < field_align.abi();
-                Const::new(C_struct(ccx, &contents, packed), t)
+                Const::new(C_struct(cx, &contents, packed), t)
             } else {
                 if let layout::Abi::Vector { .. } = l.abi {
                     if let layout::FieldPlacement::Array { .. } = l.fields {
@@ -1218,24 +1218,24 @@
                             .collect::<Vec<_>>()), t);
                     }
                 }
-                build_const_struct(ccx, l, vals, None)
+                build_const_struct(cx, l, vals, None)
             }
         }
         layout::Variants::Tagged { .. } => {
             let discr = match *kind {
                 mir::AggregateKind::Adt(adt_def, _, _, _) => {
-                    adt_def.discriminant_for_variant(ccx.tcx(), variant_index)
+                    adt_def.discriminant_for_variant(cx.tcx, variant_index)
                            .to_u128_unchecked() as u64
                 },
                 _ => 0,
             };
-            let discr_field = l.field(ccx, 0);
-            let discr = C_int(discr_field.llvm_type(ccx), discr as i64);
+            let discr_field = l.field(cx, 0);
+            let discr = C_int(discr_field.llvm_type(cx), discr as i64);
             if let layout::Abi::Scalar(_) = l.abi {
                 Const::new(discr, t)
             } else {
                 let discr = Const::new(discr, discr_field.ty);
-                build_const_struct(ccx, l.for_variant(ccx, variant_index), vals, Some(discr))
+                build_const_struct(cx, l.for_variant(cx, variant_index), vals, Some(discr))
             }
         }
         layout::Variants::NicheFilling {
@@ -1245,10 +1245,10 @@
             ..
         } => {
             if variant_index == dataful_variant {
-                build_const_struct(ccx, l.for_variant(ccx, dataful_variant), vals, None)
+                build_const_struct(cx, l.for_variant(cx, dataful_variant), vals, None)
             } else {
-                let niche = l.field(ccx, 0);
-                let niche_llty = niche.llvm_type(ccx);
+                let niche = l.field(cx, 0);
+                let niche_llty = niche.llvm_type(cx);
                 let niche_value = ((variant_index - niche_variants.start) as u128)
                     .wrapping_add(niche_start);
                 // FIXME(eddyb) Check the actual primitive type here.
@@ -1258,7 +1258,7 @@
                 } else {
                     C_uint_big(niche_llty, niche_value)
                 };
-                build_const_struct(ccx, l, &[Const::new(niche_llval, niche.ty)], None)
+                build_const_struct(cx, l, &[Const::new(niche_llval, niche.ty)], None)
             }
         }
     }
@@ -1272,7 +1272,7 @@
 /// initializer is 4-byte aligned then simply translating the tuple as
 /// a two-element struct will locate it at offset 4, and accesses to it
 /// will read the wrong memory.
-fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn build_const_struct<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                 layout: layout::TyLayout<'tcx>,
                                 vals: &[Const<'tcx>],
                                 discr: Option<Const<'tcx>>)
@@ -1285,16 +1285,16 @@
         layout::Abi::Vector { .. } if discr.is_none() => {
             let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
                 (f, layout.fields.offset(i))
-            }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst());
+            }).filter(|&(f, _)| !cx.layout_of(f.ty).is_zst());
             match (non_zst_fields.next(), non_zst_fields.next()) {
                 (Some((x, offset)), None) if offset.bytes() == 0 => {
                     return Const::new(x.llval, layout.ty);
                 }
                 (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
-                    return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty);
+                    return Const::new(C_struct(cx, &[a.llval, b.llval], false), layout.ty);
                 }
                 (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
-                    return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty);
+                    return Const::new(C_struct(cx, &[b.llval, a.llval], false), layout.ty);
                 }
                 _ => {}
             }
@@ -1309,7 +1309,7 @@
     cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2);
 
     if let Some(discr) = discr {
-        let (field_size, field_align) = ccx.size_and_align_of(discr.ty);
+        let (field_size, field_align) = cx.size_and_align_of(discr.ty);
         packed |= layout.align.abi() < field_align.abi();
         cfields.push(discr.llval);
         offset = field_size;
@@ -1319,19 +1319,19 @@
         (vals[i], layout.fields.offset(i))
     });
     for (val, target_offset) in parts {
-        let (field_size, field_align) = ccx.size_and_align_of(val.ty);
+        let (field_size, field_align) = cx.size_and_align_of(val.ty);
         packed |= layout.align.abi() < field_align.abi();
-        cfields.push(padding(ccx, target_offset - offset));
+        cfields.push(padding(cx, target_offset - offset));
         cfields.push(val.llval);
         offset = target_offset + field_size;
     }
 
     // Pad to the size of the whole type, not e.g. the variant.
-    cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset));
+    cfields.push(padding(cx, cx.size_of(layout.ty) - offset));
 
-    Const::new(C_struct(ccx, &cfields, packed), layout.ty)
+    Const::new(C_struct(cx, &cfields, packed), layout.ty)
 }
 
-fn padding(ccx: &CrateContext, size: Size) -> ValueRef {
-    C_undef(Type::array(&Type::i8(ccx), size.bytes()))
+fn padding(cx: &CodegenCx, size: Size) -> ValueRef {
+    C_undef(Type::array(&Type::i8(cx), size.bytes()))
 }
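
The hunks above in `mir/constant.rs` are pure renames, but `build_const_struct` is worth pausing on: it lays out a constant by walking fields in offset order and inserting undef byte-array padding between them. A minimal sketch of that discipline, outside of rustc (the `Part` type and `const_struct_parts` helper are made up for illustration, not rustc APIs, and packed/discriminant handling is omitted):

```rust
/// One piece of an emitted constant: either undef filler bytes or a field.
#[derive(Debug, PartialEq)]
enum Part {
    Padding(u64), // number of undef bytes, may be 0
    Field(u64),   // field size in bytes
}

/// Mirror of the loop in `build_const_struct`: fields must be given in
/// offset order; gaps become padding, and the constant is padded out to
/// the full type size at the end (not just to the variant's size).
fn const_struct_parts(fields: &[(u64, u64)], total_size: u64) -> Vec<Part> {
    let mut parts = Vec::new();
    let mut offset = 0;
    for &(target_offset, size) in fields {
        parts.push(Part::Padding(target_offset - offset));
        parts.push(Part::Field(size));
        offset = target_offset + size;
    }
    parts.push(Part::Padding(total_size - offset));
    parts
}
```

For example, a layout with a 1-byte field at offset 0 and a 4-byte field at offset 4 in an 8-byte type yields `Padding(0), Field(1), Padding(3), Field(4), Padding(0)`.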
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 917ff87..ddd78f2 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -19,7 +19,7 @@
 use rustc::session::config::FullDebugInfo;
 use base;
 use builder::Builder;
-use common::{CrateContext, Funclet};
+use common::{CodegenCx, Funclet};
 use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
 use monomorphize::Instance;
 use abi::{ArgAttribute, FnType, PassMode};
@@ -41,14 +41,14 @@
 use self::operand::{OperandRef, OperandValue};
 
 /// Master context for translating MIR.
-pub struct MirContext<'a, 'tcx:'a> {
+pub struct FunctionCx<'a, 'tcx:'a> {
     mir: &'a mir::Mir<'tcx>,
 
     debug_context: debuginfo::FunctionDebugContext,
 
     llfn: ValueRef,
 
-    ccx: &'a CrateContext<'a, 'tcx>,
+    cx: &'a CodegenCx<'a, 'tcx>,
 
     fn_ty: FnType<'tcx>,
 
@@ -102,16 +102,16 @@
     param_substs: &'tcx Substs<'tcx>,
 }
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn monomorphize<T>(&self, value: &T) -> T
         where T: TransNormalize<'tcx>
     {
-        self.ccx.tcx().trans_apply_param_substs(self.param_substs, value)
+        self.cx.tcx.trans_apply_param_substs(self.param_substs, value)
     }
 
-    pub fn set_debug_loc(&mut self, bcx: &Builder, source_info: mir::SourceInfo) {
+    pub fn set_debug_loc(&mut self, bx: &Builder, source_info: mir::SourceInfo) {
         let (scope, span) = self.debug_loc(source_info);
-        debuginfo::set_source_location(&self.debug_context, bcx, scope, span);
+        debuginfo::set_source_location(&self.debug_context, bx, scope, span);
     }
 
     pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) {
@@ -128,7 +128,7 @@
         // locations of macro expansions with that of the outermost expansion site
         // (unless the crate is being compiled with `-Z debug-macros`).
         if source_info.span.ctxt() == NO_EXPANSION ||
-           self.ccx.sess().opts.debugging_opts.debug_macros {
+           self.cx.sess().opts.debugging_opts.debug_macros {
             let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo());
             (scope, source_info.span)
         } else {
@@ -158,9 +158,9 @@
         let scope_metadata = self.scopes[scope_id].scope_metadata;
         if pos < self.scopes[scope_id].file_start_pos ||
            pos >= self.scopes[scope_id].file_end_pos {
-            let cm = self.ccx.sess().codemap();
+            let cm = self.cx.sess().codemap();
             let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
-            debuginfo::extend_scope_to_file(self.ccx,
+            debuginfo::extend_scope_to_file(self.cx,
                                             scope_metadata,
                                             &cm.lookup_char_pos(pos).file,
                                             defining_crate)
@@ -176,12 +176,12 @@
 }
 
 impl<'a, 'tcx> LocalRef<'tcx> {
-    fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
+    fn new_operand(cx: &CodegenCx<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
         if layout.is_zst() {
             // Zero-size temporaries aren't always initialized, which
             // doesn't matter because they don't contain data, but
             // we need something in the operand.
-            LocalRef::Operand(Some(OperandRef::new_zst(ccx, layout)))
+            LocalRef::Operand(Some(OperandRef::new_zst(cx, layout)))
         } else {
             LocalRef::Operand(None)
         }
@@ -191,46 +191,46 @@
 ///////////////////////////////////////////////////////////////////////////
 
 pub fn trans_mir<'a, 'tcx: 'a>(
-    ccx: &'a CrateContext<'a, 'tcx>,
+    cx: &'a CodegenCx<'a, 'tcx>,
     llfn: ValueRef,
     mir: &'a Mir<'tcx>,
     instance: Instance<'tcx>,
     sig: ty::FnSig<'tcx>,
 ) {
-    let fn_ty = FnType::new(ccx, sig, &[]);
+    let fn_ty = FnType::new(cx, sig, &[]);
     debug!("fn_ty: {:?}", fn_ty);
     let debug_context =
-        debuginfo::create_function_debug_context(ccx, instance, sig, llfn, mir);
-    let bcx = Builder::new_block(ccx, llfn, "start");
+        debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir);
+    let bx = Builder::new_block(cx, llfn, "start");
 
     if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
-        bcx.set_personality_fn(ccx.eh_personality());
+        bx.set_personality_fn(cx.eh_personality());
     }
 
     let cleanup_kinds = analyze::cleanup_kinds(&mir);
     // Allocate a `Block` for every basic block, except
     // the start block, if nothing loops back to it.
     let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
-    let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
+    let block_bxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
         mir.basic_blocks().indices().map(|bb| {
             if bb == mir::START_BLOCK && !reentrant_start_block {
-                bcx.llbb()
+                bx.llbb()
             } else {
-                bcx.build_sibling_block(&format!("{:?}", bb)).llbb()
+                bx.build_sibling_block(&format!("{:?}", bb)).llbb()
             }
         }).collect();
 
     // Compute debuginfo scopes from MIR scopes.
-    let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context);
-    let (landing_pads, funclets) = create_funclets(&bcx, &cleanup_kinds, &block_bcxs);
+    let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context);
+    let (landing_pads, funclets) = create_funclets(&bx, &cleanup_kinds, &block_bxs);
 
-    let mut mircx = MirContext {
+    let mut fx = FunctionCx {
         mir,
         llfn,
         fn_ty,
-        ccx,
+        cx,
         personality_slot: None,
-        blocks: block_bcxs,
+        blocks: block_bxs,
         unreachable_block: None,
         cleanup_kinds,
         landing_pads,
@@ -244,51 +244,51 @@
         },
     };
 
-    let memory_locals = analyze::memory_locals(&mircx);
+    let memory_locals = analyze::memory_locals(&fx);
 
     // Allocate variable and temp allocas
-    mircx.locals = {
-        let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &memory_locals);
+    fx.locals = {
+        let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals);
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty));
+            let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty));
             assert!(!layout.ty.has_erasable_regions());
 
             if let Some(name) = decl.name {
                 // User variable
-                let debug_scope = mircx.scopes[decl.source_info.scope];
-                let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo;
+                let debug_scope = fx.scopes[decl.source_info.scope];
+                let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == FullDebugInfo;
 
                 if !memory_locals.contains(local.index()) && !dbg {
                     debug!("alloc: {:?} ({}) -> operand", local, name);
-                    return LocalRef::new_operand(bcx.ccx, layout);
+                    return LocalRef::new_operand(bx.cx, layout);
                 }
 
                 debug!("alloc: {:?} ({}) -> place", local, name);
-                let place = PlaceRef::alloca(&bcx, layout, &name.as_str());
+                let place = PlaceRef::alloca(&bx, layout, &name.as_str());
                 if dbg {
-                    let (scope, span) = mircx.debug_loc(decl.source_info);
-                    declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
+                    let (scope, span) = fx.debug_loc(decl.source_info);
+                    declare_local(&bx, &fx.debug_context, name, layout.ty, scope,
                         VariableAccess::DirectVariable { alloca: place.llval },
                         VariableKind::LocalVariable, span);
                 }
                 LocalRef::Place(place)
             } else {
                 // Temporary or return place
-                if local == mir::RETURN_PLACE && mircx.fn_ty.ret.is_indirect() {
+                if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
                     debug!("alloc: {:?} (return place) -> place", local);
                     let llretptr = llvm::get_param(llfn, 0);
                     LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
                 } else if memory_locals.contains(local.index()) {
                     debug!("alloc: {:?} -> place", local);
-                    LocalRef::Place(PlaceRef::alloca(&bcx, layout, &format!("{:?}", local)))
+                    LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local)))
                 } else {
                     // If this is an immediate local, we do not create an
                     // alloca in advance. Instead we wait until we see the
                     // definition and update the operand there.
                     debug!("alloc: {:?} -> operand", local);
-                    LocalRef::new_operand(bcx.ccx, layout)
+                    LocalRef::new_operand(bx.cx, layout)
                 }
             }
         };
@@ -302,13 +302,13 @@
 
     // Branch to the START block, if it's not the entry block.
     if reentrant_start_block {
-        bcx.br(mircx.blocks[mir::START_BLOCK]);
+        bx.br(fx.blocks[mir::START_BLOCK]);
     }
 
     // Up until here, IR instructions for this function have explicitly not been annotated with
     // source code location, so we don't step into call setup code. From here on, source location
     // emitting should be enabled.
-    debuginfo::start_emitting_source_locations(&mircx.debug_context);
+    debuginfo::start_emitting_source_locations(&fx.debug_context);
 
     let rpo = traversal::reverse_postorder(&mir);
     let mut visited = BitVector::new(mir.basic_blocks().len());
@@ -316,7 +316,7 @@
     // Translate the body of each block using reverse postorder
     for (bb, _) in rpo {
         visited.insert(bb.index());
-        mircx.trans_block(bb);
+        fx.trans_block(bb);
     }
 
     // Remove blocks that haven't been visited, or have no
@@ -326,26 +326,26 @@
         if !visited.contains(bb.index()) {
             debug!("trans_mir: block {:?} was not visited", bb);
             unsafe {
-                llvm::LLVMDeleteBasicBlock(mircx.blocks[bb]);
+                llvm::LLVMDeleteBasicBlock(fx.blocks[bb]);
             }
         }
     }
 }
 
 fn create_funclets<'a, 'tcx>(
-    bcx: &Builder<'a, 'tcx>,
+    bx: &Builder<'a, 'tcx>,
     cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
-    block_bcxs: &IndexVec<mir::BasicBlock, BasicBlockRef>)
+    block_bxs: &IndexVec<mir::BasicBlock, BasicBlockRef>)
     -> (IndexVec<mir::BasicBlock, Option<BasicBlockRef>>,
         IndexVec<mir::BasicBlock, Option<Funclet>>)
 {
-    block_bcxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
+    block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
         match *cleanup_kind {
-            CleanupKind::Funclet if base::wants_msvc_seh(bcx.sess()) => {
-                let cleanup_bcx = bcx.build_sibling_block(&format!("funclet_{:?}", bb));
-                let cleanup = cleanup_bcx.cleanup_pad(None, &[]);
-                cleanup_bcx.br(llbb);
-                (Some(cleanup_bcx.llbb()), Some(Funclet::new(cleanup)))
+            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {
+                let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb));
+                let cleanup = cleanup_bx.cleanup_pad(None, &[]);
+                cleanup_bx.br(llbb);
+                (Some(cleanup_bx.llbb()), Some(Funclet::new(cleanup)))
             }
             _ => (None, None)
         }
@@ -355,19 +355,19 @@
 /// Produce, for each argument, a `ValueRef` pointing at the
 /// argument's value. As arguments are places, these are always
 /// indirect.
-fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
-                            mircx: &MirContext<'a, 'tcx>,
+fn arg_local_refs<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
+                            fx: &FunctionCx<'a, 'tcx>,
                             scopes: &IndexVec<mir::VisibilityScope, debuginfo::MirDebugScope>,
                             memory_locals: &BitVector)
                             -> Vec<LocalRef<'tcx>> {
-    let mir = mircx.mir;
-    let tcx = bcx.tcx();
+    let mir = fx.mir;
+    let tcx = bx.tcx();
     let mut idx = 0;
-    let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
+    let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize;
 
     // Get the argument scope, if it exists and if we need it.
     let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE];
-    let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo {
+    let arg_scope = if arg_scope.is_valid() && bx.sess().opts.debuginfo == FullDebugInfo {
         Some(arg_scope.scope_metadata)
     } else {
         None
@@ -392,17 +392,17 @@
             // to reconstruct it into a tuple local variable, from multiple
             // individual LLVM function arguments.
 
-            let arg_ty = mircx.monomorphize(&arg_decl.ty);
+            let arg_ty = fx.monomorphize(&arg_decl.ty);
             let tupled_arg_tys = match arg_ty.sty {
                 ty::TyTuple(ref tys, _) => tys,
                 _ => bug!("spread argument isn't a tuple?!")
             };
 
-            let place = PlaceRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
+            let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name);
             for i in 0..tupled_arg_tys.len() {
-                let arg = &mircx.fn_ty.args[idx];
+                let arg = &fx.fn_ty.args[idx];
                 idx += 1;
-                arg.store_fn_arg(bcx, &mut llarg_idx, place.project_field(bcx, i));
+                arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i));
             }
 
             // Now that we have one alloca that contains the aggregate value,
@@ -412,8 +412,8 @@
                     alloca: place.llval
                 };
                 declare_local(
-                    bcx,
-                    &mircx.debug_context,
+                    bx,
+                    &fx.debug_context,
                     arg_decl.name.unwrap_or(keywords::Invalid.name()),
                     arg_ty, scope,
                     variable_access,
@@ -425,7 +425,7 @@
             return LocalRef::Place(place);
         }
 
-        let arg = &mircx.fn_ty.args[idx];
+        let arg = &fx.fn_ty.args[idx];
         idx += 1;
         if arg.pad.is_some() {
             llarg_idx += 1;
@@ -438,22 +438,22 @@
             let local = |op| LocalRef::Operand(Some(op));
             match arg.mode {
                 PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bcx.ccx, arg.layout));
+                    return local(OperandRef::new_zst(bx.cx, arg.layout));
                 }
                 PassMode::Direct(_) => {
-                    let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(llarg, &name);
+                    let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(llarg, &name);
                     llarg_idx += 1;
                     return local(
-                        OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout));
+                        OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
                 }
                 PassMode::Pair(..) => {
-                    let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(a, &(name.clone() + ".0"));
+                    let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(a, &(name.clone() + ".0"));
                     llarg_idx += 1;
 
-                    let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(b, &(name + ".1"));
+                    let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+                    bx.set_value_name(b, &(name + ".1"));
                     llarg_idx += 1;
 
                     return local(OperandRef {
@@ -469,13 +469,13 @@
             // Don't copy an indirect argument to an alloca, the caller
             // already put it in a temporary alloca and gave it up.
             // FIXME: lifetimes
-            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(llarg, &name);
+            let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
+            bx.set_value_name(llarg, &name);
             llarg_idx += 1;
             PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
         } else {
-            let tmp = PlaceRef::alloca(bcx, arg.layout, &name);
-            arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
+            let tmp = PlaceRef::alloca(bx, arg.layout, &name);
+            arg.store_fn_arg(bx, &mut llarg_idx, tmp);
             tmp
         };
         arg_scope.map(|scope| {
@@ -498,8 +498,8 @@
                 }
 
                 declare_local(
-                    bcx,
-                    &mircx.debug_context,
+                    bx,
+                    &fx.debug_context,
                     arg_decl.name.unwrap_or(keywords::Invalid.name()),
                     arg.layout.ty,
                     scope,
@@ -512,7 +512,7 @@
 
             // Or is it the closure environment?
             let (closure_layout, env_ref) = match arg.layout.ty.sty {
-                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.ccx.layout_of(mt.ty), true),
+                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bx.cx.layout_of(mt.ty), true),
                 _ => (arg.layout, false)
             };
 
@@ -530,10 +530,10 @@
             // doesn't actually strip the offset when splitting the closure
             // environment into its components so it ends up out of bounds.
             let env_ptr = if !env_ref {
-                let scratch = PlaceRef::alloca(bcx,
-                    bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+                let scratch = PlaceRef::alloca(bx,
+                    bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                     "__debuginfo_env_ptr");
-                bcx.store(place.llval, scratch.llval, scratch.align);
+                bx.store(place.llval, scratch.llval, scratch.align);
                 scratch.llval
             } else {
                 place.llval
@@ -567,8 +567,8 @@
                     address_operations: &ops
                 };
                 declare_local(
-                    bcx,
-                    &mircx.debug_context,
+                    bx,
+                    &fx.debug_context,
                     decl.debug_name,
                     ty,
                     scope,
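
In `mir/mod.rs`, beyond the `mircx` -> `fx` and `bcx` -> `bx` renames, the `allocate_local` closure is the part that decides each local's representation. A reduced decision table that mirrors its rules, using made-up names (`classify_local` and `LocalKind` are not rustc items):

```rust
/// The three representations a MIR local can get in `trans_mir`.
enum LocalKind {
    /// Backed by memory: an alloca, or the caller-provided return pointer.
    Place,
    /// Zero-sized: an undef immediate stands in for the value.
    ZstOperand,
    /// No alloca; the operand is filled in at the defining statement.
    DeferredOperand,
}

fn classify_local(
    is_indirect_return: bool, // RETURN_PLACE with an indirect return type
    needs_memory: bool,       // reported by analyze::memory_locals
    wants_debuginfo: bool,    // named local under full debuginfo
    is_zst: bool,
) -> LocalKind {
    if is_indirect_return || needs_memory || wants_debuginfo {
        LocalKind::Place
    } else if is_zst {
        LocalKind::ZstOperand
    } else {
        LocalKind::DeferredOperand
    }
}
```

The indirect return place is special because it reuses the caller-provided pointer (`llvm::get_param(llfn, 0)`) rather than allocating.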
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 05af487..25db9f9 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -15,7 +15,7 @@
 use rustc_data_structures::indexed_vec::Idx;
 
 use base;
-use common::{self, CrateContext, C_undef, C_usize};
+use common::{self, CodegenCx, C_undef, C_usize};
 use builder::Builder;
 use value::Value;
 use type_of::LayoutLlvmExt;
@@ -24,7 +24,7 @@
 use std::fmt;
 use std::ptr;
 
-use super::{MirContext, LocalRef};
+use super::{FunctionCx, LocalRef};
 use super::place::PlaceRef;
 
 /// The representation of a Rust value. The enum variant is in fact
@@ -81,11 +81,11 @@
 }
 
 impl<'a, 'tcx> OperandRef<'tcx> {
-    pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
+    pub fn new_zst(cx: &CodegenCx<'a, 'tcx>,
                    layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
         assert!(layout.is_zst());
         OperandRef {
-            val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(ccx))),
+            val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))),
             layout
         }
     }
@@ -99,7 +99,7 @@
         }
     }
 
-    pub fn deref(self, ccx: &CrateContext<'a, 'tcx>) -> PlaceRef<'tcx> {
+    pub fn deref(self, cx: &CodegenCx<'a, 'tcx>) -> PlaceRef<'tcx> {
         let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference)
             .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
         let (llptr, llextra) = match self.val {
@@ -107,7 +107,7 @@
             OperandValue::Pair(llptr, llextra) => (llptr, llextra),
             OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self)
         };
-        let layout = ccx.layout_of(projected_ty);
+        let layout = cx.layout_of(projected_ty);
         PlaceRef {
             llval: llptr,
             llextra,
@@ -118,15 +118,15 @@
 
     /// If this operand is a `Pair`, we return an aggregate with the two values.
     /// For other cases, see `immediate`.
-    pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
+    pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'tcx>) -> ValueRef {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = self.layout.llvm_type(bcx.ccx);
+            let llty = self.layout.llvm_type(bx.cx);
             debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
                    self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = C_undef(llty);
-            llpair = bcx.insert_value(llpair, a, 0);
-            llpair = bcx.insert_value(llpair, b, 1);
+            llpair = bx.insert_value(llpair, a, 0);
+            llpair = bx.insert_value(llpair, b, 1);
             llpair
         } else {
             self.immediate()
@@ -134,7 +134,7 @@
     }
 
     /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
-    pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>,
+    pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'tcx>,
                                          llval: ValueRef,
                                          layout: TyLayout<'tcx>)
                                          -> OperandRef<'tcx> {
@@ -143,23 +143,23 @@
                     llval, layout);
 
             // Deconstruct the immediate aggregate.
-            OperandValue::Pair(bcx.extract_value(llval, 0),
-                               bcx.extract_value(llval, 1))
+            OperandValue::Pair(bx.extract_value(llval, 0),
+                               bx.extract_value(llval, 1))
         } else {
             OperandValue::Immediate(llval)
         };
         OperandRef { val, layout }
     }
 
-    pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
-        let field = self.layout.field(bcx.ccx, i);
+    pub fn extract_field(&self, bx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
+        let field = self.layout.field(bx.cx, i);
         let offset = self.layout.fields.offset(i);
 
         let mut val = match (self.val, &self.layout.abi) {
             // If we're uninhabited, or the field is ZST, it has no data.
             _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => {
                 return OperandRef {
-                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.ccx))),
+                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bx.cx))),
                     layout: field
                 };
             }
@@ -174,12 +174,12 @@
             // Extract a scalar component from a pair.
             (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bcx.ccx));
+                    assert_eq!(field.size, a.value.size(bx.cx));
                     OperandValue::Immediate(a_llval)
                 } else {
-                    assert_eq!(offset, a.value.size(bcx.ccx)
-                        .abi_align(b.value.align(bcx.ccx)));
-                    assert_eq!(field.size, b.value.size(bcx.ccx));
+                    assert_eq!(offset, a.value.size(bx.cx)
+                        .abi_align(b.value.align(bx.cx)));
+                    assert_eq!(field.size, b.value.size(bx.cx));
                     OperandValue::Immediate(b_llval)
                 }
             }
@@ -187,7 +187,7 @@
             // `#[repr(simd)]` types are also immediate.
             (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
                 OperandValue::Immediate(
-                    bcx.extract_element(llval, C_usize(bcx.ccx, i as u64)))
+                    bx.extract_element(llval, C_usize(bx.cx, i as u64)))
             }
 
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@@ -196,11 +196,11 @@
         // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
         match val {
             OperandValue::Immediate(ref mut llval) => {
-                *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.ccx));
+                *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx));
             }
             OperandValue::Pair(ref mut a, ref mut b) => {
-                *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.ccx, 0));
-                *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.ccx, 1));
+                *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0));
+                *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1));
             }
             OperandValue::Ref(..) => bug!()
         }
@@ -213,7 +213,7 @@
 }
 
 impl<'a, 'tcx> OperandValue {
-    pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
+    pub fn store(self, bx: &Builder<'a, 'tcx>, dest: PlaceRef<'tcx>) {
         debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
         // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
         // value is through `undef`, and store itself is useless.
@@ -222,28 +222,28 @@
         }
         match self {
             OperandValue::Ref(r, source_align) =>
-                base::memcpy_ty(bcx, dest.llval, r, dest.layout,
+                base::memcpy_ty(bx, dest.llval, r, dest.layout,
                                 source_align.min(dest.align)),
             OperandValue::Immediate(s) => {
-                bcx.store(base::from_immediate(bcx, s), dest.llval, dest.align);
+                bx.store(base::from_immediate(bx, s), dest.llval, dest.align);
             }
             OperandValue::Pair(a, b) => {
                 for (i, &x) in [a, b].iter().enumerate() {
-                    let mut llptr = bcx.struct_gep(dest.llval, i as u64);
+                    let mut llptr = bx.struct_gep(dest.llval, i as u64);
                     // Make sure to always store i1 as i8.
-                    if common::val_ty(x) == Type::i1(bcx.ccx) {
-                        llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
+                    if common::val_ty(x) == Type::i1(bx.cx) {
+                        llptr = bx.pointercast(llptr, Type::i8p(bx.cx));
                     }
-                    bcx.store(base::from_immediate(bcx, x), llptr, dest.align);
+                    bx.store(base::from_immediate(bx, x), llptr, dest.align);
                 }
             }
         }
     }
 }
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     fn maybe_trans_consume_direct(&mut self,
-                                  bcx: &Builder<'a, 'tcx>,
+                                  bx: &Builder<'a, 'tcx>,
                                   place: &mir::Place<'tcx>)
                                    -> Option<OperandRef<'tcx>>
     {
@@ -267,19 +267,19 @@
 
         // Moves out of scalar and scalar pair fields are trivial.
         if let &mir::Place::Projection(ref proj) = place {
-            if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
+            if let Some(o) = self.maybe_trans_consume_direct(bx, &proj.base) {
                 match proj.elem {
                     mir::ProjectionElem::Field(ref f, _) => {
-                        return Some(o.extract_field(bcx, f.index()));
+                        return Some(o.extract_field(bx, f.index()));
                     }
                     mir::ProjectionElem::Index(_) |
                     mir::ProjectionElem::ConstantIndex { .. } => {
                         // ZSTs don't require any actual memory access.
                         // FIXME(eddyb) deduplicate this with the identical
                         // checks in `trans_consume` and `extract_field`.
-                        let elem = o.layout.field(bcx.ccx, 0);
+                        let elem = o.layout.field(bx.cx, 0);
                         if elem.is_zst() {
-                            return Some(OperandRef::new_zst(bcx.ccx, elem));
+                            return Some(OperandRef::new_zst(bx.cx, elem));
                         }
                     }
                     _ => {}
@@ -291,31 +291,31 @@
     }
 
     pub fn trans_consume(&mut self,
-                         bcx: &Builder<'a, 'tcx>,
+                         bx: &Builder<'a, 'tcx>,
                          place: &mir::Place<'tcx>)
                          -> OperandRef<'tcx>
     {
         debug!("trans_consume(place={:?})", place);
 
         let ty = self.monomorphized_place_ty(place);
-        let layout = bcx.ccx.layout_of(ty);
+        let layout = bx.cx.layout_of(ty);
 
         // ZSTs don't require any actual memory access.
         if layout.is_zst() {
-            return OperandRef::new_zst(bcx.ccx, layout);
+            return OperandRef::new_zst(bx.cx, layout);
         }
 
-        if let Some(o) = self.maybe_trans_consume_direct(bcx, place) {
+        if let Some(o) = self.maybe_trans_consume_direct(bx, place) {
             return o;
         }
 
         // for most places, to consume them we just load them
         // out from their home
-        self.trans_place(bcx, place).load(bcx)
+        self.trans_place(bx, place).load(bx)
     }
 
     pub fn trans_operand(&mut self,
-                         bcx: &Builder<'a, 'tcx>,
+                         bx: &Builder<'a, 'tcx>,
                          operand: &mir::Operand<'tcx>)
                          -> OperandRef<'tcx>
     {
@@ -324,15 +324,15 @@
         match *operand {
             mir::Operand::Copy(ref place) |
             mir::Operand::Move(ref place) => {
-                self.trans_consume(bcx, place)
+                self.trans_consume(bx, place)
             }
 
             mir::Operand::Constant(ref constant) => {
-                let val = self.trans_constant(&bcx, constant);
-                let operand = val.to_operand(bcx.ccx);
+                let val = self.trans_constant(&bx, constant);
+                let operand = val.to_operand(bx.cx);
                 if let OperandValue::Ref(ptr, align) = operand.val {
                     // If this is a OperandValue::Ref to an immediate constant, load it.
-                    PlaceRef::new_sized(ptr, operand.layout, align).load(bcx)
+                    PlaceRef::new_sized(ptr, operand.layout, align).load(bx)
                 } else {
                     operand
                 }
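
`mir/operand.rs` is again a mechanical rename, but the `OperandValue` cases it threads through everywhere are the key invariant: a value is either behind a pointer, a single immediate, or a pair of immediates. A reduced model of the shapes and what `store` does with each (illustrative only; rustc's real variants carry `ValueRef`s and an alignment):

```rust
/// Reduced model of the three value shapes handled above.
enum OperandValue<V> {
    /// In memory behind a pointer; `store` turns into `memcpy_ty`.
    Ref(V),
    /// One LLVM immediate (integer, float, thin pointer, ...); one store.
    Immediate(V),
    /// Two immediates, e.g. a fat pointer's data and metadata; stored
    /// field-by-field through `struct_gep`, widening `i1` to `i8`.
    Pair(V, V),
}

fn store_strategy<V>(v: &OperandValue<V>) -> &'static str {
    match v {
        OperandValue::Ref(_) => "memcpy from source to destination",
        OperandValue::Immediate(_) => "single store",
        OperandValue::Pair(..) => "two stores via struct_gep",
    }
}
```

The `i1`-as-`i8` widening matches the loads in `mir/place.rs` below, which truncate back to `i1` after loading a bool.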
diff --git a/src/librustc_trans/mir/place.rs b/src/librustc_trans/mir/place.rs
index b556b6a..9977047 100644
--- a/src/librustc_trans/mir/place.rs
+++ b/src/librustc_trans/mir/place.rs
@@ -16,7 +16,7 @@
 use rustc_data_structures::indexed_vec::Idx;
 use base;
 use builder::Builder;
-use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big};
+use common::{CodegenCx, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big};
 use consts;
 use type_of::LayoutLlvmExt;
 use type_::Type;
@@ -25,7 +25,7 @@
 
 use std::ptr;
 
-use super::{MirContext, LocalRef};
+use super::{FunctionCx, LocalRef};
 use super::operand::{OperandRef, OperandValue};
 
 #[derive(Copy, Clone, Debug)]
@@ -56,21 +56,21 @@
         }
     }
 
-    pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
+    pub fn alloca(bx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                   -> PlaceRef<'tcx> {
         debug!("alloca({:?}: {:?})", name, layout);
-        let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align);
+        let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
         Self::new_sized(tmp, layout, layout.align)
     }
 
-    pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
+    pub fn len(&self, cx: &CodegenCx<'a, 'tcx>) -> ValueRef {
         if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
             if self.layout.is_unsized() {
                 assert!(self.has_extra());
                 assert_eq!(count, 0);
                 self.llextra
             } else {
-                C_usize(ccx, count)
+                C_usize(cx, count)
             }
         } else {
             bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
@@ -81,19 +81,19 @@
         !self.llextra.is_null()
     }
 
-    pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
+    pub fn load(&self, bx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
         debug!("PlaceRef::load: {:?}", self);
 
         assert!(!self.has_extra());
 
         if self.layout.is_zst() {
-            return OperandRef::new_zst(bcx.ccx, self.layout);
+            return OperandRef::new_zst(bx.cx, self.layout);
         }
 
         let scalar_load_metadata = |load, scalar: &layout::Scalar| {
             let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
             let max_next = max.wrapping_add(1);
-            let bits = scalar.value.size(bcx.ccx).bits();
+            let bits = scalar.value.size(bx.cx).bits();
             assert!(bits <= 128);
             let mask = !0u128 >> (128 - bits);
             // For a (max) value of -1, max will be `-1 as usize`, which overflows.
@@ -106,10 +106,10 @@
                 layout::Int(..) if max_next & mask != min & mask => {
                     // llvm::ConstantRange can deal with ranges that wrap around,
                     // so an overflow on (max + 1) is fine.
-                    bcx.range_metadata(load, min..max_next);
+                    bx.range_metadata(load, min..max_next);
                 }
                 layout::Pointer if 0 < min && min < max => {
-                    bcx.nonnull_metadata(load);
+                    bx.nonnull_metadata(load);
                 }
                 _ => {}
             }
@@ -127,24 +127,24 @@
             let llval = if !const_llval.is_null() {
                 const_llval
             } else {
-                let load = bcx.load(self.llval, self.align);
+                let load = bx.load(self.llval, self.align);
                 if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                     scalar_load_metadata(load, scalar);
                 }
                 load
             };
-            OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
+            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
         } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
             let load = |i, scalar: &layout::Scalar| {
-                let mut llptr = bcx.struct_gep(self.llval, i as u64);
+                let mut llptr = bx.struct_gep(self.llval, i as u64);
                 // Make sure to always load i1 as i8.
                 if scalar.is_bool() {
-                    llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
+                    llptr = bx.pointercast(llptr, Type::i8p(bx.cx));
                 }
-                let load = bcx.load(llptr, self.align);
+                let load = bx.load(llptr, self.align);
                 scalar_load_metadata(load, scalar);
                 if scalar.is_bool() {
-                    bcx.trunc(load, Type::i1(bcx.ccx))
+                    bx.trunc(load, Type::i1(bx.cx))
                 } else {
                     load
                 }
@@ -158,9 +158,9 @@
     }
 
     /// Access a field, at a point when the value's case is known.
-    pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
-        let ccx = bcx.ccx;
-        let field = self.layout.field(ccx, ix);
+    pub fn project_field(self, bx: &Builder<'a, 'tcx>, ix: usize) -> PlaceRef<'tcx> {
+        let cx = bx.cx;
+        let field = self.layout.field(cx, ix);
         let offset = self.layout.fields.offset(ix);
         let align = self.align.min(self.layout.align).min(field.align);
 
@@ -170,15 +170,15 @@
                 self.llval
             } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                 // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
-                bcx.struct_gep(self.llval, 1)
+                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
+                bx.struct_gep(self.llval, 1)
             } else {
-                bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
+                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
             };
             PlaceRef {
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
-                llextra: if ccx.shared().type_has_metadata(field.ty) {
+                llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()),
+                llextra: if cx.type_has_metadata(field.ty) {
                     self.llextra
                 } else {
                     ptr::null_mut()
@@ -228,10 +228,10 @@
 
         let meta = self.llextra;
 
-        let unaligned_offset = C_usize(ccx, offset.bytes());
+        let unaligned_offset = C_usize(cx, offset.bytes());
 
         // Get the alignment of the field
-        let (_, unsized_align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
+        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
 
         // Bump the unaligned offset up to the appropriate alignment using the
         // following expression:
@@ -239,22 +239,22 @@
         //   (unaligned offset + (align - 1)) & -align
 
         // Calculate offset
-        let align_sub_1 = bcx.sub(unsized_align, C_usize(ccx, 1u64));
-        let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
-        bcx.neg(unsized_align));
+        let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64));
+        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
+        bx.neg(unsized_align));
 
         debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
 
         // Cast and adjust pointer
-        let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
-        let byte_ptr = bcx.gep(byte_ptr, &[offset]);
+        let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
+        let byte_ptr = bx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
-        let ll_fty = field.llvm_type(ccx);
+        let ll_fty = field.llvm_type(cx);
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
 
         PlaceRef {
-            llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
+            llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()),
             llextra: self.llextra,
             layout: field,
             align,
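
The expression `(unaligned offset + (align - 1)) & -align` above rounds the offset up to the field's dynamically computed alignment; `-align` works because, in two's complement, negating a power of two equals `!(align - 1)`. A sketch on `u64` values in place of LLVM `ValueRef`s:

```rust
// Round `offset` up to the next multiple of a power-of-two `align`,
// exactly the (offset + (align - 1)) & -align computation above.
fn round_up(offset: u64, align: u64) -> u64 {
    (offset + (align - 1)) & align.wrapping_neg() // -align == !(align - 1)
}

fn main() {
    assert_eq!(round_up(5, 8), 8);
    assert_eq!(round_up(8, 8), 8);   // already aligned: unchanged
    assert_eq!(round_up(9, 4), 12);
}
```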
@@ -262,8 +262,8 @@
     }
 
     /// Obtain the actual discriminant of a value.
-    pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
-        let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
+    pub fn trans_get_discr(self, bx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
+        let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
         match self.layout.variants {
             layout::Variants::Single { index } => {
                 return C_uint(cast_to, index as u64);
@@ -272,8 +272,8 @@
             layout::Variants::NicheFilling { .. } => {},
         }
 
-        let discr = self.project_field(bcx, 0);
-        let lldiscr = discr.load(bcx).immediate();
+        let discr = self.project_field(bx, 0);
+        let lldiscr = discr.load(bx).immediate();
         match self.layout.variants {
             layout::Variants::Single { .. } => bug!(),
             layout::Variants::Tagged { ref discr, .. } => {
@@ -281,7 +281,7 @@
                     layout::Int(_, signed) => signed,
                     _ => false
                 };
-                bcx.intcast(lldiscr, cast_to, signed)
+                bx.intcast(lldiscr, cast_to, signed)
             }
             layout::Variants::NicheFilling {
                 dataful_variant,
@@ -289,7 +289,7 @@
                 niche_start,
                 ..
             } => {
-                let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx);
+                let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
                 if niche_variants.start == niche_variants.end {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_start == 0 {
@@ -298,16 +298,16 @@
                     } else {
                         C_uint_big(niche_llty, niche_start)
                     };
-                    bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval),
+                    bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval),
                         C_uint(cast_to, niche_variants.start as u64),
                         C_uint(cast_to, dataful_variant as u64))
                 } else {
                     // Rebase from niche values to discriminant values.
                     let delta = niche_start.wrapping_sub(niche_variants.start as u128);
-                    let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta));
+                    let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
                     let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
-                    bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
-                        bcx.intcast(lldiscr, cast_to, false),
+                    bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
+                        bx.intcast(lldiscr, cast_to, false),
                         C_uint(cast_to, dataful_variant as u64))
                 }
             }
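
The niche path above recovers a variant index by subtracting `delta = niche_start - niche_variants.start` (wrapping) and accepting the result only when it still lies within the niche range; anything else must be the dataful variant. A sketch with made-up constants loosely modeled on an `Option<char>`-like layout (the numbers are illustrative, not rustc's actual encoding):

```rust
// Decode a loaded niche value into a variant index, mirroring the
// wrapping-sub + unsigned-compare + select sequence above.
fn decode(niche_value: u128, niche_start: u128,
          variants_start: u128, variants_end: u128,
          dataful_variant: u128) -> u128 {
    let delta = niche_start.wrapping_sub(variants_start);
    let rebased = niche_value.wrapping_sub(delta); // out-of-niche wraps huge
    if rebased <= variants_end { rebased } else { dataful_variant }
}

fn main() {
    // Hypothetically: niche values start just past the last valid char,
    // variant 0 (the None-like case) lives in the niche, variant 1 has data.
    let niche_start = 0x11_0000;
    assert_eq!(decode(0x11_0000, niche_start, 0, 0, 1), 0); // niche variant
    assert_eq!(decode(0x41, niche_start, 0, 0, 1), 1);      // 'A' is real data
}
```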
@@ -316,8 +316,8 @@
 
     /// Set the discriminant for a new value of the given case of the given
     /// representation.
-    pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
-        if self.layout.for_variant(bcx.ccx, variant_index).abi == layout::Abi::Uninhabited {
+    pub fn trans_set_discr(&self, bx: &Builder<'a, 'tcx>, variant_index: usize) {
+        if self.layout.for_variant(bx.cx, variant_index).abi == layout::Abi::Uninhabited {
             return;
         }
         match self.layout.variants {
@@ -325,11 +325,11 @@
                 assert_eq!(index, variant_index);
             }
             layout::Variants::Tagged { .. } => {
-                let ptr = self.project_field(bcx, 0);
+                let ptr = self.project_field(bx, 0);
                 let to = self.layout.ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(bcx.tcx(), variant_index)
+                    .discriminant_for_variant(bx.tcx(), variant_index)
                     .to_u128_unchecked() as u64;
-                bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
+                bx.store(C_int(ptr.layout.llvm_type(bx.cx), to as i64),
                     ptr.llval, ptr.align);
             }
             layout::Variants::NicheFilling {
@@ -339,20 +339,20 @@
                 ..
             } => {
                 if variant_index != dataful_variant {
-                    if bcx.sess().target.target.arch == "arm" ||
-                       bcx.sess().target.target.arch == "aarch64" {
+                    if bx.sess().target.target.arch == "arm" ||
+                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As a workaround for an LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
-                        let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
-                        let fill_byte = C_u8(bcx.ccx, 0);
+                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
+                        let fill_byte = C_u8(bx.cx, 0);
                         let (size, align) = self.layout.size_and_align();
-                        let size = C_usize(bcx.ccx, size.bytes());
-                        let align = C_u32(bcx.ccx, align.abi() as u32);
-                        base::call_memset(bcx, llptr, fill_byte, size, align, false);
+                        let size = C_usize(bx.cx, size.bytes());
+                        let align = C_u32(bx.cx, align.abi() as u32);
+                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                     }
 
-                    let niche = self.project_field(bcx, 0);
-                    let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx);
+                    let niche = self.project_field(bx, 0);
+                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
                     let niche_value = ((variant_index - niche_variants.start) as u128)
                         .wrapping_add(niche_start);
                     // FIXME(eddyb) Check the actual primitive type here.
@@ -362,52 +362,52 @@
                     } else {
                         C_uint_big(niche_llty, niche_value)
                     };
-                    OperandValue::Immediate(niche_llval).store(bcx, niche);
+                    OperandValue::Immediate(niche_llval).store(bx, niche);
                 }
             }
         }
     }
 
-    pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
+    pub fn project_index(&self, bx: &Builder<'a, 'tcx>, llindex: ValueRef)
                          -> PlaceRef<'tcx> {
         PlaceRef {
-            llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]),
+            llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
             llextra: ptr::null_mut(),
-            layout: self.layout.field(bcx.ccx, 0),
+            layout: self.layout.field(bx.cx, 0),
             align: self.align
         }
     }
 
-    pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
+    pub fn project_downcast(&self, bx: &Builder<'a, 'tcx>, variant_index: usize)
                             -> PlaceRef<'tcx> {
         let mut downcast = *self;
-        downcast.layout = self.layout.for_variant(bcx.ccx, variant_index);
+        downcast.layout = self.layout.for_variant(bx.cx, variant_index);
 
         // Cast to the appropriate variant struct type.
-        let variant_ty = downcast.layout.llvm_type(bcx.ccx);
-        downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
+        let variant_ty = downcast.layout.llvm_type(bx.cx);
+        downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());
 
         downcast
     }
 
-    pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_start(self.llval, self.layout.size);
+    pub fn storage_live(&self, bx: &Builder<'a, 'tcx>) {
+        bx.lifetime_start(self.llval, self.layout.size);
     }
 
-    pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_end(self.llval, self.layout.size);
+    pub fn storage_dead(&self, bx: &Builder<'a, 'tcx>) {
+        bx.lifetime_end(self.llval, self.layout.size);
     }
 }
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn trans_place(&mut self,
-                        bcx: &Builder<'a, 'tcx>,
+                        bx: &Builder<'a, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx> {
         debug!("trans_place(place={:?})", place);
 
-        let ccx = bcx.ccx;
-        let tcx = ccx.tcx();
+        let cx = bx.cx;
+        let tcx = cx.tcx;
 
         if let mir::Place::Local(index) = *place {
             match self.locals[index] {
@@ -423,66 +423,66 @@
         let result = match *place {
             mir::Place::Local(_) => bug!(), // handled above
             mir::Place::Static(box mir::Static { def_id, ty }) => {
-                let layout = ccx.layout_of(self.monomorphize(&ty));
-                PlaceRef::new_sized(consts::get_static(ccx, def_id), layout, layout.align)
+                let layout = cx.layout_of(self.monomorphize(&ty));
+                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
             },
             mir::Place::Projection(box mir::Projection {
                 ref base,
                 elem: mir::ProjectionElem::Deref
             }) => {
                 // Load the pointer from its location.
-                self.trans_consume(bcx, base).deref(bcx.ccx)
+                self.trans_consume(bx, base).deref(bx.cx)
             }
             mir::Place::Projection(ref projection) => {
-                let tr_base = self.trans_place(bcx, &projection.base);
+                let tr_base = self.trans_place(bx, &projection.base);
 
                 match projection.elem {
                     mir::ProjectionElem::Deref => bug!(),
                     mir::ProjectionElem::Field(ref field, _) => {
-                        tr_base.project_field(bcx, field.index())
+                        tr_base.project_field(bx, field.index())
                     }
                     mir::ProjectionElem::Index(index) => {
                         let index = &mir::Operand::Copy(mir::Place::Local(index));
-                        let index = self.trans_operand(bcx, index);
+                        let index = self.trans_operand(bx, index);
                         let llindex = index.immediate();
-                        tr_base.project_index(bcx, llindex)
+                        tr_base.project_index(bx, llindex)
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: false,
                                                          min_length: _ } => {
-                        let lloffset = C_usize(bcx.ccx, offset as u64);
-                        tr_base.project_index(bcx, lloffset)
+                        let lloffset = C_usize(bx.cx, offset as u64);
+                        tr_base.project_index(bx, lloffset)
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: true,
                                                          min_length: _ } => {
-                        let lloffset = C_usize(bcx.ccx, offset as u64);
-                        let lllen = tr_base.len(bcx.ccx);
-                        let llindex = bcx.sub(lllen, lloffset);
-                        tr_base.project_index(bcx, llindex)
+                        let lloffset = C_usize(bx.cx, offset as u64);
+                        let lllen = tr_base.len(bx.cx);
+                        let llindex = bx.sub(lllen, lloffset);
+                        tr_base.project_index(bx, llindex)
                     }
                     mir::ProjectionElem::Subslice { from, to } => {
-                        let mut subslice = tr_base.project_index(bcx,
-                            C_usize(bcx.ccx, from as u64));
+                        let mut subslice = tr_base.project_index(bx,
+                            C_usize(bx.cx, from as u64));
                         let projected_ty = PlaceTy::Ty { ty: tr_base.layout.ty }
-                            .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
-                        subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty));
+                            .projection_ty(tcx, &projection.elem).to_ty(bx.tcx());
+                        subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
 
                         if subslice.layout.is_unsized() {
                             assert!(tr_base.has_extra());
-                            subslice.llextra = bcx.sub(tr_base.llextra,
-                                C_usize(bcx.ccx, (from as u64) + (to as u64)));
+                            subslice.llextra = bx.sub(tr_base.llextra,
+                                C_usize(bx.cx, (from as u64) + (to as u64)));
                         }
 
                         // Cast the place pointer type to the new
                         // array or slice type (*[%_; new_len]).
-                        subslice.llval = bcx.pointercast(subslice.llval,
-                            subslice.layout.llvm_type(bcx.ccx).ptr_to());
+                        subslice.llval = bx.pointercast(subslice.llval,
+                            subslice.layout.llvm_type(bx.cx).ptr_to());
 
                         subslice
                     }
                     mir::ProjectionElem::Downcast(_, v) => {
-                        tr_base.project_downcast(bcx, v)
+                        tr_base.project_downcast(bx, v)
                     }
                 }
             }
@@ -492,7 +492,7 @@
     }
 
     pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
-        let tcx = self.ccx.tcx();
+        let tcx = self.cx.tcx;
         let place_ty = place.ty(self.mir, tcx);
         self.monomorphize(&place_ty.to_ty(tcx))
     }
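
`trans_place` recurses on `projection.base` before applying the current element, so a place like `(*_1).0[_2]` is lowered inside-out: local first, then deref, field, index. A toy sketch of that walk, with a hypothetical `Elem` enum standing in for `mir::ProjectionElem`:

```rust
#[derive(Debug)]
enum Elem { Deref, Field(usize), Index(usize) }

// Lower the base before the outermost element, as trans_place does.
fn lower(base: &str, projs: &[Elem]) -> String {
    match projs.split_last() {
        None => base.to_string(),
        Some((last, rest)) => format!("apply({}, {:?})", lower(base, rest), last),
    }
}

fn main() {
    // (*_1).0[2] => apply(apply(apply(_1, Deref), Field(0)), Index(2))
    println!("{}", lower("_1", &[Elem::Deref, Elem::Field(0), Elem::Index(2)]));
}
```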
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index 56309f2..d1bc4fe 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -29,14 +29,14 @@
 use type_of::LayoutLlvmExt;
 use value::Value;
 
-use super::{MirContext, LocalRef};
+use super::{FunctionCx, LocalRef};
 use super::constant::const_scalar_checked_binop;
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn trans_rvalue(&mut self,
-                        bcx: Builder<'a, 'tcx>,
+                        bx: Builder<'a, 'tcx>,
                         dest: PlaceRef<'tcx>,
                         rvalue: &mir::Rvalue<'tcx>)
                         -> Builder<'a, 'tcx>
@@ -46,11 +46,11 @@
 
         match *rvalue {
            mir::Rvalue::Use(ref operand) => {
-               let tr_operand = self.trans_operand(&bcx, operand);
+               let tr_operand = self.trans_operand(&bx, operand);
                // FIXME: consider not copying constants through the stack. (fixable by translating
                // constants into OperandValue::Ref; why don't we do that already, if we don't?)
-               tr_operand.val.store(&bcx, dest);
-               bcx
+               tr_operand.val.store(&bx, dest);
+               bx
            }
 
             mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
@@ -59,16 +59,16 @@
                 if dest.layout.is_llvm_scalar_pair() {
                     // into-coerce of a thin pointer to a fat pointer - just
                     // use the operand path.
-                    let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                    temp.val.store(&bcx, dest);
-                    return bcx;
+                    let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
+                    temp.val.store(&bx, dest);
+                    return bx;
                 }
 
                 // Unsize of a nontrivial struct. I would prefer for
                 // this to be eliminated by MIR translation, but
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
-                let operand = self.trans_operand(&bcx, source);
+                let operand = self.trans_operand(&bx, source);
                 match operand.val {
                     OperandValue::Pair(..) |
                     OperandValue::Immediate(_) => {
@@ -79,79 +79,79 @@
                         // index into the struct, and this case isn't
                         // important enough for it.
                         debug!("trans_rvalue: creating ugly alloca");
-                        let scratch = PlaceRef::alloca(&bcx, operand.layout, "__unsize_temp");
-                        scratch.storage_live(&bcx);
-                        operand.val.store(&bcx, scratch);
-                        base::coerce_unsized_into(&bcx, scratch, dest);
-                        scratch.storage_dead(&bcx);
+                        let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp");
+                        scratch.storage_live(&bx);
+                        operand.val.store(&bx, scratch);
+                        base::coerce_unsized_into(&bx, scratch, dest);
+                        scratch.storage_dead(&bx);
                     }
                     OperandValue::Ref(llref, align) => {
                         let source = PlaceRef::new_sized(llref, operand.layout, align);
-                        base::coerce_unsized_into(&bcx, source, dest);
+                        base::coerce_unsized_into(&bx, source, dest);
                     }
                 }
-                bcx
+                bx
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let tr_elem = self.trans_operand(&bcx, elem);
+                let tr_elem = self.trans_operand(&bx, elem);
 
                 // Do not generate the loop for zero-sized elements or empty arrays.
                 if dest.layout.is_zst() {
-                    return bcx;
+                    return bx;
                 }
 
-                let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval;
+                let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;
 
                 if let OperandValue::Immediate(v) = tr_elem.val {
-                    let align = C_i32(bcx.ccx, dest.align.abi() as i32);
-                    let size = C_usize(bcx.ccx, dest.layout.size.bytes());
+                    let align = C_i32(bx.cx, dest.align.abi() as i32);
+                    let size = C_usize(bx.cx, dest.layout.size.bytes());
 
                    // Use llvm.memset.p0i8.* to initialize all-zero arrays
                     if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
-                        let fill = C_u8(bcx.ccx, 0);
-                        base::call_memset(&bcx, start, fill, size, align, false);
-                        return bcx;
+                        let fill = C_u8(bx.cx, 0);
+                        base::call_memset(&bx, start, fill, size, align, false);
+                        return bx;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
-                    let v = base::from_immediate(&bcx, v);
-                    if common::val_ty(v) == Type::i8(bcx.ccx) {
-                        base::call_memset(&bcx, start, v, size, align, false);
-                        return bcx;
+                    let v = base::from_immediate(&bx, v);
+                    if common::val_ty(v) == Type::i8(bx.cx) {
+                        base::call_memset(&bx, start, v, size, align, false);
+                        return bx;
                     }
                 }
 
                 let count = count.as_u64();
-                let count = C_usize(bcx.ccx, count);
-                let end = dest.project_index(&bcx, count).llval;
+                let count = C_usize(bx.cx, count);
+                let end = dest.project_index(&bx, count).llval;
 
-                let header_bcx = bcx.build_sibling_block("repeat_loop_header");
-                let body_bcx = bcx.build_sibling_block("repeat_loop_body");
-                let next_bcx = bcx.build_sibling_block("repeat_loop_next");
+                let header_bx = bx.build_sibling_block("repeat_loop_header");
+                let body_bx = bx.build_sibling_block("repeat_loop_body");
+                let next_bx = bx.build_sibling_block("repeat_loop_next");
 
-                bcx.br(header_bcx.llbb());
-                let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]);
+                bx.br(header_bx.llbb());
+                let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]);
 
-                let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
-                header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
+                let keep_going = header_bx.icmp(llvm::IntNE, current, end);
+                header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
-                tr_elem.val.store(&body_bcx,
+                tr_elem.val.store(&body_bx,
                     PlaceRef::new_sized(current, tr_elem.layout, dest.align));
 
-                let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]);
-                body_bcx.br(header_bcx.llbb());
-                header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
+                let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
+                body_bx.br(header_bx.llbb());
+                header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
-                next_bcx
+                next_bx
             }
 
             mir::Rvalue::Aggregate(ref kind, ref operands) => {
                 let (dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
-                        dest.trans_set_discr(&bcx, variant_index);
+                        dest.trans_set_discr(&bx, variant_index);
                         if adt_def.is_enum() {
-                            (dest.project_downcast(&bcx, variant_index), active_field_index)
+                            (dest.project_downcast(&bx, variant_index), active_field_index)
                         } else {
                             (dest, active_field_index)
                         }
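
The `Repeat` lowering above tries `llvm.memset` first (for all-zero or single-byte elements) and otherwise emits a header/body/next loop whose phi node advances one element per iteration until it reaches the one-past-the-end pointer. A plain-Rust sketch of that loop shape, with an index standing in for the pointer phi:

```rust
// Fill dest with `elem`, mirroring the repeat_loop_{header,body,next}
// structure: `current` plays the phi node, `end` the past-the-end pointer.
fn repeat_fill<T: Copy>(dest: &mut [T], elem: T) {
    let mut current = 0;       // phi starting at `start`
    let end = dest.len();      // project_index(count)
    while current != end {     // header: icmp IntNE current, end
        dest[current] = elem;  // body: store the element
        current += 1;          // body: inbounds_gep current, [1]
    }
}

fn main() {
    let mut a = [0u32; 4];
    repeat_fill(&mut a, 7);
    assert_eq!(a, [7; 4]);
}
```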
@@ -159,27 +159,27 @@
                     _ => (dest, None)
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.trans_operand(&bcx, operand);
+                    let op = self.trans_operand(&bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
-                        op.val.store(&bcx, dest.project_field(&bcx, field_index));
+                        op.val.store(&bx, dest.project_field(&bx, field_index));
                     }
                 }
-                bcx
+                bx
             }
 
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue));
-                let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                temp.val.store(&bcx, dest);
-                bcx
+                let (bx, temp) = self.trans_rvalue_operand(bx, rvalue);
+                temp.val.store(&bx, dest);
+                bx
             }
         }
     }
 
     pub fn trans_rvalue_operand(&mut self,
-                                bcx: Builder<'a, 'tcx>,
+                                bx: Builder<'a, 'tcx>,
                                 rvalue: &mir::Rvalue<'tcx>)
                                 -> (Builder<'a, 'tcx>, OperandRef<'tcx>)
     {
@@ -187,16 +187,16 @@
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.trans_operand(&bcx, source);
+                let operand = self.trans_operand(&bx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast = bcx.ccx.layout_of(self.monomorphize(&mir_cast_ty));
+                let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
                         match operand.layout.ty.sty {
                             ty::TyFnDef(def_id, substs) => {
                                 OperandValue::Immediate(
-                                    callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
+                                    callee::resolve_and_get_fn(bx.cx, def_id, substs))
                             }
                             _ => {
                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
@@ -207,8 +207,8 @@
                         match operand.layout.ty.sty {
                             ty::TyClosure(def_id, substs) => {
                                 let instance = monomorphize::resolve_closure(
-                                    bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce);
-                                OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
+                                    bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
+                                OperandValue::Immediate(callee::get_fn(bx.cx, instance))
                             }
                             _ => {
                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@@ -230,13 +230,13 @@
 
                                 // HACK(eddyb) have to bitcast pointers
                                 // until LLVM removes pointee types.
-                                let lldata = bcx.pointercast(lldata,
-                                    cast.scalar_pair_element_llvm_type(bcx.ccx, 0));
+                                let lldata = bx.pointercast(lldata,
+                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
                                 // "standard" unsize
-                                let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
+                                let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata,
                                     operand.layout.ty, cast.ty);
                                 OperandValue::Pair(lldata, llextra)
                             }
@@ -249,14 +249,14 @@
                     mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
                             if cast.is_llvm_scalar_pair() {
-                                let data_cast = bcx.pointercast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(bcx.ccx, 0));
+                                let data_cast = bx.pointercast(data_ptr,
+                                    cast.scalar_pair_element_llvm_type(bx.cx, 0));
                                 OperandValue::Pair(data_cast, meta)
                             } else { // cast to thin-ptr
                                 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                 // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(bcx.ccx);
-                                let llval = bcx.pointercast(data_ptr, llcast_ty);
+                                let llcast_ty = cast.immediate_llvm_type(bx.cx);
+                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                 OperandValue::Immediate(llval)
                             }
                         } else {
@@ -268,8 +268,8 @@
                         let r_t_in = CastTy::from_ty(operand.layout.ty)
                             .expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = operand.layout.immediate_llvm_type(bcx.ccx);
-                        let ll_t_out = cast.immediate_llvm_type(bcx.ccx);
+                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
+                        let ll_t_out = cast.immediate_llvm_type(bx.cx);
                         let llval = operand.immediate();
 
                         let mut signed = false;
@@ -282,7 +282,7 @@
                                    // have bounds checks, and this is the most
                                     // convenient place to put the `assume`.
 
-                                    base::call_assume(&bcx, bcx.icmp(
+                                    base::call_assume(&bx, bx.icmp(
                                         llvm::IntULE,
                                         llval,
                                         C_uint_big(ll_t_in, scalar.valid_range.end)
@@ -293,15 +293,15 @@
 
                         let newval = match (r_t_in, r_t_out) {
                             (CastTy::Int(_), CastTy::Int(_)) => {
-                                bcx.intcast(llval, ll_t_out, signed)
+                                bx.intcast(llval, ll_t_out, signed)
                             }
                             (CastTy::Float, CastTy::Float) => {
                                 let srcsz = ll_t_in.float_width();
                                 let dstsz = ll_t_out.float_width();
                                 if dstsz > srcsz {
-                                    bcx.fpext(llval, ll_t_out)
+                                    bx.fpext(llval, ll_t_out)
                                 } else if srcsz > dstsz {
-                                    bcx.fptrunc(llval, ll_t_out)
+                                    bx.fptrunc(llval, ll_t_out)
                                 } else {
                                     llval
                                 }
@@ -309,68 +309,68 @@
                             (CastTy::Ptr(_), CastTy::Ptr(_)) |
                             (CastTy::FnPtr, CastTy::Ptr(_)) |
                             (CastTy::RPtr(_), CastTy::Ptr(_)) =>
-                                bcx.pointercast(llval, ll_t_out),
+                                bx.pointercast(llval, ll_t_out),
                             (CastTy::Ptr(_), CastTy::Int(_)) |
                             (CastTy::FnPtr, CastTy::Int(_)) =>
-                                bcx.ptrtoint(llval, ll_t_out),
+                                bx.ptrtoint(llval, ll_t_out),
                             (CastTy::Int(_), CastTy::Ptr(_)) => {
-                                let usize_llval = bcx.intcast(llval, bcx.ccx.isize_ty(), signed);
-                                bcx.inttoptr(usize_llval, ll_t_out)
+                                let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
+                                bx.inttoptr(usize_llval, ll_t_out)
                             }
                             (CastTy::Int(_), CastTy::Float) =>
-                                cast_int_to_float(&bcx, signed, llval, ll_t_in, ll_t_out),
+                                cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(IntTy::I)) =>
-                                cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(_)) =>
-                                cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
+                                cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out),
                             _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
                         };
                         OperandValue::Immediate(newval)
                     }
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val,
                     layout: cast
                 })
             }
 
             mir::Rvalue::Ref(_, bk, ref place) => {
-                let tr_place = self.trans_place(&bcx, place);
+                let tr_place = self.trans_place(&bx, place);
 
                 let ty = tr_place.layout.ty;
 
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
-                let val = if !bcx.ccx.shared().type_has_metadata(ty) {
+                let val = if !bx.cx.type_has_metadata(ty) {
                     OperandValue::Immediate(tr_place.llval)
                 } else {
                     OperandValue::Pair(tr_place.llval, tr_place.llextra)
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val,
-                    layout: self.ccx.layout_of(self.ccx.tcx().mk_ref(
-                        self.ccx.tcx().types.re_erased,
+                    layout: self.cx.layout_of(self.cx.tcx.mk_ref(
+                        self.cx.tcx.types.re_erased,
                         ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
                     )),
                 })
             }
 
             mir::Rvalue::Len(ref place) => {
-                let size = self.evaluate_array_len(&bcx, place);
+                let size = self.evaluate_array_len(&bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
-                    layout: bcx.ccx.layout_of(bcx.tcx().types.usize),
+                    layout: bx.cx.layout_of(bx.tcx().types.usize),
                 };
-                (bcx, operand)
+                (bx, operand)
             }
 
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.trans_operand(&bcx, lhs);
-                let rhs = self.trans_operand(&bcx, rhs);
+                let lhs = self.trans_operand(&bx, lhs);
+                let rhs = self.trans_operand(&bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (OperandValue::Pair(lhs_addr, lhs_extra),
                      OperandValue::Pair(rhs_addr, rhs_extra)) => {
-                        self.trans_fat_ptr_binop(&bcx, op,
+                        self.trans_fat_ptr_binop(&bx, op,
                                                  lhs_addr, lhs_extra,
                                                  rhs_addr, rhs_extra,
                                                  lhs.layout.ty)
@@ -378,114 +378,114 @@
 
                     (OperandValue::Immediate(lhs_val),
                      OperandValue::Immediate(rhs_val)) => {
-                        self.trans_scalar_binop(&bcx, op, lhs_val, rhs_val, lhs.layout.ty)
+                        self.trans_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty)
                     }
 
                     _ => bug!()
                 };
                 let operand = OperandRef {
                     val: OperandValue::Immediate(llresult),
-                    layout: bcx.ccx.layout_of(
-                        op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+                    layout: bx.cx.layout_of(
+                        op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                 };
-                (bcx, operand)
+                (bx, operand)
             }
             mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => {
-                let lhs = self.trans_operand(&bcx, lhs);
-                let rhs = self.trans_operand(&bcx, rhs);
-                let result = self.trans_scalar_checked_binop(&bcx, op,
+                let lhs = self.trans_operand(&bx, lhs);
+                let rhs = self.trans_operand(&bx, rhs);
+                let result = self.trans_scalar_checked_binop(&bx, op,
                                                              lhs.immediate(), rhs.immediate(),
                                                              lhs.layout.ty);
-                let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty);
-                let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
+                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+                let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool], false);
                 let operand = OperandRef {
                     val: result,
-                    layout: bcx.ccx.layout_of(operand_ty)
+                    layout: bx.cx.layout_of(operand_ty)
                 };
 
-                (bcx, operand)
+                (bx, operand)
             }
 
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
+                let operand = self.trans_operand(&bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_fp();
                 let llval = match op {
-                    mir::UnOp::Not => bcx.not(lloperand),
+                    mir::UnOp::Not => bx.not(lloperand),
                     mir::UnOp::Neg => if is_float {
-                        bcx.fneg(lloperand)
+                        bx.fneg(lloperand)
                     } else {
-                        bcx.neg(lloperand)
+                        bx.neg(lloperand)
                     }
                 };
-                (bcx, OperandRef {
+                (bx, OperandRef {
                     val: OperandValue::Immediate(llval),
                     layout: operand.layout,
                 })
             }
 
             mir::Rvalue::Discriminant(ref place) => {
-                let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
-                let discr =  self.trans_place(&bcx, place)
-                    .trans_get_discr(&bcx, discr_ty);
-                (bcx, OperandRef {
+                let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
+                let discr =  self.trans_place(&bx, place)
+                    .trans_get_discr(&bx, discr_ty);
+                (bx, OperandRef {
                     val: OperandValue::Immediate(discr),
-                    layout: self.ccx.layout_of(discr_ty)
+                    layout: self.cx.layout_of(discr_ty)
                 })
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(bcx.ccx.shared().type_is_sized(ty));
-                let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty).bytes());
-                let tcx = bcx.tcx();
-                (bcx, OperandRef {
+                assert!(bx.cx.type_is_sized(ty));
+                let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes());
+                let tcx = bx.tcx();
+                (bx, OperandRef {
                     val: OperandValue::Immediate(val),
-                    layout: self.ccx.layout_of(tcx.types.usize),
+                    layout: self.cx.layout_of(tcx.types.usize),
                 })
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bcx.ccx.size_and_align_of(content_ty);
-                let llsize = C_usize(bcx.ccx, size.bytes());
-                let llalign = C_usize(bcx.ccx, align.abi());
-                let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty));
-                let llty_ptr = box_layout.llvm_type(bcx.ccx);
+                let (size, align) = bx.cx.size_and_align_of(content_ty);
+                let llsize = C_usize(bx.cx, size.bytes());
+                let llalign = C_usize(bx.cx, align.abi());
+                let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = box_layout.llvm_type(bx.cx);
 
                 // Allocate space:
-                let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
+                let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                     Ok(id) => id,
                     Err(s) => {
-                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                        bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                     }
                 };
-                let instance = ty::Instance::mono(bcx.tcx(), def_id);
-                let r = callee::get_fn(bcx.ccx, instance);
-                let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr);
+                let instance = ty::Instance::mono(bx.tcx(), def_id);
+                let r = callee::get_fn(bx.cx, instance);
+                let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
 
                 let operand = OperandRef {
                     val: OperandValue::Immediate(val),
                     layout: box_layout,
                 };
-                (bcx, operand)
+                (bx, operand)
             }
             mir::Rvalue::Use(ref operand) => {
-                let operand = self.trans_operand(&bcx, operand);
-                (bcx, operand)
+                let operand = self.trans_operand(&bx, operand);
+                (bx, operand)
             }
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
-                let ty = rvalue.ty(self.mir, self.ccx.tcx());
-                (bcx, OperandRef::new_zst(self.ccx,
-                    self.ccx.layout_of(self.monomorphize(&ty))))
+                let ty = rvalue.ty(self.mir, self.cx.tcx);
+                (bx, OperandRef::new_zst(self.cx,
+                    self.cx.layout_of(self.monomorphize(&ty))))
             }
         }
     }
 
     fn evaluate_array_len(&mut self,
-                          bcx: &Builder<'a, 'tcx>,
+                          bx: &Builder<'a, 'tcx>,
                           place: &mir::Place<'tcx>) -> ValueRef
     {
        // ZSTs are passed as operands and require special handling
@@ -494,17 +494,17 @@
             if let LocalRef::Operand(Some(op)) = self.locals[index] {
                 if let ty::TyArray(_, n) = op.layout.ty.sty {
                     let n = n.val.to_const_int().unwrap().to_u64().unwrap();
-                    return common::C_usize(bcx.ccx, n);
+                    return common::C_usize(bx.cx, n);
                 }
             }
         }
        // use the common size calculation for non-zero-sized types
-        let tr_value = self.trans_place(&bcx, place);
-        return tr_value.len(bcx.ccx);
+        let tr_value = self.trans_place(&bx, place);
+        return tr_value.len(bx.cx);
     }
 
     pub fn trans_scalar_binop(&mut self,
-                              bcx: &Builder<'a, 'tcx>,
+                              bx: &Builder<'a, 'tcx>,
                               op: mir::BinOp,
                               lhs: ValueRef,
                               rhs: ValueRef,
@@ -515,49 +515,49 @@
         let is_bool = input_ty.is_bool();
         match op {
             mir::BinOp::Add => if is_float {
-                bcx.fadd(lhs, rhs)
+                bx.fadd(lhs, rhs)
             } else {
-                bcx.add(lhs, rhs)
+                bx.add(lhs, rhs)
             },
             mir::BinOp::Sub => if is_float {
-                bcx.fsub(lhs, rhs)
+                bx.fsub(lhs, rhs)
             } else {
-                bcx.sub(lhs, rhs)
+                bx.sub(lhs, rhs)
             },
             mir::BinOp::Mul => if is_float {
-                bcx.fmul(lhs, rhs)
+                bx.fmul(lhs, rhs)
             } else {
-                bcx.mul(lhs, rhs)
+                bx.mul(lhs, rhs)
             },
             mir::BinOp::Div => if is_float {
-                bcx.fdiv(lhs, rhs)
+                bx.fdiv(lhs, rhs)
             } else if is_signed {
-                bcx.sdiv(lhs, rhs)
+                bx.sdiv(lhs, rhs)
             } else {
-                bcx.udiv(lhs, rhs)
+                bx.udiv(lhs, rhs)
             },
             mir::BinOp::Rem => if is_float {
-                bcx.frem(lhs, rhs)
+                bx.frem(lhs, rhs)
             } else if is_signed {
-                bcx.srem(lhs, rhs)
+                bx.srem(lhs, rhs)
             } else {
-                bcx.urem(lhs, rhs)
+                bx.urem(lhs, rhs)
             },
-            mir::BinOp::BitOr => bcx.or(lhs, rhs),
-            mir::BinOp::BitAnd => bcx.and(lhs, rhs),
-            mir::BinOp::BitXor => bcx.xor(lhs, rhs),
-            mir::BinOp::Offset => bcx.inbounds_gep(lhs, &[rhs]),
-            mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs),
-            mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs),
+            mir::BinOp::BitOr => bx.or(lhs, rhs),
+            mir::BinOp::BitAnd => bx.and(lhs, rhs),
+            mir::BinOp::BitXor => bx.xor(lhs, rhs),
+            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+            mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
-                C_bool(bcx.ccx, match op {
+                C_bool(bx.cx, match op {
                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                     _ => unreachable!()
                 })
             } else if is_float {
-                bcx.fcmp(
+                bx.fcmp(
                     base::bin_op_to_fcmp_predicate(op.to_hir_binop()),
                     lhs, rhs
                 )
@@ -565,13 +565,13 @@
                 let (lhs, rhs) = if is_bool {
                     // FIXME(#36856) -- extend the bools into `i8` because
                     // LLVM's i1 comparisons are broken.
-                    (bcx.zext(lhs, Type::i8(bcx.ccx)),
-                     bcx.zext(rhs, Type::i8(bcx.ccx)))
+                    (bx.zext(lhs, Type::i8(bx.cx)),
+                     bx.zext(rhs, Type::i8(bx.cx)))
                 } else {
                     (lhs, rhs)
                 };
 
-                bcx.icmp(
+                bx.icmp(
                     base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed),
                     lhs, rhs
                 )
@@ -580,7 +580,7 @@
     }
 
     pub fn trans_fat_ptr_binop(&mut self,
-                               bcx: &Builder<'a, 'tcx>,
+                               bx: &Builder<'a, 'tcx>,
                                op: mir::BinOp,
                                lhs_addr: ValueRef,
                                lhs_extra: ValueRef,
@@ -590,15 +590,15 @@
                                -> ValueRef {
         match op {
             mir::BinOp::Eq => {
-                bcx.and(
-                    bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
-                    bcx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
+                bx.and(
+                    bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+                    bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Ne => {
-                bcx.or(
-                    bcx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
-                    bcx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
+                bx.or(
+                    bx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
+                    bx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Le | mir::BinOp::Lt |
@@ -612,11 +612,11 @@
                     _ => bug!(),
                 };
 
-                bcx.or(
-                    bcx.icmp(strict_op, lhs_addr, rhs_addr),
-                    bcx.and(
-                        bcx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
-                        bcx.icmp(op, lhs_extra, rhs_extra)
+                bx.or(
+                    bx.icmp(strict_op, lhs_addr, rhs_addr),
+                    bx.and(
+                        bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+                        bx.icmp(op, lhs_extra, rhs_extra)
                     )
                 )
             }
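
`trans_fat_ptr_binop` above treats a fat pointer as an `(addr, extra)` pair: equality compares both halves, and ordering is lexicographic, strict on the address with the extra word breaking ties. A sketch of the `Lt` case on `usize` pairs:

```rust
// Lexicographic fat-pointer "<", matching the or/and/icmp nest above:
// strict on the data pointer, falling back to the extra word on a tie.
fn fat_lt(lhs: (usize, usize), rhs: (usize, usize)) -> bool {
    lhs.0 < rhs.0 || (lhs.0 == rhs.0 && lhs.1 < rhs.1)
}

fn main() {
    assert!(fat_lt((1, 9), (2, 0)));  // strict address comparison wins
    assert!(fat_lt((1, 3), (1, 4)));  // equal addresses: compare extras
    assert!(!fat_lt((1, 4), (1, 4)));
}
```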
@@ -627,7 +627,7 @@
     }
 
     pub fn trans_scalar_checked_binop(&mut self,
-                                      bcx: &Builder<'a, 'tcx>,
+                                      bx: &Builder<'a, 'tcx>,
                                       op: mir::BinOp,
                                       lhs: ValueRef,
                                       rhs: ValueRef,
@@ -636,17 +636,17 @@
         // with #[rustc_inherit_overflow_checks] and inlined from
         // another crate (mostly core::num generic/#[inline] fns),
         // while the current crate doesn't use overflow checks.
-        if !bcx.ccx.check_overflow() {
-            let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, C_bool(bcx.ccx, false));
+        if !bx.cx.check_overflow {
+            let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
+            return OperandValue::Pair(val, C_bool(bx.cx, false));
         }
 
         // First try performing the operation on constants, which
         // will only succeed if both operands are constant.
         // This is necessary to determine when an overflow Assert
         // will always panic at runtime, and produce a warning.
-        if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) {
-            return OperandValue::Pair(val, C_bool(bcx.ccx, of));
+        if let Some((val, of)) = const_scalar_checked_binop(bx.tcx(), op, lhs, rhs, input_ty) {
+            return OperandValue::Pair(val, C_bool(bx.cx, of));
         }
 
         let (val, of) = match op {
@@ -658,20 +658,20 @@
                     mir::BinOp::Mul => OverflowOp::Mul,
                     _ => unreachable!()
                 };
-                let intrinsic = get_overflow_intrinsic(oop, bcx, input_ty);
-                let res = bcx.call(intrinsic, &[lhs, rhs], None);
+                let intrinsic = get_overflow_intrinsic(oop, bx, input_ty);
+                let res = bx.call(intrinsic, &[lhs, rhs], None);
 
-                (bcx.extract_value(res, 0),
-                 bcx.extract_value(res, 1))
+                (bx.extract_value(res, 0),
+                 bx.extract_value(res, 1))
             }
             mir::BinOp::Shl | mir::BinOp::Shr => {
                 let lhs_llty = val_ty(lhs);
                 let rhs_llty = val_ty(rhs);
-                let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
-                let outer_bits = bcx.and(rhs, invert_mask);
+                let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
+                let outer_bits = bx.and(rhs, invert_mask);
 
-                let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
-                let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty);
+                let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
+                let val = self.trans_scalar_binop(bx, op, lhs, rhs, input_ty);
 
                 (val, of)
             }
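
The checked-binop lowering above maps add/sub/mul onto the `llvm.*.with.overflow` intrinsics, which yield a `(value, overflowed)` pair, and detects shift overflow by masking away the legal shift-amount bits of the right-hand side. Rust's `overflowing_*` methods expose the same shape; a sketch (the 8-bit left-hand side for the shift mask is a hypothetical example):

```rust
// (extract_value res 0, extract_value res 1) for an 8-bit checked add.
fn checked_add_pair(lhs: u8, rhs: u8) -> (u8, bool) {
    lhs.overflowing_add(rhs)
}

// Shl/Shr overflow: any rhs bits outside 0..8 (for a u8 lhs) overflow.
fn shl_overflows(rhs: u32) -> bool {
    rhs & !0x7 != 0 // !0x7 plays the invert_mask above, for an 8-bit lhs
}

fn main() {
    assert_eq!(checked_add_pair(250, 10), (4, true));
    assert_eq!(checked_add_pair(1, 2), (3, false));
    assert!(shl_overflows(8));
    assert!(!shl_overflows(7));
}
```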
@@ -697,9 +697,9 @@
                 true,
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
-                let ty = rvalue.ty(self.mir, self.ccx.tcx());
+                let ty = rvalue.ty(self.mir, self.cx.tcx);
                 let ty = self.monomorphize(&ty);
-                self.ccx.layout_of(ty).is_zst()
+                self.cx.layout_of(ty).is_zst()
             }
         }
 
@@ -712,12 +712,12 @@
     Add, Sub, Mul
 }
 
-fn get_overflow_intrinsic(oop: OverflowOp, bcx: &Builder, ty: Ty) -> ValueRef {
+fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder, ty: Ty) -> ValueRef {
     use syntax::ast::IntTy::*;
     use syntax::ast::UintTy::*;
     use rustc::ty::{TyInt, TyUint};
 
-    let tcx = bcx.tcx();
+    let tcx = bx.tcx();
 
     let new_sty = match ty.sty {
         TyInt(Isize) => match &tcx.sess.target.target.target_pointer_width[..] {
@@ -784,10 +784,10 @@
         },
     };
 
-    bcx.ccx.get_intrinsic(&name)
+    bx.cx.get_intrinsic(&name)
 }
 
-fn cast_int_to_float(bcx: &Builder,
+fn cast_int_to_float(bx: &Builder,
                      signed: bool,
                      x: ValueRef,
                      int_ty: Type,
@@ -800,31 +800,31 @@
        // All inputs greater than or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
         // and for everything else LLVM's uitofp works just fine.
         let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
-        let overflow = bcx.icmp(llvm::IntUGE, x, max);
-        let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
+        let overflow = bx.icmp(llvm::IntUGE, x, max);
+        let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
-        bcx.select(overflow, infinity, bcx.uitofp(x, float_ty))
+        bx.select(overflow, infinity, bx.uitofp(x, float_ty))
     } else {
         if signed {
-            bcx.sitofp(x, float_ty)
+            bx.sitofp(x, float_ty)
         } else {
-            bcx.uitofp(x, float_ty)
+            bx.uitofp(x, float_ty)
         }
     }
 }
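
The special case above covers the one conversion plain `uitofp` can get wrong: every `u128` input at or above `f32::MAX + 0.5 ULP`, that is `2^128 - 2^103`, must round to infinity. A sketch; note that current Rust's `as` casts already saturate, so the explicit branch is for illustration:

```rust
fn u128_to_f32(x: u128) -> f32 {
    // f32::MAX + 0.5 ULP == 2^128 - 2^103 == (2^25 - 1) << 103.
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY // select(overflow, infinity, ...)
    } else {
        x as f32 // in range: uitofp rounds correctly
    }
}

fn main() {
    assert_eq!(u128_to_f32(u128::MAX), f32::INFINITY);
    assert_eq!(u128_to_f32(1), 1.0);
}
```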
 
-fn cast_float_to_int(bcx: &Builder,
+fn cast_float_to_int(bx: &Builder,
                      signed: bool,
                      x: ValueRef,
                      float_ty: Type,
                      int_ty: Type) -> ValueRef {
     let fptosui_result = if signed {
-        bcx.fptosi(x, int_ty)
+        bx.fptosi(x, int_ty)
     } else {
-        bcx.fptoui(x, int_ty)
+        bx.fptoui(x, int_ty)
     };
 
-    if !bcx.sess().opts.debugging_opts.saturating_float_casts {
+    if !bx.sess().opts.debugging_opts.saturating_float_casts {
         return fptosui_result;
     }
     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
@@ -870,8 +870,8 @@
     }
     let float_bits_to_llval = |bits| {
         let bits_llval = match float_ty.float_width() {
-            32 => C_u32(bcx.ccx, bits as u32),
-            64 => C_u64(bcx.ccx, bits as u64),
+            32 => C_u32(bx.cx, bits as u32),
+            64 => C_u64(bx.cx, bits as u64),
             n => bug!("unsupported float width {}", n),
         };
         consts::bitcast(bits_llval, float_ty)
@@ -924,19 +924,19 @@
    // negation, and the negation can be merged into the select. Therefore, it is not necessarily any
    // more expensive than an ordered ("normal") comparison. Whether these optimizations will be
     // performed is ultimately up to the backend, but at least x86 does perform them.
-    let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
-    let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
+    let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min);
+    let greater = bx.fcmp(llvm::RealOGT, x, f_max);
     let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
     let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
-    let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
-    let s1 = bcx.select(greater, int_max, s0);
+    let s0 = bx.select(less_or_nan, int_min, fptosui_result);
+    let s1 = bx.select(greater, int_max, s0);
 
     // Step 3: NaN replacement.
     // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
     // Therefore we only need to execute this step for signed integer types.
     if signed {
         // LLVM has no isNaN predicate, so we use (x == x) instead
-        bcx.select(bcx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
+        bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
     } else {
         s1
     }
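
Taken together, the three steps implement the following source-level semantics (a minimal sketch for f64 -> i32; the helper is hypothetical, and modern `f64 as i32` behaves this way now that saturating casts are the default):

```rust
fn saturating_f64_to_i32(x: f64) -> i32 {
    // Step 3 in source form: NaN maps to zero (the IR tests `x == x`
    // because LLVM has no isNaN predicate).
    if x.is_nan() {
        return 0;
    }
    // Steps 1-2: clamp anything outside [i32::MIN, i32::MAX] to the bounds,
    // mirroring the `less_or_nan` / `greater` selects.
    if x < i32::MIN as f64 {
        i32::MIN
    } else if x > i32::MAX as f64 {
        i32::MAX
    } else {
        x as i32 // in range: the raw fptosi result is already correct
    }
}

fn main() {
    assert_eq!(saturating_f64_to_i32(f64::NAN), 0);
    assert_eq!(saturating_f64_to_i32(f64::INFINITY), i32::MAX);
    assert_eq!(saturating_f64_to_i32(-1e300), i32::MIN);
    assert_eq!(saturating_f64_to_i32(41.9), 41);
}
```
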
diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs
index e0ca5dc..b5b7484 100644
--- a/src/librustc_trans/mir/statement.rs
+++ b/src/librustc_trans/mir/statement.rs
@@ -13,28 +13,28 @@
 use asm;
 use builder::Builder;
 
-use super::MirContext;
+use super::FunctionCx;
 use super::LocalRef;
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
+impl<'a, 'tcx> FunctionCx<'a, 'tcx> {
     pub fn trans_statement(&mut self,
-                           bcx: Builder<'a, 'tcx>,
+                           bx: Builder<'a, 'tcx>,
                            statement: &mir::Statement<'tcx>)
                            -> Builder<'a, 'tcx> {
         debug!("trans_statement(statement={:?})", statement);
 
-        self.set_debug_loc(&bcx, statement.source_info);
+        self.set_debug_loc(&bx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(ref place, ref rvalue) => {
                 if let mir::Place::Local(index) = *place {
                     match self.locals[index] {
                         LocalRef::Place(tr_dest) => {
-                            self.trans_rvalue(bcx, tr_dest, rvalue)
+                            self.trans_rvalue(bx, tr_dest, rvalue)
                         }
                         LocalRef::Operand(None) => {
-                            let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue);
+                            let (bx, operand) = self.trans_rvalue_operand(bx, rvalue);
                             self.locals[index] = LocalRef::Operand(Some(operand));
-                            bcx
+                            bx
                         }
                         LocalRef::Operand(Some(op)) => {
                             if !op.layout.is_zst() {
@@ -45,46 +45,46 @@
 
                             // If the type is zero-sized, it's already been set here,
                             // but we still need to make sure we translate the operand
-                            self.trans_rvalue_operand(bcx, rvalue).0
+                            self.trans_rvalue_operand(bx, rvalue).0
                         }
                     }
                 } else {
-                    let tr_dest = self.trans_place(&bcx, place);
-                    self.trans_rvalue(bcx, tr_dest, rvalue)
+                    let tr_dest = self.trans_place(&bx, place);
+                    self.trans_rvalue(bx, tr_dest, rvalue)
                 }
             }
             mir::StatementKind::SetDiscriminant{ref place, variant_index} => {
-                self.trans_place(&bcx, place)
-                    .trans_set_discr(&bcx, variant_index);
-                bcx
+                self.trans_place(&bx, place)
+                    .trans_set_discr(&bx, variant_index);
+                bx
             }
             mir::StatementKind::StorageLive(local) => {
                 if let LocalRef::Place(tr_place) = self.locals[local] {
-                    tr_place.storage_live(&bcx);
+                    tr_place.storage_live(&bx);
                 }
-                bcx
+                bx
             }
             mir::StatementKind::StorageDead(local) => {
                 if let LocalRef::Place(tr_place) = self.locals[local] {
-                    tr_place.storage_dead(&bcx);
+                    tr_place.storage_dead(&bx);
                 }
-                bcx
+                bx
             }
             mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
                 let outputs = outputs.iter().map(|output| {
-                    self.trans_place(&bcx, output)
+                    self.trans_place(&bx, output)
                 }).collect();
 
                 let input_vals = inputs.iter().map(|input| {
-                    self.trans_operand(&bcx, input).immediate()
+                    self.trans_operand(&bx, input).immediate()
                 }).collect();
 
-                asm::trans_inline_asm(&bcx, asm, outputs, input_vals);
-                bcx
+                asm::trans_inline_asm(&bx, asm, outputs, input_vals);
+                bx
             }
             mir::StatementKind::EndRegion(_) |
             mir::StatementKind::Validate(..) |
-            mir::StatementKind::Nop => bcx,
+            mir::StatementKind::Nop => bx,
         }
     }
 }
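
Beyond the rename, `trans_statement` shows the by-value builder-threading convention: each arm consumes `bx` and hands it back. A toy model of just that pattern (all names here are illustrative stand-ins, not rustc types):

```rust
// Each handler takes the builder by value and must return it, so ownership
// makes it impossible to keep emitting into a stale insertion point.
struct Builder {
    insts: Vec<String>,
}

enum Statement {
    Assign(&'static str, i64),
    Nop,
}

fn trans_statement(mut bx: Builder, stmt: &Statement) -> Builder {
    match *stmt {
        Statement::Assign(name, v) => {
            bx.insts.push(format!("store {} -> {}", v, name));
            bx
        }
        // No codegen needed: hand the builder back unchanged, like
        // StatementKind::Nop above.
        Statement::Nop => bx,
    }
}

fn main() {
    let mut bx = Builder { insts: Vec::new() };
    for stmt in &[Statement::Assign("x", 1), Statement::Nop] {
        bx = trans_statement(bx, stmt);
    }
    assert_eq!(bx.insts, vec!["store 1 -> x"]);
}
```
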
diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs
index fa6a42e..cc270ab 100644
--- a/src/librustc_trans/trans_item.rs
+++ b/src/librustc_trans/trans_item.rs
@@ -18,7 +18,7 @@
 use attributes;
 use base;
 use consts;
-use context::CrateContext;
+use context::CodegenCx;
 use declare;
 use llvm;
 use monomorphize::Instance;
@@ -38,18 +38,18 @@
 pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt;
 
 pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
-    fn define(&self, ccx: &CrateContext<'a, 'tcx>) {
+    fn define(&self, cx: &CodegenCx<'a, 'tcx>) {
         debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
-               self.to_string(ccx.tcx()),
+               self.to_string(cx.tcx),
                self.to_raw_string(),
-               ccx.codegen_unit().name());
+               cx.codegen_unit.name());
 
         match *self.as_mono_item() {
             MonoItem::Static(node_id) => {
-                let tcx = ccx.tcx();
+                let tcx = cx.tcx;
                 let item = tcx.hir.expect_item(node_id);
                 if let hir::ItemStatic(_, m, _) = item.node {
-                    match consts::trans_static(&ccx, m, item.id, &item.attrs) {
+                    match consts::trans_static(&cx, m, item.id, &item.attrs) {
                         Ok(_) => { /* Cool, everything's alright. */ },
                         Err(err) => {
                             err.report(tcx, item.span, "static");
@@ -60,51 +60,51 @@
                 }
             }
             MonoItem::GlobalAsm(node_id) => {
-                let item = ccx.tcx().hir.expect_item(node_id);
+                let item = cx.tcx.hir.expect_item(node_id);
                 if let hir::ItemGlobalAsm(ref ga) = item.node {
-                    asm::trans_global_asm(ccx, ga);
+                    asm::trans_global_asm(cx, ga);
                 } else {
                     span_bug!(item.span, "Mismatch between hir::Item type and TransItem type")
                 }
             }
             MonoItem::Fn(instance) => {
-                base::trans_instance(&ccx, instance);
+                base::trans_instance(&cx, instance);
             }
         }
 
         debug!("END IMPLEMENTING '{} ({})' in cgu {}",
-               self.to_string(ccx.tcx()),
+               self.to_string(cx.tcx),
                self.to_raw_string(),
-               ccx.codegen_unit().name());
+               cx.codegen_unit.name());
     }
 
     fn predefine(&self,
-                 ccx: &CrateContext<'a, 'tcx>,
+                 cx: &CodegenCx<'a, 'tcx>,
                  linkage: Linkage,
                  visibility: Visibility) {
         debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
-               self.to_string(ccx.tcx()),
+               self.to_string(cx.tcx),
                self.to_raw_string(),
-               ccx.codegen_unit().name());
+               cx.codegen_unit.name());
 
-        let symbol_name = self.symbol_name(ccx.tcx());
+        let symbol_name = self.symbol_name(cx.tcx);
 
         debug!("symbol {}", &symbol_name);
 
         match *self.as_mono_item() {
             MonoItem::Static(node_id) => {
-                predefine_static(ccx, node_id, linkage, visibility, &symbol_name);
+                predefine_static(cx, node_id, linkage, visibility, &symbol_name);
             }
             MonoItem::Fn(instance) => {
-                predefine_fn(ccx, instance, linkage, visibility, &symbol_name);
+                predefine_fn(cx, instance, linkage, visibility, &symbol_name);
             }
             MonoItem::GlobalAsm(..) => {}
         }
 
         debug!("END PREDEFINING '{} ({})' in cgu {}",
-               self.to_string(ccx.tcx()),
+               self.to_string(cx.tcx),
                self.to_raw_string(),
-               ccx.codegen_unit().name());
+               cx.codegen_unit.name());
     }
 
     fn local_span(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option<Span> {
@@ -138,18 +138,18 @@
 
 impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {}
 
-fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                               node_id: ast::NodeId,
                               linkage: Linkage,
                               visibility: Visibility,
                               symbol_name: &str) {
-    let def_id = ccx.tcx().hir.local_def_id(node_id);
-    let instance = Instance::mono(ccx.tcx(), def_id);
-    let ty = instance.ty(ccx.tcx());
-    let llty = ccx.layout_of(ty).llvm_type(ccx);
+    let def_id = cx.tcx.hir.local_def_id(node_id);
+    let instance = Instance::mono(cx.tcx, def_id);
+    let ty = instance.ty(cx.tcx);
+    let llty = cx.layout_of(ty).llvm_type(cx);
 
-    let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| {
-        ccx.sess().span_fatal(ccx.tcx().hir.span(node_id),
+    let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| {
+        cx.sess().span_fatal(cx.tcx.hir.span(node_id),
             &format!("symbol `{}` is already defined", symbol_name))
     });
 
@@ -158,11 +158,11 @@
         llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
     }
 
-    ccx.instances().borrow_mut().insert(instance, g);
-    ccx.statics().borrow_mut().insert(g, def_id);
+    cx.instances.borrow_mut().insert(instance, g);
+    cx.statics.borrow_mut().insert(g, def_id);
 }
 
-fn predefine_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                           instance: Instance<'tcx>,
                           linkage: Linkage,
                           visibility: Visibility,
@@ -170,14 +170,14 @@
     assert!(!instance.substs.needs_infer() &&
             !instance.substs.has_param_types());
 
-    let mono_ty = instance.ty(ccx.tcx());
-    let attrs = instance.def.attrs(ccx.tcx());
-    let lldecl = declare::declare_fn(ccx, symbol_name, mono_ty);
+    let mono_ty = instance.ty(cx.tcx);
+    let attrs = instance.def.attrs(cx.tcx);
+    let lldecl = declare::declare_fn(cx, symbol_name, mono_ty);
     unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
-    base::set_link_section(ccx, lldecl, &attrs);
+    base::set_link_section(cx, lldecl, &attrs);
     if linkage == Linkage::LinkOnceODR ||
         linkage == Linkage::WeakODR {
-        llvm::SetUniqueComdat(ccx.llmod(), lldecl);
+        llvm::SetUniqueComdat(cx.llmod, lldecl);
     }
 
     // If we're compiling the compiler-builtins crate, e.g. the equivalent of
@@ -185,7 +185,7 @@
     // visibility as we're going to link this object all over the place but
     // don't want the symbols to get exported.
     if linkage != Linkage::Internal && linkage != Linkage::Private &&
-       attr::contains_name(ccx.tcx().hir.krate_attrs(), "compiler_builtins") {
+       attr::contains_name(cx.tcx.hir.krate_attrs(), "compiler_builtins") {
         unsafe {
             llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
         }
@@ -196,10 +196,10 @@
     }
 
     debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance);
-    if instance.def.is_inline(ccx.tcx()) {
+    if instance.def.is_inline(cx.tcx) {
         attributes::inline(lldecl, attributes::InlineAttr::Hint);
     }
-    attributes::from_fn_attrs(ccx, lldecl, instance.def.def_id());
+    attributes::from_fn_attrs(cx, lldecl, instance.def.def_id());
 
-    ccx.instances().borrow_mut().insert(instance, lldecl);
+    cx.instances.borrow_mut().insert(instance, lldecl);
 }
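
The `predefine`/`define` split exists so every symbol in a codegen unit is declared before any body is emitted, letting definitions reference one another regardless of order. Schematically (assumed stand-in types, not the real `CodegenCx`):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy)]
enum MonoItem {
    Fn(&'static str),
    Static(&'static str),
}

fn symbol(item: MonoItem) -> &'static str {
    match item {
        MonoItem::Fn(name) | MonoItem::Static(name) => name,
    }
}

#[derive(Default)]
struct CodegenCx {
    // Stand-in for cx.instances / cx.statics: symbol -> body emitted?
    symbols: HashMap<&'static str, bool>,
}

impl CodegenCx {
    fn predefine(&mut self, item: MonoItem) {
        // Phase 1: declare only; references to this symbol can now be emitted.
        self.symbols.insert(symbol(item), false);
    }
    fn define(&mut self, item: MonoItem) {
        // Phase 2: fill in the body behind the existing declaration.
        *self.symbols.get_mut(symbol(item)).expect("predefined") = true;
    }
}

fn main() {
    let items = [MonoItem::Fn("foo"), MonoItem::Static("BAR")];
    let mut cx = CodegenCx::default();
    for &item in &items { cx.predefine(item); }
    for &item in &items { cx.define(item); }
    assert!(cx.symbols.values().all(|&defined| defined));
}
```
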
diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs
index 6cbe175..a77acc4 100644
--- a/src/librustc_trans/type_.rs
+++ b/src/librustc_trans/type_.rs
@@ -14,7 +14,7 @@
 use llvm::{ContextRef, TypeRef, Bool, False, True, TypeKind};
 use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
 
-use context::CrateContext;
+use context::CodegenCx;
 
 use syntax::ast;
 use rustc::ty::layout::{self, Align, Size};
@@ -62,115 +62,115 @@
         unsafe { mem::transmute(slice) }
     }
 
-    pub fn void(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
+    pub fn void(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMVoidTypeInContext(cx.llcx))
     }
 
-    pub fn metadata(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx()))
+    pub fn metadata(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMRustMetadataTypeInContext(cx.llcx))
     }
 
-    pub fn i1(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt1TypeInContext(ccx.llcx()))
+    pub fn i1(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMInt1TypeInContext(cx.llcx))
     }
 
-    pub fn i8(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt8TypeInContext(ccx.llcx()))
+    pub fn i8(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMInt8TypeInContext(cx.llcx))
     }
 
     pub fn i8_llcx(llcx: ContextRef) -> Type {
         ty!(llvm::LLVMInt8TypeInContext(llcx))
     }
 
-    pub fn i16(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt16TypeInContext(ccx.llcx()))
+    pub fn i16(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMInt16TypeInContext(cx.llcx))
     }
 
-    pub fn i32(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt32TypeInContext(ccx.llcx()))
+    pub fn i32(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMInt32TypeInContext(cx.llcx))
     }
 
-    pub fn i64(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMInt64TypeInContext(ccx.llcx()))
+    pub fn i64(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMInt64TypeInContext(cx.llcx))
     }
 
-    pub fn i128(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMIntTypeInContext(ccx.llcx(), 128))
+    pub fn i128(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMIntTypeInContext(cx.llcx, 128))
     }
 
     // Creates an integer type with the given number of bits, e.g. i24
-    pub fn ix(ccx: &CrateContext, num_bits: u64) -> Type {
-        ty!(llvm::LLVMIntTypeInContext(ccx.llcx(), num_bits as c_uint))
+    pub fn ix(cx: &CodegenCx, num_bits: u64) -> Type {
+        ty!(llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint))
     }
 
-    pub fn f32(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMFloatTypeInContext(ccx.llcx()))
+    pub fn f32(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMFloatTypeInContext(cx.llcx))
     }
 
-    pub fn f64(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMDoubleTypeInContext(ccx.llcx()))
+    pub fn f64(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMDoubleTypeInContext(cx.llcx))
     }
 
-    pub fn bool(ccx: &CrateContext) -> Type {
-        Type::i8(ccx)
+    pub fn bool(cx: &CodegenCx) -> Type {
+        Type::i8(cx)
     }
 
-    pub fn char(ccx: &CrateContext) -> Type {
-        Type::i32(ccx)
+    pub fn char(cx: &CodegenCx) -> Type {
+        Type::i32(cx)
     }
 
-    pub fn i8p(ccx: &CrateContext) -> Type {
-        Type::i8(ccx).ptr_to()
+    pub fn i8p(cx: &CodegenCx) -> Type {
+        Type::i8(cx).ptr_to()
     }
 
     pub fn i8p_llcx(llcx: ContextRef) -> Type {
         Type::i8_llcx(llcx).ptr_to()
     }
 
-    pub fn isize(ccx: &CrateContext) -> Type {
-        match &ccx.tcx().sess.target.target.target_pointer_width[..] {
-            "16" => Type::i16(ccx),
-            "32" => Type::i32(ccx),
-            "64" => Type::i64(ccx),
+    pub fn isize(cx: &CodegenCx) -> Type {
+        match &cx.tcx.sess.target.target.target_pointer_width[..] {
+            "16" => Type::i16(cx),
+            "32" => Type::i32(cx),
+            "64" => Type::i64(cx),
             tws => bug!("Unsupported target word size for int: {}", tws),
         }
     }
 
-    pub fn c_int(ccx: &CrateContext) -> Type {
-        match &ccx.tcx().sess.target.target.target_c_int_width[..] {
-            "16" => Type::i16(ccx),
-            "32" => Type::i32(ccx),
-            "64" => Type::i64(ccx),
+    pub fn c_int(cx: &CodegenCx) -> Type {
+        match &cx.tcx.sess.target.target.target_c_int_width[..] {
+            "16" => Type::i16(cx),
+            "32" => Type::i32(cx),
+            "64" => Type::i64(cx),
             width => bug!("Unsupported target_c_int_width: {}", width),
         }
     }
 
-    pub fn int_from_ty(ccx: &CrateContext, t: ast::IntTy) -> Type {
+    pub fn int_from_ty(cx: &CodegenCx, t: ast::IntTy) -> Type {
         match t {
-            ast::IntTy::Isize => ccx.isize_ty(),
-            ast::IntTy::I8 => Type::i8(ccx),
-            ast::IntTy::I16 => Type::i16(ccx),
-            ast::IntTy::I32 => Type::i32(ccx),
-            ast::IntTy::I64 => Type::i64(ccx),
-            ast::IntTy::I128 => Type::i128(ccx),
+            ast::IntTy::Isize => cx.isize_ty,
+            ast::IntTy::I8 => Type::i8(cx),
+            ast::IntTy::I16 => Type::i16(cx),
+            ast::IntTy::I32 => Type::i32(cx),
+            ast::IntTy::I64 => Type::i64(cx),
+            ast::IntTy::I128 => Type::i128(cx),
         }
     }
 
-    pub fn uint_from_ty(ccx: &CrateContext, t: ast::UintTy) -> Type {
+    pub fn uint_from_ty(cx: &CodegenCx, t: ast::UintTy) -> Type {
         match t {
-            ast::UintTy::Usize => ccx.isize_ty(),
-            ast::UintTy::U8 => Type::i8(ccx),
-            ast::UintTy::U16 => Type::i16(ccx),
-            ast::UintTy::U32 => Type::i32(ccx),
-            ast::UintTy::U64 => Type::i64(ccx),
-            ast::UintTy::U128 => Type::i128(ccx),
+            ast::UintTy::Usize => cx.isize_ty,
+            ast::UintTy::U8 => Type::i8(cx),
+            ast::UintTy::U16 => Type::i16(cx),
+            ast::UintTy::U32 => Type::i32(cx),
+            ast::UintTy::U64 => Type::i64(cx),
+            ast::UintTy::U128 => Type::i128(cx),
         }
     }
 
-    pub fn float_from_ty(ccx: &CrateContext, t: ast::FloatTy) -> Type {
+    pub fn float_from_ty(cx: &CodegenCx, t: ast::FloatTy) -> Type {
         match t {
-            ast::FloatTy::F32 => Type::f32(ccx),
-            ast::FloatTy::F64 => Type::f64(ccx),
+            ast::FloatTy::F32 => Type::f32(cx),
+            ast::FloatTy::F64 => Type::f64(cx),
         }
     }
 
@@ -186,16 +186,16 @@
                                    args.len() as c_uint, True))
     }
 
-    pub fn struct_(ccx: &CrateContext, els: &[Type], packed: bool) -> Type {
+    pub fn struct_(cx: &CodegenCx, els: &[Type], packed: bool) -> Type {
         let els: &[TypeRef] = Type::to_ref_slice(els);
-        ty!(llvm::LLVMStructTypeInContext(ccx.llcx(), els.as_ptr(),
+        ty!(llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(),
                                           els.len() as c_uint,
                                           packed as Bool))
     }
 
-    pub fn named_struct(ccx: &CrateContext, name: &str) -> Type {
+    pub fn named_struct(cx: &CodegenCx, name: &str) -> Type {
         let name = CString::new(name).unwrap();
-        ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr()))
+        ty!(llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()))
     }
 
 
@@ -265,7 +265,7 @@
         }
     }
 
-    pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type {
+    pub fn from_integer(cx: &CodegenCx, i: layout::Integer) -> Type {
         use rustc::ty::layout::Integer::*;
         match i {
             I8 => Type::i8(cx),
@@ -278,23 +278,23 @@
 
    /// Return an LLVM type that has at most the required alignment,
     /// as a conservative approximation for unknown pointee types.
-    pub fn pointee_for_abi_align(ccx: &CrateContext, align: Align) -> Type {
+    pub fn pointee_for_abi_align(cx: &CodegenCx, align: Align) -> Type {
         // FIXME(eddyb) We could find a better approximation if ity.align < align.
-        let ity = layout::Integer::approximate_abi_align(ccx, align);
-        Type::from_integer(ccx, ity)
+        let ity = layout::Integer::approximate_abi_align(cx, align);
+        Type::from_integer(cx, ity)
     }
 
    /// Return an LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
-    pub fn padding_filler(ccx: &CrateContext, size: Size, align: Align) -> Type {
-        let unit = layout::Integer::approximate_abi_align(ccx, align);
+    pub fn padding_filler(cx: &CodegenCx, size: Size, align: Align) -> Type {
+        let unit = layout::Integer::approximate_abi_align(cx, align);
         let size = size.bytes();
         let unit_size = unit.size().bytes();
         assert_eq!(size % unit_size, 0);
-        Type::array(&Type::from_integer(ccx, unit), size / unit_size)
+        Type::array(&Type::from_integer(cx, unit), size / unit_size)
     }
 
-    pub fn x86_mmx(ccx: &CrateContext) -> Type {
-        ty!(llvm::LLVMX86MMXTypeInContext(ccx.llcx()))
+    pub fn x86_mmx(cx: &CodegenCx) -> Type {
+        ty!(llvm::LLVMX86MMXTypeInContext(cx.llcx))
     }
 }
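
`padding_filler` picks the widest integer unit whose alignment does not exceed the requested one and tiles it to the exact size. Its arithmetic, modeled standalone (assuming integer alignment equals integer size, as on common targets):

```rust
// Returns (unit_bytes, array_len) such that unit_bytes * array_len == size
// and the unit's natural alignment does not exceed `align`.
fn padding_filler(size: u64, align: u64) -> (u64, u64) {
    // approximate_abi_align analogue: widest of i64/i32/i16/i8 that fits.
    let unit = [8u64, 4, 2, 1].iter().copied().find(|&u| u <= align).unwrap();
    assert_eq!(size % unit, 0); // mirrors the assert in the code above
    (unit, size / unit)
}

fn main() {
    // 12 bytes of padding at 4-byte alignment -> [3 x i32] in LLVM terms.
    assert_eq!(padding_filler(12, 4), (4, 3));
    // 16 bytes at 16-byte alignment still caps at i64 -> [2 x i64].
    assert_eq!(padding_filler(16, 16), (8, 2));
}
```
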
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
index 8d9bc07..b1533cf 100644
--- a/src/librustc_trans/type_of.rs
+++ b/src/librustc_trans/type_of.rs
@@ -19,7 +19,7 @@
 
 use std::fmt::Write;
 
-fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                 layout: TyLayout<'tcx>,
                                 defer: &mut Option<(Type, TyLayout<'tcx>)>)
                                 -> Type {
@@ -34,19 +34,19 @@
             // one-element SIMD vectors, so it's assumed this won't clash with
             // much else.
             let use_x86_mmx = count == 1 && layout.size.bits() == 64 &&
-                (ccx.sess().target.target.arch == "x86" ||
-                 ccx.sess().target.target.arch == "x86_64");
+                (cx.sess().target.target.arch == "x86" ||
+                 cx.sess().target.target.arch == "x86_64");
             if use_x86_mmx {
-                return Type::x86_mmx(ccx)
+                return Type::x86_mmx(cx)
             } else {
-                let element = layout.scalar_llvm_type_at(ccx, element, Size::from_bytes(0));
+                let element = layout.scalar_llvm_type_at(cx, element, Size::from_bytes(0));
                 return Type::vector(&element, count);
             }
         }
         layout::Abi::ScalarPair(..) => {
-            return Type::struct_(ccx, &[
-                layout.scalar_pair_element_llvm_type(ccx, 0),
-                layout.scalar_pair_element_llvm_type(ccx, 1),
+            return Type::struct_(cx, &[
+                layout.scalar_pair_element_llvm_type(cx, 0),
+                layout.scalar_pair_element_llvm_type(cx, 1),
             ], false);
         }
         layout::Abi::Uninhabited |
@@ -61,7 +61,7 @@
         ty::TyForeign(..) |
         ty::TyStr => {
             let mut name = String::with_capacity(32);
-            let printer = DefPathBasedNames::new(ccx.tcx(), true, true);
+            let printer = DefPathBasedNames::new(cx.tcx, true, true);
             printer.push_type_name(layout.ty, &mut name);
             match (&layout.ty.sty, &layout.variants) {
                 (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => {
@@ -78,30 +78,30 @@
 
     match layout.fields {
         layout::FieldPlacement::Union(_) => {
-            let fill = Type::padding_filler(ccx, layout.size, layout.align);
+            let fill = Type::padding_filler(cx, layout.size, layout.align);
             let packed = false;
             match name {
                 None => {
-                    Type::struct_(ccx, &[fill], packed)
+                    Type::struct_(cx, &[fill], packed)
                 }
                 Some(ref name) => {
-                    let mut llty = Type::named_struct(ccx, name);
+                    let mut llty = Type::named_struct(cx, name);
                     llty.set_struct_body(&[fill], packed);
                     llty
                 }
             }
         }
         layout::FieldPlacement::Array { count, .. } => {
-            Type::array(&layout.field(ccx, 0).llvm_type(ccx), count)
+            Type::array(&layout.field(cx, 0).llvm_type(cx), count)
         }
         layout::FieldPlacement::Arbitrary { .. } => {
             match name {
                 None => {
-                    let (llfields, packed) = struct_llfields(ccx, layout);
-                    Type::struct_(ccx, &llfields, packed)
+                    let (llfields, packed) = struct_llfields(cx, layout);
+                    Type::struct_(cx, &llfields, packed)
                 }
                 Some(ref name) => {
-                    let llty = Type::named_struct(ccx, name);
+                    let llty = Type::named_struct(cx, name);
                     *defer = Some((llty, layout));
                     llty
                 }
@@ -110,7 +110,7 @@
     }
 }
 
-fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                              layout: TyLayout<'tcx>)
                              -> (Vec<Type>, bool) {
     debug!("struct_llfields: {:#?}", layout);
@@ -121,7 +121,7 @@
     let mut prev_align = layout.align;
     let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
     for i in layout.fields.index_by_increasing_offset() {
-        let field = layout.field(ccx, i);
+        let field = layout.field(cx, i);
         packed |= layout.align.abi() < field.align.abi();
 
         let target_offset = layout.fields.offset(i as usize);
@@ -131,10 +131,10 @@
         let padding = target_offset - offset;
         let padding_align = layout.align.min(prev_align).min(field.align);
         assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
-        result.push(Type::padding_filler(ccx, padding, padding_align));
+        result.push(Type::padding_filler(cx, padding, padding_align));
         debug!("    padding before: {:?}", padding);
 
-        result.push(field.llvm_type(ccx));
+        result.push(field.llvm_type(cx));
         offset = target_offset + field.size;
         prev_align = field.align;
     }
@@ -148,7 +148,7 @@
         assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
         debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size);
-        result.push(Type::padding_filler(ccx, padding, padding_align));
+        result.push(Type::padding_filler(cx, padding, padding_align));
         assert!(result.len() == 1 + field_count * 2);
     } else {
         debug!("struct_llfields: offset: {:?} stride: {:?}",
@@ -158,7 +158,7 @@
     (result, packed)
 }
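
In other words, the loop walks fields in offset order and materializes explicit padding before each one; a compact model of just that bookkeeping (fields given as (target_offset, size) pairs, already sorted):

```rust
// Returns the padding inserted before each field, mirroring
// `target_offset - offset` in the loop above.
fn paddings(fields: &[(u64, u64)]) -> Vec<u64> {
    let mut offset = 0;
    let mut pads = Vec::with_capacity(fields.len());
    for &(target_offset, size) in fields {
        assert!(target_offset >= offset, "fields sorted by offset");
        pads.push(target_offset - offset);
        offset = target_offset + size;
    }
    pads
}

fn main() {
    // #[repr(C)] struct { u8, u32 }: the second field needs 3 pad bytes.
    assert_eq!(paddings(&[(0, 1), (4, 4)]), vec![0, 3]);
}
```
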
 
-impl<'a, 'tcx> CrateContext<'a, 'tcx> {
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
     pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
         self.layout_of(ty).align
     }
@@ -197,14 +197,14 @@
 pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_immediate(&self) -> bool;
     fn is_llvm_scalar_pair<'a>(&self) -> bool;
-    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
-    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
-    fn scalar_llvm_type_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type;
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                scalar: &layout::Scalar, offset: Size) -> Type;
-    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+    fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                          index: usize) -> Type;
     fn llvm_field_index(&self, index: usize) -> u64;
-    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size)
                            -> Option<PointeeInfo>;
 }
 
@@ -240,28 +240,28 @@
     /// with the inner-most trailing unsized field using the "minimal unit"
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
-    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
         if let layout::Abi::Scalar(ref scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
-            if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
+            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                 return llty;
             }
             let llty = match self.ty.sty {
                 ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
                 ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
-                    ccx.layout_of(ty).llvm_type(ccx).ptr_to()
+                    cx.layout_of(ty).llvm_type(cx).ptr_to()
                 }
                 ty::TyAdt(def, _) if def.is_box() => {
-                    ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx).ptr_to()
+                    cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to()
                 }
                 ty::TyFnPtr(sig) => {
-                    let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
-                    FnType::new(ccx, sig, &[]).llvm_type(ccx).ptr_to()
+                    let sig = cx.tcx.erase_late_bound_regions_and_normalize(&sig);
+                    FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to()
                 }
-                _ => self.scalar_llvm_type_at(ccx, scalar, Size::from_bytes(0))
+                _ => self.scalar_llvm_type_at(cx, scalar, Size::from_bytes(0))
             };
-            ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty);
+            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
             return llty;
         }
 
@@ -271,7 +271,7 @@
             layout::Variants::Single { index } => Some(index),
             _ => None
         };
-        if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, variant_index)) {
+        if let Some(&llty) = cx.lltypes.borrow().get(&(self.ty, variant_index)) {
             return llty;
         }
 
@@ -281,69 +281,69 @@
 
         // Make sure lifetimes are erased, to avoid generating distinct LLVM
         // types for Rust types that only differ in the choice of lifetimes.
-        let normal_ty = ccx.tcx().erase_regions(&self.ty);
+        let normal_ty = cx.tcx.erase_regions(&self.ty);
 
         let mut defer = None;
         let llty = if self.ty != normal_ty {
-            let mut layout = ccx.layout_of(normal_ty);
+            let mut layout = cx.layout_of(normal_ty);
             if let Some(v) = variant_index {
-                layout = layout.for_variant(ccx, v);
+                layout = layout.for_variant(cx, v);
             }
-            layout.llvm_type(ccx)
+            layout.llvm_type(cx)
         } else {
-            uncached_llvm_type(ccx, *self, &mut defer)
+            uncached_llvm_type(cx, *self, &mut defer)
         };
         debug!("--> mapped {:#?} to llty={:?}", self, llty);
 
-        ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty);
+        cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty);
 
         if let Some((mut llty, layout)) = defer {
-            let (llfields, packed) = struct_llfields(ccx, layout);
+            let (llfields, packed) = struct_llfields(cx, layout);
             llty.set_struct_body(&llfields, packed)
         }
 
         llty
     }
 
-    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Type {
         if let layout::Abi::Scalar(ref scalar) = self.abi {
             if scalar.is_bool() {
-                return Type::i1(ccx);
+                return Type::i1(cx);
             }
         }
-        self.llvm_type(ccx)
+        self.llvm_type(cx)
     }
 
-    fn scalar_llvm_type_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                scalar: &layout::Scalar, offset: Size) -> Type {
         match scalar.value {
-            layout::Int(i, _) => Type::from_integer(ccx, i),
-            layout::F32 => Type::f32(ccx),
-            layout::F64 => Type::f64(ccx),
+            layout::Int(i, _) => Type::from_integer(cx, i),
+            layout::F32 => Type::f32(cx),
+            layout::F64 => Type::f64(cx),
             layout::Pointer => {
                 // If we know the alignment, pick something better than i8.
-                let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
-                    Type::pointee_for_abi_align(ccx, pointee.align)
+                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                    Type::pointee_for_abi_align(cx, pointee.align)
                 } else {
-                    Type::i8(ccx)
+                    Type::i8(cx)
                 };
                 pointee.ptr_to()
             }
         }
     }
 
-    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+    fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                          index: usize) -> Type {
         // HACK(eddyb) special-case fat pointers until LLVM removes
         // pointee types, to avoid bitcasting every `OperandRef::deref`.
         match self.ty.sty {
             ty::TyRef(..) |
             ty::TyRawPtr(_) => {
-                return self.field(ccx, index).llvm_type(ccx);
+                return self.field(cx, index).llvm_type(cx);
             }
             ty::TyAdt(def, _) if def.is_box() => {
-                let ptr_ty = ccx.tcx().mk_mut_ptr(self.ty.boxed_ty());
-                return ccx.layout_of(ptr_ty).scalar_pair_element_llvm_type(ccx, index);
+                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index);
             }
             _ => {}
         }
@@ -362,15 +362,15 @@
         // load/store `bool` as `i8` to avoid crippling LLVM optimizations,
        // `i1` in an LLVM aggregate is valid and mostly equivalent to `i8`.
         if scalar.is_bool() {
-            return Type::i1(ccx);
+            return Type::i1(cx);
         }
 
         let offset = if index == 0 {
             Size::from_bytes(0)
         } else {
-            a.value.size(ccx).abi_align(b.value.align(ccx))
+            a.value.size(cx).abi_align(b.value.align(cx))
         };
-        self.scalar_llvm_type_at(ccx, scalar, offset)
+        self.scalar_llvm_type_at(cx, scalar, offset)
     }
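
The second element's offset is just the first scalar's size aligned up to the second's alignment; as a bit-twiddling sketch (power-of-two alignments assumed):

```rust
// abi_align analogue: round `a_size` up to a multiple of `b_align`.
fn second_scalar_offset(a_size: u64, b_align: u64) -> u64 {
    debug_assert!(b_align.is_power_of_two());
    (a_size + b_align - 1) & !(b_align - 1)
}

fn main() {
    assert_eq!(second_scalar_offset(8, 8), 8); // e.g. a fat pointer's extra
    assert_eq!(second_scalar_offset(1, 4), 4); // e.g. a (bool, char) pair
}
```
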
 
     fn llvm_field_index(&self, index: usize) -> u64 {
@@ -396,16 +396,16 @@
         }
     }
 
-    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size)
                            -> Option<PointeeInfo> {
-        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
+        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
             return pointee;
         }
 
         let mut result = None;
         match self.ty.sty {
             ty::TyRawPtr(mt) if offset.bytes() == 0 => {
-                let (size, align) = ccx.size_and_align_of(mt.ty);
+                let (size, align) = cx.size_and_align_of(mt.ty);
                 result = Some(PointeeInfo {
                     size,
                     align,
@@ -414,17 +414,17 @@
             }
 
             ty::TyRef(_, mt) if offset.bytes() == 0 => {
-                let (size, align) = ccx.size_and_align_of(mt.ty);
+                let (size, align) = cx.size_and_align_of(mt.ty);
 
                 let kind = match mt.mutbl {
-                    hir::MutImmutable => if ccx.shared().type_is_freeze(mt.ty) {
+                    hir::MutImmutable => if cx.type_is_freeze(mt.ty) {
                         PointerKind::Frozen
                     } else {
                         PointerKind::Shared
                     },
                     hir::MutMutable => {
-                        if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
-                           ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
+                        if cx.tcx.sess.opts.debugging_opts.mutable_noalias ||
+                           cx.tcx.sess.panic_strategy() == PanicStrategy::Abort {
                             PointerKind::UniqueBorrowed
                         } else {
                             PointerKind::Shared
@@ -454,7 +454,7 @@
                         // niches than just null (e.g. the first page
                         // of the address space, or unaligned pointers).
                         if self.fields.offset(0) == offset {
-                            Some(self.for_variant(ccx, dataful_variant))
+                            Some(self.for_variant(cx, dataful_variant))
                         } else {
                             None
                         }
@@ -470,14 +470,14 @@
                 }
 
                 if let Some(variant) = data_variant {
-                    let ptr_end = offset + layout::Pointer.size(ccx);
+                    let ptr_end = offset + layout::Pointer.size(cx);
                     for i in 0..variant.fields.count() {
                         let field_start = variant.fields.offset(i);
                         if field_start <= offset {
-                            let field = variant.field(ccx, i);
+                            let field = variant.field(cx, i);
                             if ptr_end <= field_start + field.size {
                                 // We found the right field, look inside it.
-                                result = field.pointee_info_at(ccx, offset - field_start);
+                                result = field.pointee_info_at(cx, offset - field_start);
                                 break;
                             }
                         }
@@ -495,7 +495,7 @@
             }
         }
 
-        ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
+        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
         result
     }
 }
diff --git a/src/rustllvm/PassWrapper.cpp b/src/rustllvm/PassWrapper.cpp
index 4e326c9..54a73a0 100644
--- a/src/rustllvm/PassWrapper.cpp
+++ b/src/rustllvm/PassWrapper.cpp
@@ -746,10 +746,6 @@
   unwrap(Module)->setDataLayout(Target->createDataLayout());
 }
 
-extern "C" LLVMTargetDataRef LLVMRustGetModuleDataLayout(LLVMModuleRef M) {
-  return wrap(&unwrap(M)->getDataLayout());
-}
-
 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
   unwrap(M)->setPIELevel(PIELevel::Level::Large);
 }