diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index aa8ac4902a894..c31fb03086504 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -5,7 +5,7 @@ use super::{
 };
 
 use crate::mir;
-use crate::ty::layout::{Size, Align};
+use crate::ty::layout::{Size, MemoryPosition, Align};
 
 use rustc_data_structures::sorted_map::SortedMap;
 use rustc_target::abi::HasDataLayout;
@@ -39,10 +39,9 @@ pub struct Allocation<Tag = (),Extra = ()> {
     relocations: Relocations<Tag>,
     /// Denotes which part of this allocation is initialized.
     undef_mask: UndefMask,
-    /// The size of the allocation. Currently, must always equal `bytes.len()`.
-    pub size: Size,
-    /// The alignment of the allocation to detect unaligned reads.
-    pub align: Align,
+    /// The size and alignment of the allocation.
+    /// Currently, the size must always equal `bytes.len()`.
+    pub mem_pos: MemoryPosition,
     /// `true` if the allocation is mutable.
     /// Also used by codegen to determine if a static should be put into mutable memory,
     /// which happens for `static mut` and `static` with interior mutability.
@@ -101,12 +100,12 @@ impl<Tag> Allocation<Tag> {
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
         let size = Size::from_bytes(bytes.len() as u64);
+        let mem_pos = MemoryPosition::new(size, align);
         Self {
             bytes,
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, true),
-            size,
-            align,
+            mem_pos,
             mutability: Mutability::Immutable,
             extra: (),
         }
@@ -116,14 +115,13 @@ impl<Tag> Allocation<Tag> {
         Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
     }
 
-    pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
+    pub fn undef(mem_pos: MemoryPosition) -> Self {
+        assert_eq!(mem_pos.size.bytes() as usize as u64, mem_pos.size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; mem_pos.size.bytes() as usize],
             relocations: Relocations::new(),
-            undef_mask: UndefMask::new(size, false),
-            size,
-            align,
+            undef_mask: UndefMask::new(mem_pos.size, false),
+            mem_pos,
             mutability: Mutability::Mutable,
             extra: (),
         }
@@ -139,7 +137,6 @@ impl Allocation<(), ()> {
     ) -> Allocation<T, E> {
         Allocation {
             bytes: self.bytes,
-            size: self.size,
             relocations: Relocations::from_presorted(
                 self.relocations.iter()
                     // The allocations in the relocations (pointers stored *inside* this allocation)
@@ -151,7 +148,7 @@ impl Allocation<(), ()> {
                     .collect()
             ),
             undef_mask: self.undef_mask,
-            align: self.align,
+            mem_pos: self.mem_pos,
             mutability: self.mutability,
             extra,
         }
@@ -161,7 +158,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        self.mem_pos.size.bytes() as usize
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -398,7 +395,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         // Now we do the actual reading.
         let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
         // See if we got a pointer.
-        if size != cx.data_layout().pointer_size {
+        if size != cx.data_layout().pointer_pos.size {
             // *Now*, we better make sure that the inside is free of relocations too.
             self.check_relocations(cx, ptr, size)?;
         } else {
@@ -424,7 +421,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>>
     {
-        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
+        self.read_scalar(cx, ptr, cx.data_layout().pointer_pos.size)
     }
 
     /// Writes a *non-ZST* scalar.
@@ -486,7 +483,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         val: ScalarMaybeUndef<Tag>
     ) -> InterpResult<'tcx>
     {
-        let ptr_size = cx.data_layout().pointer_size;
+        let ptr_size = cx.data_layout().pointer_pos.size;
         self.write_scalar(cx, ptr.into(), val, ptr_size)
     }
 }
@@ -500,9 +497,10 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         ptr: Pointer<Tag>,
         size: Size,
     ) -> &[(Size, (Tag, AllocId))] {
+        let ptr_pos = cx.data_layout().pointer_pos;
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
-        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
+        let start = ptr.offset.bytes().saturating_sub(ptr_pos.size.bytes() - 1);
         let end = ptr.offset + size; // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }
@@ -543,7 +541,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             }
 
             (relocations.first().unwrap().0,
-             relocations.last().unwrap().0 + cx.data_layout().pointer_size)
+             relocations.last().unwrap().0 + cx.data_layout().pointer_pos.size)
         };
         let start = ptr.offset;
         let end = start + size;
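
`MemoryPosition` itself is defined in `rustc_target::abi` and does not appear in this excerpt. A minimal sketch of the shape implied by the call sites above (`MemoryPosition::new(size, align)`, `.size`, `.align`); the field and method names come from this diff, while the body is an assumption rather than the real definition:

use rustc_target::abi::{Align, Size};

// Hypothetical reconstruction: a (size, align) pair describing how a
// value sits in memory, replacing the previous separate fields.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct MemoryPosition {
    pub size: Size,
    pub align: Align,
}

impl MemoryPosition {
    pub fn new(size: Size, align: Align) -> Self {
        MemoryPosition { size, align }
    }
}
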
diff --git a/src/librustc/mir/interpret/error.rs b/src/librustc/mir/interpret/error.rs
index d918b9ee67347..264c1e236ae85 100644
--- a/src/librustc/mir/interpret/error.rs
+++ b/src/librustc/mir/interpret/error.rs
@@ -4,7 +4,7 @@ use crate::hir;
 use crate::hir::map::definitions::DefPathData;
 use crate::mir;
 use crate::ty::{self, Ty, layout};
-use crate::ty::layout::{Size, Align, LayoutError};
+use crate::ty::layout::{Size, MemoryPosition, Align, LayoutError};
 use crate::ty::query::TyCtxtAt;
 
 use backtrace::Backtrace;
@@ -438,7 +438,7 @@ pub enum UnsupportedOpInfo<'tcx> {
     DeallocatedWrongMemoryKind(String, String),
     ReallocateNonBasePtr,
     DeallocateNonBasePtr,
-    IncorrectAllocationInformation(Size, Size, Align, Align),
+    IncorrectAllocationInformation(MemoryPosition, MemoryPosition),
     HeapAllocZeroBytes,
     HeapAllocNonPowerOfTwoAlignment(u64),
     ReadFromReturnPointer,
@@ -484,10 +484,10 @@ impl fmt::Debug for UnsupportedOpInfo<'tcx> {
                 write!(f, "expected primitive type, got {}", ty),
             PathNotFound(ref path) =>
                 write!(f, "cannot find path {:?}", path),
-            IncorrectAllocationInformation(size, size2, align, align2) =>
+            IncorrectAllocationInformation(expect, got) =>
                 write!(f, "incorrect alloc info: expected size {} and align {}, \
                            got size {} and align {}",
-                    size.bytes(), align.bytes(), size2.bytes(), align2.bytes()),
+                    expect.size.bytes(), expect.align.bytes(), got.size.bytes(), got.align.bytes()),
             InvalidMemoryAccess =>
                 write!(f, "tried to access memory through an invalid pointer"),
             DanglingPointerDeref =>
diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs
index 1bb4d9ea4d6d9..4f13fd4a747f1 100644
--- a/src/librustc/mir/interpret/pointer.rs
+++ b/src/librustc/mir/interpret/pointer.rs
@@ -1,7 +1,7 @@
 use std::fmt::{self, Display};
 
 use crate::mir;
-use crate::ty::layout::{self, HasDataLayout, Size};
+use crate::ty::layout::{self, HasDataLayout, LayoutPositionPref, Size};
 use rustc_macros::HashStable;
 
 use super::{AllocId, InterpResult};
@@ -35,9 +35,14 @@ impl Display for CheckInAllocMsg {
 pub trait PointerArithmetic: layout::HasDataLayout {
     // These are not supposed to be overridden.
 
+    #[inline(always)]
+    fn pointer_pos(&self) -> LayoutPositionPref {
+        self.data_layout().pointer_pos
+    }
+
     #[inline(always)]
     fn pointer_size(&self) -> Size {
-        self.data_layout().pointer_size
+        self.pointer_pos().size
     }
 
     /// Helper function: truncate given value-"overflowed flag" pair to pointer size and
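
`pointer_size()` is kept as a thin wrapper over the new `pointer_pos()`, so existing callers keep compiling while new code can ask for the full position. A small usage sketch (the `cx` value is hypothetical, standing in for anything that implements `PointerArithmetic`):

// `pos` carries both the size and the preferred alignment of pointers.
let pos = cx.pointer_pos();
// The legacy accessor agrees with the new one by construction.
assert_eq!(cx.pointer_size(), pos.size);
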
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
index 7b29fb26e74f5..afe50866bad80 100644
--- a/src/librustc/mir/interpret/value.rs
+++ b/src/librustc/mir/interpret/value.rs
@@ -191,7 +191,7 @@ impl<'tcx, Tag> Scalar<Tag> {
     pub fn ptr_null(cx: &impl HasDataLayout) -> Self {
         Scalar::Raw {
             data: 0,
-            size: cx.data_layout().pointer_size.bytes() as u8,
+            size: cx.data_layout().pointer_pos.size.bytes() as u8,
         }
     }
 
@@ -205,7 +205,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         let dl = cx.data_layout();
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
+                assert_eq!(size as u64, dl.pointer_pos.size.bytes());
                 Ok(Scalar::Raw {
                     data: dl.offset(data as u64, i.bytes())? as u128,
                     size,
@@ -220,7 +220,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         let dl = cx.data_layout();
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
+                assert_eq!(size as u64, dl.pointer_pos.size.bytes());
                 Scalar::Raw {
                     data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128,
                     size,
@@ -250,7 +250,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         let dl = cx.data_layout();
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
+                assert_eq!(size as u64, dl.pointer_pos.size.bytes());
                 Scalar::Raw {
                     data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128,
                     size,
@@ -342,7 +342,7 @@ impl<'tcx, Tag> Scalar<Tag> {
                 Ok(data)
             }
             Scalar::Ptr(ptr) => {
-                assert_eq!(target_size, cx.data_layout().pointer_size);
+                assert_eq!(target_size, cx.data_layout().pointer_pos.size);
                 Err(ptr)
             }
         }
@@ -440,7 +440,7 @@ impl<'tcx, Tag> Scalar<Tag> {
     }
 
     pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
-        let b = self.to_bits(cx.data_layout().pointer_size)?;
+        let b = self.to_bits(cx.data_layout().pointer_pos.size)?;
         Ok(b as u64)
     }
 
@@ -466,7 +466,7 @@ impl<'tcx, Tag> Scalar<Tag> {
     }
 
     pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
-        let sz = cx.data_layout().pointer_size;
+        let sz = cx.data_layout().pointer_pos.size;
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
         Ok(b as i64)
diff --git a/src/librustc/mir/mod.rs b/src/librustc/mir/mod.rs
index a3ddfec765f3f..8d84b4809f674 100644
--- a/src/librustc/mir/mod.rs
+++ b/src/librustc/mir/mod.rs
@@ -1502,7 +1502,7 @@ impl<'tcx> TerminatorKind<'tcx> {
             SwitchInt { ref values, switch_ty, .. } => ty::tls::with(|tcx| {
                 let param_env = ty::ParamEnv::empty();
                 let switch_ty = tcx.lift(&switch_ty).unwrap();
-                let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+                let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().pref_pos.size;
                 values
                     .iter()
                     .map(|&u| {
diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs
index 5baf0c5948f28..d0381151978f0 100644
--- a/src/librustc/session/code_stats.rs
+++ b/src/librustc/session/code_stats.rs
@@ -1,4 +1,4 @@
-use rustc_target::abi::{Align, Size};
+use rustc_target::abi::{MemoryPosition, Size};
 use rustc_data_structures::fx::{FxHashSet};
 use std::cmp::{self, Ordering};
 use rustc_data_structures::sync::Lock;
@@ -54,8 +54,7 @@ impl CodeStats {
     pub fn record_type_size<S: ToString>(&self,
                                          kind: DataTypeKind,
                                          type_desc: S,
-                                         align: Align,
-                                         overall_size: Size,
+                                         mem_pos: MemoryPosition,
                                          packed: bool,
                                          opt_discr_size: Option<Size>,
                                          mut variants: Vec<VariantInfo>) {
@@ -68,8 +67,8 @@ impl CodeStats {
         let info = TypeSizeInfo {
             kind,
             type_description: type_desc.to_string(),
-            align: align.bytes(),
-            overall_size: overall_size.bytes(),
+            align: mem_pos.align.bytes(),
+            overall_size: mem_pos.size.bytes(),
             packed: packed,
             opt_discr_size: opt_discr_size.map(|s| s.bytes()),
             variants,
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 972452601ddd5..fd0935ad81470 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -232,7 +232,7 @@ enum StructKind {
     /// A univariant, the last field of which may be coerced to unsized.
     MaybeUnsized,
     /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
-    Prefixed(Size, Align),
+    Prefixed(MemoryPosition),
 }
 
 // Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
@@ -251,10 +251,15 @@ fn invert_mapping(map: &[u32]) -> Vec<u32> {
 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
     fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutDetails {
         let dl = self.data_layout();
-        let b_align = b.value.align(dl);
-        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
-        let b_offset = a.value.size(dl).align_to(b_align.abi);
-        let size = (b_offset + b.value.size(dl)).align_to(align.abi);
+
+        let a_pref_pos = a.value.pref_pos(dl);
+        let b_pref_pos = b.value.pref_pos(dl);
+
+        let b_align = b_pref_pos.align;
+
+        let pref_pos = a_pref_pos.align_to(b_align).align_to(dl.aggregate_align);
+        let (pref_pos, b_offset) = pref_pos.extend(b_pref_pos);
+        let pref_pos = pref_pos.strided();
 
         // HACK(nox): We iter on `b` and then `a` because `max_by_key`
         // returns the last maximum.
@@ -271,8 +276,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             },
             abi: Abi::ScalarPair(a, b),
             largest_niche,
-            align,
-            size
+            pref_pos
         }
     }
 
@@ -287,7 +291,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             bug!("struct cannot be packed and aligned");
         }
 
-        let mut align = if pack.is_some() {
+        let base_align = if pack.is_some() {
             dl.i8_align
         } else {
             dl.aggregate_align
@@ -298,8 +302,8 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
         let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
 
         let mut optimize = !repr.inhibit_struct_field_reordering_opt();
-        if let StructKind::Prefixed(_, align) = kind {
-            optimize &= align.bytes() == 1;
+        if let StructKind::Prefixed(mem_pos) = kind {
+            optimize &= mem_pos.align.bytes() == 1;
         }
 
         if optimize {
@@ -310,7 +314,11 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             };
             let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyLayout<'_>| {
-                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
+                if let Some(pack) = pack {
+                    f.pref_pos.align.abi.min(pack)
+                } else {
+                    f.pref_pos.align.abi
+                }
             };
             match kind {
                 StructKind::AlwaysSized |
@@ -336,18 +344,17 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
         // produce `memory_index` (see `invert_mapping`).
 
 
-        let mut offset = Size::ZERO;
+        let mut offset_pos = LayoutPositionPref::new(Size::ZERO, base_align);
         let mut largest_niche = None;
         let mut largest_niche_available = 0;
 
-        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+        if let StructKind::Prefixed(prefix_mem_pos) = kind {
             let prefix_align = if let Some(pack) = pack {
-                prefix_align.min(pack)
+                prefix_mem_pos.align.min(pack)
             } else {
-                prefix_align
+                prefix_mem_pos.align
             };
-            align = align.max(AbiAndPrefAlign::new(prefix_align));
-            offset = prefix_size.align_to(prefix_align);
+            offset_pos = prefix_mem_pos.align_and_stride_to(prefix_align).pref_pos();
         }
 
         for &i in &inverse_memory_index {
@@ -361,37 +368,35 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 sized = false;
             }
 
-            // Invariant: offset < dl.obj_size_bound() <= 1<<61
-            let field_align = if let Some(pack) = pack {
-                field.align.min(AbiAndPrefAlign::new(pack))
+            // Invariant: offset_pos.size < dl.obj_size_bound() <= 1<<61
+            let field_pos = if let Some(pack) = pack {
+                field.pref_pos.pack_to(AbiAndPrefAlign::new(pack))
             } else {
-                field.align
+                field.pref_pos
             };
-            offset = offset.align_to(field_align.abi);
-            align = align.max(field_align);
+            offset_pos = offset_pos.align_and_stride_to(field_pos.align);
 
-            debug!("univariant offset: {:?} field: {:#?}", offset, field);
-            offsets[i as usize] = offset;
+            debug!("univariant offset: {:?} field: {:#?}", offset_pos.size, field);
+            offsets[i as usize] = offset_pos.size;
 
             if let Some(mut niche) = field.largest_niche.clone() {
                 let available = niche.available(dl);
                 if available > largest_niche_available {
                     largest_niche_available = available;
-                    niche.offset += offset;
+                    niche.offset += offset_pos.size;
                     largest_niche = Some(niche);
                 }
             }
 
-            offset = offset.checked_add(field.size, dl)
+            offset_pos = offset_pos.checked_add(field_pos, dl)
                 .ok_or(LayoutError::SizeOverflow(ty))?;
         }
 
         if let Some(repr_align) = repr.align {
-            align = align.max(AbiAndPrefAlign::new(repr_align));
+            offset_pos = offset_pos.align_to(AbiAndPrefAlign::new(repr_align));
         }
 
-        debug!("univariant min_size: {:?}", offset);
-        let min_size = offset;
+        debug!("univariant min_size: {:?}", offset_pos.size);
 
         // As stated above, inverse_memory_index holds field indices by increasing offset.
         // This makes it an already-sorted view of the offsets vec.
@@ -407,11 +412,12 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             memory_index = inverse_memory_index;
         }
 
-        let size = min_size.align_to(align.abi);
+        // preserve stride == size
+        let pref_pos = offset_pos.strided();
         let mut abi = Abi::Aggregate { sized };
 
         // Unpack newtype ABIs and find scalar pairs.
-        if sized && size.bytes() > 0 {
+        if sized && pref_pos.size.bytes() > 0 {
             // All other fields must be ZSTs, and we need them to all start at 0.
             let mut zst_offsets =
                 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
@@ -424,8 +430,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     (Some((i, field)), None, None) => {
                         // Field fills the struct and it has a scalar or scalar pair ABI.
                         if offsets[i].bytes() == 0 &&
-                           align.abi == field.align.abi &&
-                           size == field.size {
+                           pref_pos.mem_pos() == field.pref_pos.mem_pos() {
                             match field.abi {
                                 // For plain scalars, or vectors of them, we can't unpack
                                 // newtypes for `#[repr(C)]`, as that affects C ABIs.
@@ -467,8 +472,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         };
                         if offsets[i] == pair_offsets[0] &&
                            offsets[j] == pair_offsets[1] &&
-                           align == pair.align &&
-                           size == pair.size {
+                           pref_pos == pair.pref_pos {
                             // We can use `ScalarPair` only when it matches our
                             // already computed layout (including `#[repr(C)]`).
                             abi = pair.abi;
@@ -492,8 +496,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             },
             abi,
             largest_niche,
-            align,
-            size
+            pref_pos
         })
     }
 
@@ -555,8 +558,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     fields: FieldPlacement::Union(0),
                     abi: Abi::Uninhabited,
                     largest_niche: None,
-                    align: dl.i8_align,
-                    size: Size::ZERO
+                    pref_pos: LayoutPositionPref::new(Size::ZERO, dl.i8_align)
                 })
             }
 
@@ -604,7 +606,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
 
                 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
                 let element = self.layout_of(element)?;
-                let size = element.size.checked_mul(count, dl)
+                let pref_pos = element.pref_pos.checked_mul(count, dl)
                     .ok_or(LayoutError::SizeOverflow(ty))?;
 
                 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
@@ -622,13 +624,12 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 tcx.intern_layout(LayoutDetails {
                     variants: Variants::Single { index: VariantIdx::new(0) },
                     fields: FieldPlacement::Array {
-                        stride: element.size,
+                        stride: element.pref_pos.stride(),
                         count
                     },
                     abi,
                     largest_niche,
-                    align: element.align,
-                    size
+                    pref_pos
                 })
             }
             ty::Slice(element) => {
@@ -636,13 +637,12 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 tcx.intern_layout(LayoutDetails {
                     variants: Variants::Single { index: VariantIdx::new(0) },
                     fields: FieldPlacement::Array {
-                        stride: element.size,
+                        stride: element.pref_pos.stride(),
                         count: 0
                     },
                     abi: Abi::Aggregate { sized: false },
                     largest_niche: None,
-                    align: element.align,
-                    size: Size::ZERO
+                    pref_pos: LayoutPositionPref::new(Size::ZERO, element.pref_pos.align)
                 })
             }
             ty::Str => {
@@ -654,8 +654,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     },
                     abi: Abi::Aggregate { sized: false },
                     largest_niche: None,
-                    align: dl.i8_align,
-                    size: Size::ZERO
+                    pref_pos: LayoutPositionPref::new(Size::ZERO, dl.i8_align)
                 })
             }
 
@@ -707,15 +706,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                                 ty, element.ty));
                     }
                 };
-                let size = element.size.checked_mul(count, dl)
+                let vec_pos = element.pref_pos.checked_mul(count, dl)
                     .ok_or(LayoutError::SizeOverflow(ty))?;
-                let align = dl.vector_align(size);
-                let size = size.align_to(align.abi);
+                let align = dl.vector_align(vec_pos.size);
+                let pref_pos = vec_pos.stride_to(align.abi);
 
                 tcx.intern_layout(LayoutDetails {
                     variants: Variants::Single { index: VariantIdx::new(0) },
                     fields: FieldPlacement::Array {
-                        stride: element.size,
+                        stride: element.pref_pos.stride(),
                         count
                     },
                     abi: Abi::Vector {
@@ -723,8 +722,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         count
                     },
                     largest_niche: element.largest_niche.clone(),
-                    size,
-                    align,
+                    pref_pos
                 })
             }
 
@@ -742,23 +740,23 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         bug!("union cannot be packed and aligned");
                     }
 
-                    let mut align = if def.repr.pack.is_some() {
+                    let base_align = if def.repr.pack.is_some() {
                         dl.i8_align
                     } else {
                         dl.aggregate_align
                     };
 
+                    let mut pref_pos = LayoutPositionPref::new(Size::ZERO, base_align);
+
                     if let Some(repr_align) = def.repr.align {
-                        align = align.max(AbiAndPrefAlign::new(repr_align));
+                        pref_pos = pref_pos.align_to(AbiAndPrefAlign::new(repr_align));
                     }
 
                     let optimize = !def.repr.inhibit_union_abi_opt();
-                    let mut size = Size::ZERO;
                     let mut abi = Abi::Aggregate { sized: true };
                     let index = VariantIdx::new(0);
                     for field in &variants[index] {
                         assert!(!field.is_unsized());
-                        align = align.max(field.align);
 
                         // If all non-ZST fields have the same ABI, forward this ABI
                         if optimize && !field.is_zst() {
@@ -781,7 +779,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                 Abi::Aggregate { .. }  => Abi::Aggregate { sized: true },
                             };
 
-                            if size == Size::ZERO {
+                            if pref_pos.size == Size::ZERO {
                                 // first non ZST: initialize 'abi'
                                 abi = field_abi;
                             } else if abi != field_abi  {
@@ -789,21 +787,22 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                 abi = Abi::Aggregate { sized: true };
                             }
                         }
-
-                        size = cmp::max(size, field.size);
+                        pref_pos = pref_pos.max(field.pref_pos);
                     }
 
                     if let Some(pack) = def.repr.pack {
-                        align = align.min(AbiAndPrefAlign::new(pack));
+                        pref_pos = pref_pos.pack_to(AbiAndPrefAlign::new(pack));
                     }
 
+                    // preserve stride == size
+                    let pref_pos = pref_pos.strided();
+
                     return Ok(tcx.intern_layout(LayoutDetails {
                         variants: Variants::Single { index },
                         fields: FieldPlacement::Union(variants[index].len()),
                         abi,
                         largest_niche: None,
-                        align,
-                        size: size.align_to(align.abi)
+                        pref_pos
                     }));
                 }
 
@@ -967,13 +966,16 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                     &def.repr, StructKind::AlwaysSized)?;
                                 st.variants = Variants::Single { index: j };
 
-                                align = align.max(st.align);
+                                align = align.max(st.pref_pos.align);
 
                                 Ok(st)
                             }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
                             let offset = st[i].fields.offset(field_index) + niche.offset;
-                            let size = st[i].size;
+                            let size = st[i].pref_pos.size;
+
+                            let pref_pos =
+                                LayoutPositionPref::new(size, align);
 
                             let mut abi = match st[i].abi {
                                 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
@@ -1023,8 +1025,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                 },
                                 abi,
                                 largest_niche,
-                                size,
-                                align,
+                                pref_pos,
                             }));
                         }
                     }
@@ -1053,8 +1054,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 assert!(min <= max, "discriminant range is {}...{}", min, max);
                 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
 
-                let mut align = dl.aggregate_align;
-                let mut size = Size::ZERO;
+                let mut pref_pos = LayoutPositionPref::new(Size::ZERO, dl.aggregate_align);
 
                 // We're interested in the smallest alignment, so start large.
                 let mut start_align = Align::from_bytes(256).unwrap();
@@ -1069,33 +1069,34 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 if def.repr.c() {
                     for fields in &variants {
                         for field in fields {
-                            prefix_align = prefix_align.max(field.align.abi);
+                            prefix_align = prefix_align.max(field.pref_pos.align.abi);
                         }
                     }
                 }
 
+                let prefix_mem_pos = MemoryPosition::new(min_ity.size(), prefix_align);
+
                 // Create the set of structs that represent each variant.
                 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                     let mut st = self.univariant_uninterned(ty, &field_layouts,
-                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
+                        &def.repr, StructKind::Prefixed(prefix_mem_pos))?;
                     st.variants = Variants::Single { index: i };
                     // Find the first field we can't move later
                     // to make room for a larger discriminant.
                     for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
-                        if !field.is_zst() || field.align.abi.bytes() != 1 {
-                            start_align = start_align.min(field.align.abi);
+                        if !field.is_zst() || field.pref_pos.align.abi.bytes() != 1 {
+                            start_align = start_align.min(field.pref_pos.align.abi);
                             break;
                         }
                     }
-                    size = cmp::max(size, st.size);
-                    align = align.max(st.align);
+                    pref_pos = pref_pos.max(st.pref_pos);
                     Ok(st)
                 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
                 // Align the maximum variant size to the largest alignment.
-                size = size.align_to(align.abi);
+                pref_pos = pref_pos.strided();
 
-                if size.bytes() >= dl.obj_size_bound() {
+                if pref_pos.size.bytes() >= dl.obj_size_bound() {
                     return Err(LayoutError::SizeOverflow(ty));
                 }
 
@@ -1150,8 +1151,8 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                                     }
                                 }
                                 // We might be making the struct larger.
-                                if variant.size <= old_ity_size {
-                                    variant.size = new_ity_size;
+                                if variant.pref_pos.size <= old_ity_size {
+                                    variant.pref_pos.size = new_ity_size;
                                 }
                             }
                             _ => bug!()
@@ -1165,7 +1166,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                 };
                 let mut abi = Abi::Aggregate { sized: true };
-                if tag.value.size(dl) == size {
+                if tag.value.size(dl) == pref_pos.size {
                     abi = Abi::Scalar(tag.clone());
                 } else {
                     // Try to use a ScalarPair for all tagged enums.
@@ -1221,8 +1222,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                         };
                         if pair_offsets[0] == Size::ZERO &&
                             pair_offsets[1] == *offset &&
-                            align == pair.align &&
-                            size == pair.size {
+                            pref_pos == pair.pref_pos {
                             // We can use `ScalarPair` only when it matches our
                             // already computed layout (including `#[repr(C)]`).
                             abi = pair.abi;
@@ -1249,8 +1249,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     },
                     largest_niche,
                     abi,
-                    align,
-                    size
+                    pref_pos
                 })
             }
 
@@ -1448,8 +1447,6 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             StructKind::AlwaysSized,
         )?;
 
-        let (prefix_size, prefix_align) = (prefix.size, prefix.align);
-
         // Split the prefix layout into the "outer" fields (upvars and
         // discriminant) and the "promoted" fields. Promoted fields will
         // get included in each variant that requested them in
@@ -1487,8 +1484,8 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             _ => bug!(),
         };
 
-        let mut size = prefix.size;
-        let mut align = prefix.align;
+        let mut pref_pos = prefix.pref_pos;
+        let prefix_mem_pos = pref_pos.mem_pos();
         let variants = info.variant_fields.iter_enumerated().map(|(index, variant_fields)| {
             // Only include overlap-eligible fields when we compute our variant layout.
             let variant_only_tys = variant_fields
@@ -1509,7 +1506,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     .map(|ty| self.layout_of(ty))
                     .collect::<Result<Vec<_>, _>>()?,
                 &ReprOptions::default(),
-                StructKind::Prefixed(prefix_size, prefix_align.abi))?;
+                StructKind::Prefixed(prefix_mem_pos))?;
             variant.variants = Variants::Single { index };
 
             let (offsets, memory_index) = match variant.fields {
@@ -1558,12 +1555,12 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 memory_index: combined_memory_index,
             };
 
-            size = size.max(variant.size);
-            align = align.max(variant.align);
+            pref_pos = pref_pos.max(variant.pref_pos);
             Ok(variant)
         }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
 
-        size = size.align_to(align.abi);
+        // preserve stride == size
+        pref_pos = pref_pos.strided();
 
         let abi = if prefix.abi.is_uninhabited() ||
                      variants.iter().all(|v| v.abi.is_uninhabited()) {
@@ -1582,8 +1579,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             fields: outer_fields,
             abi,
             largest_niche: prefix.largest_niche,
-            size,
-            align,
+            pref_pos,
         });
         debug!("generator layout ({:?}): {:#?}", ty, layout);
         Ok(layout)
@@ -1616,8 +1612,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
             let type_desc = format!("{:?}", layout.ty);
             self.tcx.sess.code_stats.record_type_size(kind,
                                                       type_desc,
-                                                      layout.align.abi,
-                                                      layout.size,
+                                                      layout.pref_pos.mem_pos(),
                                                       packed,
                                                       opt_discr_size,
                                                       variants);
@@ -1655,15 +1650,15 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                     }
                     Ok(field_layout) => {
                         let offset = layout.fields.offset(i);
-                        let field_end = offset + field_layout.size;
+                        let field_end = offset + field_layout.pref_pos.size;
                         if min_size < field_end {
                             min_size = field_end;
                         }
                         session::FieldInfo {
                             name: name.to_string(),
                             offset: offset.bytes(),
-                            size: field_layout.size.bytes(),
-                            align: field_layout.align.abi.bytes(),
+                            size: field_layout.pref_pos.size.bytes(),
+                            align: field_layout.pref_pos.align.abi.bytes(),
                         }
                     }
                 }
@@ -1676,9 +1671,9 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
                 } else {
                     session::SizeKind::Exact
                 },
-                align: layout.align.abi.bytes(),
+                align: layout.pref_pos.align.abi.bytes(),
                 size: if min_size.bytes() == 0 {
-                    layout.size.bytes()
+                    layout.pref_pos.size.bytes()
                 } else {
                     min_size.bytes()
                 },
@@ -1759,7 +1754,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
         // First try computing a static layout.
         let err = match tcx.layout_of(param_env.and(ty)) {
             Ok(layout) => {
-                return Ok(SizeSkeleton::Known(layout.size));
+                return Ok(SizeSkeleton::Known(layout.pref_pos.size));
             }
             Err(err) => err
         };
@@ -2055,8 +2050,7 @@ where
                     fields: FieldPlacement::Union(fields),
                     abi: Abi::Uninhabited,
                     largest_niche: None,
-                    align: tcx.data_layout.i8_align,
-                    size: Size::ZERO
+                    pref_pos: LayoutPositionPref::new(Size::ZERO, tcx.data_layout.i8_align)
                 })
             }
 
@@ -2211,8 +2205,7 @@ where
             ty::RawPtr(mt) if offset.bytes() == 0 => {
                 cx.layout_of(mt.ty).to_result().ok()
                     .map(|layout| PointeeInfo {
-                        size: layout.size,
-                        align: layout.align.abi,
+                        mem_pos: layout.pref_pos.mem_pos(),
                         safe: None,
                     })
             }
@@ -2250,8 +2243,7 @@ where
 
                 cx.layout_of(ty).to_result().ok()
                     .map(|layout| PointeeInfo {
-                        size: layout.size,
-                        align: layout.align.abi,
+                        mem_pos: layout.pref_pos.mem_pos(),
                         safe: Some(kind),
                     })
             }
@@ -2298,7 +2290,7 @@ where
                             let field = variant.field(cx, i);
                             result = field.to_result().ok()
                                 .and_then(|field| {
-                                    if ptr_end <= field_start + field.size {
+                                    if ptr_end <= field_start + field.pref_pos.size {
                                         // We found the right field, look inside it.
                                         field.pointee_info_at(cx, offset - field_start)
                                     } else {
@@ -2443,8 +2435,7 @@ impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
     fields,
     abi,
     largest_niche,
-    size,
-    align
+    pref_pos
 });
 
 impl_stable_hash_for!(enum crate::ty::layout::Integer {
@@ -2467,6 +2458,16 @@ impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
     pref
 });
 
+impl_stable_hash_for!(struct crate::ty::layout::LayoutPositionPref {
+    size,
+    align
+});
+
+impl_stable_hash_for!(struct crate::ty::layout::MemoryPosition {
+    size,
+    align
+});
+
 impl<'tcx> HashStable<StableHashingContext<'tcx>> for Align {
     fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
         self.bytes().hash_stable(hcx, hasher);
@@ -2678,8 +2679,8 @@ where
 
             if let Some(pointee) = layout.pointee_info_at(cx, offset) {
                 if let Some(kind) = pointee.safe {
-                    attrs.pointee_size = pointee.size;
-                    attrs.pointee_align = Some(pointee.align);
+                    attrs.pointee_size = pointee.mem_pos.size;
+                    attrs.pointee_align = Some(pointee.mem_pos.align);
 
                     // `Box` pointer parameters never alias because ownership is transferred
                     // `&mut` pointer parameters never alias other parameters,
@@ -2809,7 +2810,7 @@ where
                     _ => return,
                 }
 
-                let size = arg.layout.size;
+                let size = arg.layout.pref_pos.size;
                 if arg.layout.is_unsized() || size > Pointer.size(cx) {
                     arg.make_indirect();
                 } else {
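
The `LayoutPositionPref` calls introduced above (`align_to`, `extend`, `strided`, `pack_to`, `max`, `stride_to`, `checked_add`, `checked_mul`, `mem_pos`) bundle the size/align arithmetic that `scalar_pair` and `univariant_uninterned` previously spelled out by hand. A rough reconstruction of the three operations central to `scalar_pair`, with bodies inferred from the old code this diff deletes — an illustrative sketch, not the actual implementation:

use rustc_target::abi::{AbiAndPrefAlign, Size};

#[derive(Copy, Clone, PartialEq)]
pub struct LayoutPositionPref {
    pub size: Size,             // offset reached so far / minimum size
    pub align: AbiAndPrefAlign, // alignment accumulated so far
}

impl LayoutPositionPref {
    // Raise the alignment without moving the offset:
    // the old `align = align.max(other)`.
    pub fn align_to(self, other: AbiAndPrefAlign) -> Self {
        LayoutPositionPref { size: self.size, align: self.align.max(other) }
    }

    // Place `other` after `self`: pad up to the field's ABI alignment,
    // then grow by its size. Returns the new position together with the
    // field's offset — the old `b_offset = a_size.align_to(b_align.abi)`
    // followed by `b_offset + b_size`.
    pub fn extend(self, other: Self) -> (Self, Size) {
        let offset = self.size.align_to(other.align.abi);
        let pos = LayoutPositionPref {
            size: offset + other.size,
            align: self.align.max(other.align),
        };
        (pos, offset)
    }

    // Round the size up to the alignment so that stride == size:
    // the old `size = size.align_to(align.abi)`.
    pub fn strided(self) -> Self {
        LayoutPositionPref {
            size: self.size.align_to(self.align.abi),
            align: self.align,
        }
    }
}

Under this reading, the new `scalar_pair` computes exactly what the deleted lines did: the two `align_to` calls accumulate `max(a_align, b_align, aggregate_align)`, `extend` yields `b_offset` and the unpadded size, and the final `strided` applies the old `align_to(align.abi)`.
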
diff --git a/src/librustc/ty/print/pretty.rs b/src/librustc/ty/print/pretty.rs
index fdd3a1faaa975..f16f134065ebc 100644
--- a/src/librustc/ty/print/pretty.rs
+++ b/src/librustc/ty/print/pretty.rs
@@ -910,6 +910,7 @@ pub trait PrettyPrinter<'tcx>:
                 let ty = self.tcx().lift(&ct.ty).unwrap();
                 let size = self.tcx().layout_of(ty::ParamEnv::empty().and(ty))
                     .unwrap()
+                    .pref_pos
                     .size;
                 let i_str = i.name_str();
                 match data {
diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs
index 51cf7550c30f7..731b0373d80d5 100644
--- a/src/librustc/ty/sty.rs
+++ b/src/librustc/ty/sty.rs
@@ -2276,7 +2276,7 @@ impl<'tcx> Const<'tcx> {
     pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> &'tcx Self {
         let size = tcx.layout_of(ty).unwrap_or_else(|e| {
             panic!("could not compute layout for {:?}: {:?}", ty, e)
-        }).size;
+        }).pref_pos.size;
         Self::from_scalar(tcx, Scalar::from_uint(bits, size), ty.value)
     }
 
@@ -2303,7 +2303,7 @@ impl<'tcx> Const<'tcx> {
         ty: Ty<'tcx>,
     ) -> Option<u128> {
         assert_eq!(self.ty, ty);
-        let size = tcx.layout_of(param_env.with_reveal_all().and(ty)).ok()?.size;
+        let size = tcx.layout_of(param_env.with_reveal_all().and(ty)).ok()?.pref_pos.size;
         // if `ty` does not depend on generic parameters, use an empty param_env
         self.eval(tcx, param_env).val.try_to_bits(size)
     }
diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs
index 287d5705de8c7..1155320064dcb 100644
--- a/src/librustc_codegen_llvm/abi.rs
+++ b/src/librustc_codegen_llvm/abi.rs
@@ -200,7 +200,7 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             return;
         }
         if self.is_sized_indirect() {
-            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+            OperandValue::Ref(val, None, self.layout.pref_pos.align.abi).store(bx, dst)
         } else if self.is_unsized_indirect() {
             bug!("unsized ArgAbi must be handled through store_fn_arg");
         } else if let PassMode::Cast(cast) = self.mode {
@@ -210,7 +210,7 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             if can_store_through_cast_ptr {
                 let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
                 let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
-                bx.store(val, cast_dst, self.layout.align.abi);
+                bx.store(val, cast_dst, self.layout.pref_pos.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
@@ -238,10 +238,10 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 // ...and then memcpy it to the intended destination.
                 bx.memcpy(
                     dst.llval,
-                    self.layout.align.abi,
+                    self.layout.pref_pos.align.abi,
                     llscratch,
                     scratch_align,
-                    bx.const_usize(self.layout.size.bytes()),
+                    bx.const_usize(self.layout.pref_pos.size.bytes()),
                     MemFlags::empty()
                 );
 
@@ -269,7 +269,8 @@ impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Indirect(_, Some(_)) => {
-                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+                let align = self.layout.pref_pos.align.abi;
+                OperandValue::Ref(next(), Some(next()), align).store(bx, dst);
             }
             PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                 let next_arg = next();
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs
index 312c41b88b092..418d4f97569a1 100644
--- a/src/librustc_codegen_llvm/builder.rs
+++ b/src/librustc_codegen_llvm/builder.rs
@@ -546,7 +546,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
         header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).pref_pos.size);
         cg_elem.val.store(&mut body_bx,
             PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
 
diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs
index f38f9dfecd387..3b4f77a6c3eee 100644
--- a/src/librustc_codegen_llvm/common.rs
+++ b/src/librustc_codegen_llvm/common.rs
@@ -202,7 +202,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_usize(&self, i: u64) -> &'ll Value {
-        let bit_size = self.data_layout().pointer_size.bits();
+        let bit_size = self.data_layout().pointer_pos.size.bits();
         if bit_size < 64 {
             // make sure it doesn't overflow
             assert!(i < (1<<bit_size));
@@ -280,9 +280,9 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     Some(GlobalAlloc::Memory(alloc)) => {
                         let init = const_alloc_to_llvm(self, alloc);
                         if alloc.mutability == Mutability::Mutable {
-                            self.static_addr_of_mut(init, alloc.align, None)
+                            self.static_addr_of_mut(init, alloc.mem_pos.align, None)
                         } else {
-                            self.static_addr_of(init, alloc.align, None)
+                            self.static_addr_of(init, alloc.mem_pos.align, None)
                         }
                     }
                     Some(GlobalAlloc::Function(fn_instance)) => {
@@ -314,14 +314,14 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         alloc: &Allocation,
         offset: Size,
     ) -> PlaceRef<'tcx, &'ll Value> {
-        assert_eq!(alloc.align, layout.align.abi);
+        assert_eq!(alloc.mem_pos.align, layout.pref_pos.align.abi);
         let llty = self.type_ptr_to(layout.llvm_type(self));
-        let llval = if layout.size == Size::ZERO {
-            let llval = self.const_usize(alloc.align.bytes());
+        let llval = if layout.pref_pos.size == Size::ZERO {
+            let llval = self.const_usize(alloc.mem_pos.align.bytes());
             unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
         } else {
             let init = const_alloc_to_llvm(self, alloc);
-            let base_addr = self.static_addr_of(init, alloc.align, None);
+            let base_addr = self.static_addr_of(init, alloc.mem_pos.align, None);
 
             let llval = unsafe { llvm::LLVMConstInBoundsGEP(
                 self.const_bitcast(base_addr, self.type_i8p()),
diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs
index fd7054a5a0ada..d08a34377ac3a 100644
--- a/src/librustc_codegen_llvm/consts.rs
+++ b/src/librustc_codegen_llvm/consts.rs
@@ -26,7 +26,7 @@ use std::ffi::{CStr, CString};
 pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
     let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
     let dl = cx.data_layout();
-    let pointer_size = dl.pointer_size.bytes() as usize;
+    let pointer_size = dl.pointer_pos.size.bytes() as usize;
 
     let mut next_offset = 0;
     for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index 4a40349cb73e8..23c265e5232da 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -288,7 +288,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             None
         };
 
-        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
+        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_pos.size.bits());
 
         CodegenCx {
             tcx,
diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs
index f1bf451113152..6186aa04a232c 100644
--- a/src/librustc_codegen_llvm/debuginfo/metadata.rs
+++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs
@@ -28,7 +28,7 @@ use rustc::mir::interpret::truncate;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc::ty::Instance;
 use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, Integer, IntegerExt, LayoutOf,
+use rustc::ty::layout::{self, MemoryPosition, Integer, IntegerExt, LayoutOf,
                         PrimitiveExt, Size, TyLayout, VariantIdx};
 use rustc::ty::subst::{GenericArgKind, SubstsRef};
 use rustc::session::config::{self, DebugInfo};
@@ -338,7 +338,7 @@ fn fixed_vec_metadata(
 
     return_if_metadata_created_in_meantime!(cx, unique_type_id);
 
-    let (size, align) = cx.size_and_align_of(array_or_slice_type);
+    let mem_pos = cx.mem_pos_of(array_or_slice_type);
 
     let upper_bound = match array_or_slice_type.kind {
         ty::Array(_, len) => len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong,
@@ -353,8 +353,8 @@ fn fixed_vec_metadata(
     let metadata = unsafe {
         llvm::LLVMRustDIBuilderCreateArrayType(
             DIB(cx),
-            size.bits(),
-            align.bits() as u32,
+            mem_pos.size.bits(),
+            mem_pos.align.bits() as u32,
             element_type_metadata,
             subscripts)
     };
@@ -377,25 +377,24 @@ fn vec_slice_metadata(
 
     let slice_type_name = compute_debuginfo_type_name(cx.tcx, slice_ptr_type, true);
 
-    let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type);
-    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize);
+    let pointer_mem_pos = cx.mem_pos_of(data_ptr_type);
+    let usize_mem_pos = cx.mem_pos_of(cx.tcx.types.usize);
+    let pointer_stride = pointer_mem_pos.stride_to(usize_mem_pos.align).size;
 
     let member_descriptions = vec![
         MemberDescription {
             name: "data_ptr".to_owned(),
             type_metadata: data_ptr_metadata,
             offset: Size::ZERO,
-            size: pointer_size,
-            align: pointer_align,
+            mem_pos: pointer_mem_pos,
             flags: DIFlags::FlagZero,
             discriminant: None,
         },
         MemberDescription {
             name: "length".to_owned(),
             type_metadata: type_metadata(cx, cx.tcx.types.usize, span),
-            offset: pointer_size,
-            size: usize_size,
-            align: usize_align,
+            offset: pointer_stride,
+            mem_pos: usize_mem_pos,
             flags: DIFlags::FlagZero,
             discriminant: None,
         },
@@ -496,8 +495,7 @@ fn trait_pointer_metadata(
                 cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
                 syntax_pos::DUMMY_SP),
             offset: layout.fields.offset(0),
-            size: data_ptr_field.size,
-            align: data_ptr_field.align.abi,
+            mem_pos: data_ptr_field.pref_pos.mem_pos(),
             flags: DIFlags::FlagArtificial,
             discriminant: None,
         },
@@ -505,8 +503,7 @@ fn trait_pointer_metadata(
             name: "vtable".to_owned(),
             type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
             offset: layout.fields.offset(1),
-            size: vtable_field.size,
-            align: vtable_field.align.abi,
+            mem_pos: vtable_field.pref_pos.mem_pos(),
             flags: DIFlags::FlagArtificial,
             discriminant: None,
         },
@@ -656,12 +653,12 @@ pub fn type_metadata(
                     // anything reading the debuginfo for a recursive
                     // type is going to see *somthing* weird - the only
                     // question is what exactly it will see
-                    let (size, align) = cx.size_and_align_of(t);
+                    let mem_pos = cx.mem_pos_of(t);
                     llvm::LLVMRustDIBuilderCreateBasicType(
                         DIB(cx),
                         SmallCStr::new("<recur_type>").as_ptr(),
-                        size.bits(),
-                        align.bits() as u32,
+                        mem_pos.size.bits(),
+                        mem_pos.align.bits() as u32,
                         DW_ATE_unsigned)
                 }
             };
@@ -854,14 +851,14 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
         _ => bug!("debuginfo::basic_type_metadata - t is invalid type")
     };
 
-    let (size, align) = cx.size_and_align_of(t);
+    let mem_pos = cx.mem_pos_of(t);
     let name = SmallCStr::new(name);
     let ty_metadata = unsafe {
         llvm::LLVMRustDIBuilderCreateBasicType(
             DIB(cx),
             name.as_ptr(),
-            size.bits(),
-            align.bits() as u32,
+            mem_pos.size.bits(),
+            mem_pos.align.bits() as u32,
             encoding)
     };
 
@@ -884,15 +881,15 @@ fn pointer_type_metadata(
     pointer_type: Ty<'tcx>,
     pointee_type_metadata: &'ll DIType,
 ) -> &'ll DIType {
-    let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type);
+    let pointer_mem_pos = cx.mem_pos_of(pointer_type);
     let name = compute_debuginfo_type_name(cx.tcx, pointer_type, false);
     let name = SmallCStr::new(&name);
     unsafe {
         llvm::LLVMRustDIBuilderCreatePointerType(
             DIB(cx),
             pointee_type_metadata,
-            pointer_size.bits(),
-            pointer_align.bits() as u32,
+            pointer_mem_pos.size.bits(),
+            pointer_mem_pos.align.bits() as u32,
             name.as_ptr())
     }
 }
@@ -1036,8 +1033,7 @@ struct MemberDescription<'ll> {
     name: String,
     type_metadata: &'ll DIType,
     offset: Size,
-    size: Size,
-    align: Align,
+    mem_pos: MemoryPosition,
     flags: DIFlags,
     discriminant: Option<u64>,
 }
@@ -1054,8 +1050,8 @@ impl<'ll> MemberDescription<'ll> {
                 member_name.as_ptr(),
                 unknown_file_metadata(cx),
                 UNKNOWN_LINE_NUMBER,
-                self.size.bits(),
-                self.align.bits() as u32,
+                self.mem_pos.size.bits(),
+                self.mem_pos.align.bits() as u32,
                 self.offset.bits(),
                 match self.discriminant {
                     None => None,
@@ -1128,8 +1124,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
                 name,
                 type_metadata: type_metadata(cx, field.ty, self.span),
                 offset: layout.fields.offset(i),
-                size: field.size,
-                align: field.align.abi,
+                mem_pos: field.pref_pos.mem_pos(),
                 flags: DIFlags::FlagZero,
                 discriminant: None,
             }
@@ -1189,13 +1184,12 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
                                   -> Vec<MemberDescription<'ll>> {
         let layout = cx.layout_of(self.ty);
         self.component_types.iter().enumerate().map(|(i, &component_type)| {
-            let (size, align) = cx.size_and_align_of(component_type);
+            let mem_pos = cx.mem_pos_of(component_type);
             MemberDescription {
                 name: format!("__{}", i),
                 type_metadata: type_metadata(cx, component_type, self.span),
                 offset: layout.fields.offset(i),
-                size,
-                align,
+                mem_pos,
                 flags: DIFlags::FlagZero,
                 discriminant: None,
             }
@@ -1252,8 +1246,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
                 name: f.ident.to_string(),
                 type_metadata: type_metadata(cx, field.ty, self.span),
                 offset: Size::ZERO,
-                size: field.size,
-                align: field.align.abi,
+                mem_pos: field.pref_pos.mem_pos(),
                 flags: DIFlags::FlagZero,
                 discriminant: None,
             }
@@ -1386,8 +1379,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                         },
                         type_metadata: variant_type_metadata,
                         offset: Size::ZERO,
-                        size: self.layout.size,
-                        align: self.layout.align.abi,
+                        mem_pos: self.layout.pref_pos.mem_pos(),
                         flags: DIFlags::FlagZero,
                         discriminant: None,
                     }
@@ -1435,8 +1427,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                         },
                         type_metadata: variant_type_metadata,
                         offset: Size::ZERO,
-                        size: self.layout.size,
-                        align: self.layout.align.abi,
+                        mem_pos: self.layout.pref_pos.mem_pos(),
                         flags: DIFlags::FlagZero,
                         discriminant: Some(
                             self.layout.ty.discriminant_for_variant(cx.tcx, i).unwrap().val as u64
@@ -1490,7 +1481,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                             }
                             let inner_offset = offset - field_offset;
                             let field = layout.field(cx, i);
-                            if inner_offset + size <= field.size {
+                            if inner_offset + size <= field.pref_pos.size {
                                 write!(name, "{}$", i).unwrap();
                                 compute_field_path(cx, name, field, inner_offset, size);
                             }
@@ -1499,7 +1490,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                     compute_field_path(cx, &mut name,
                                        self.layout,
                                        self.layout.fields.offset(discr_index),
-                                       self.layout.field(cx, discr_index).size);
+                                       self.layout.field(cx, discr_index).pref_pos.size);
                     variant_info_for(*niche_variants.start()).map_struct_name(|variant_name| {
                         name.push_str(variant_name);
                     });
@@ -1510,8 +1501,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                             name,
                             type_metadata: variant_type_metadata,
                             offset: Size::ZERO,
-                            size: variant.size,
-                            align: variant.align.abi,
+                            mem_pos: variant.pref_pos.mem_pos(),
                             flags: DIFlags::FlagZero,
                             discriminant: None,
                         }
@@ -1554,8 +1544,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                             name: variant_info.variant_name(),
                             type_metadata: variant_type_metadata,
                             offset: Size::ZERO,
-                            size: self.layout.size,
-                            align: self.layout.align.abi,
+                            mem_pos: self.layout.pref_pos.mem_pos(),
                             flags: DIFlags::FlagZero,
                             discriminant: niche_value,
                         }
@@ -1579,7 +1568,7 @@ impl VariantMemberDescriptionFactory<'ll, 'tcx> {
     fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>)
                                       -> Vec<MemberDescription<'ll>> {
         self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
-            let (size, align) = cx.size_and_align_of(ty);
+            let mem_pos = cx.mem_pos_of(ty);
             MemberDescription {
                 name: name.to_string(),
                 type_metadata: if use_enum_fallback(cx) {
@@ -1593,8 +1582,7 @@ impl VariantMemberDescriptionFactory<'ll, 'tcx> {
                     type_metadata(cx, ty, self.span)
                 },
                 offset: self.offsets[i],
-                size,
-                align,
+                mem_pos,
                 flags: DIFlags::FlagZero,
                 discriminant: None,
             }
@@ -1859,8 +1847,8 @@ fn prepare_enum_metadata(
                 enum_name.as_ptr(),
                 file_metadata,
                 UNKNOWN_LINE_NUMBER,
-                layout.size.bits(),
-                layout.align.abi.bits() as u32,
+                layout.pref_pos.size.bits(),
+                layout.pref_pos.align.abi.bits() as u32,
                 DIFlags::FlagZero,
                 None,
                 0, // RuntimeLang
@@ -1932,7 +1920,7 @@ fn prepare_enum_metadata(
             ..
         } => {
             let discr_type = discr.value.to_ty(cx.tcx);
-            let (size, align) = cx.size_and_align_of(discr_type);
+            let mem_pos = cx.mem_pos_of(discr_type);
 
             let discr_metadata = basic_type_metadata(cx, discr_type);
             unsafe {
@@ -1942,8 +1930,8 @@ fn prepare_enum_metadata(
                     discriminator_name,
                     file_metadata,
                     UNKNOWN_LINE_NUMBER,
-                    size.bits(),
-                    align.bits() as u32,
+                    mem_pos.size.bits(),
+                    mem_pos.align.bits() as u32,
                     layout.fields.offset(discr_index).bits(),
                     DIFlags::FlagArtificial,
                     discr_metadata))
@@ -1980,8 +1968,8 @@ fn prepare_enum_metadata(
             ptr::null_mut(),
             file_metadata,
             UNKNOWN_LINE_NUMBER,
-            layout.size.bits(),
-            layout.align.abi.bits() as u32,
+            layout.pref_pos.size.bits(),
+            layout.pref_pos.align.abi.bits() as u32,
             DIFlags::FlagZero,
             discriminator_metadata,
             empty_array,
@@ -1998,8 +1986,8 @@ fn prepare_enum_metadata(
             enum_name.as_ptr(),
             file_metadata,
             UNKNOWN_LINE_NUMBER,
-            layout.size.bits(),
-            layout.align.abi.bits() as u32,
+            layout.pref_pos.size.bits(),
+            layout.pref_pos.align.abi.bits() as u32,
             DIFlags::FlagZero,
             None,
             type_array,
@@ -2144,7 +2132,7 @@ fn create_struct_stub(
     unique_type_id: UniqueTypeId,
     containing_scope: Option<&'ll DIScope>,
 ) -> &'ll DICompositeType {
-    let (struct_size, struct_align) = cx.size_and_align_of(struct_type);
+    let struct_mem_pos = cx.mem_pos_of(struct_type);
 
     let name = SmallCStr::new(struct_type_name);
     let unique_type_id = SmallCStr::new(
@@ -2162,8 +2150,8 @@ fn create_struct_stub(
             name.as_ptr(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
-            struct_size.bits(),
-            struct_align.bits() as u32,
+            struct_mem_pos.size.bits(),
+            struct_mem_pos.align.bits() as u32,
             DIFlags::FlagZero,
             None,
             empty_array,
@@ -2182,7 +2170,7 @@ fn create_union_stub(
     unique_type_id: UniqueTypeId,
     containing_scope: &'ll DIScope,
 ) -> &'ll DICompositeType {
-    let (union_size, union_align) = cx.size_and_align_of(union_type);
+    let union_mem_pos = cx.mem_pos_of(union_type);
 
     let name = SmallCStr::new(union_type_name);
     let unique_type_id = SmallCStr::new(
@@ -2200,8 +2188,8 @@ fn create_union_stub(
             name.as_ptr(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
-            union_size.bits(),
-            union_align.bits() as u32,
+            union_mem_pos.size.bits(),
+            union_mem_pos.align.bits() as u32,
             DIFlags::FlagZero,
             Some(empty_array),
             0, // RuntimeLang
@@ -2304,7 +2292,7 @@ pub fn create_vtable_metadata(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, vtable: &
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
             Size::ZERO.bits(),
-            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+            cx.tcx.data_layout.pointer_pos.align.abi.bits() as u32,
             DIFlags::FlagArtificial,
             None,
             empty_array,
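
// --- Illustrative sketch (not part of the patch) -----------------------------
// The debuginfo hunks above fold every `(size, align)` pair into a single
// `mem_pos: MemoryPosition`. A minimal sketch of the shape these call sites
// assume; the field names and the `stride_to` semantics are inferred from
// usage here (the real definition lives in `rustc::ty::layout`).
use rustc::ty::layout::{Align, Size};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct MemoryPosition {
    pub size: Size,
    pub align: Align,
}

impl MemoryPosition {
    pub fn new(size: Size, align: Align) -> Self {
        MemoryPosition { size, align }
    }

    /// Presumed meaning of `stride_to` as used for the slice `length` field
    /// above: round this position's size up to the next field's alignment.
    pub fn stride_to(self, next_align: Align) -> Self {
        MemoryPosition::new(self.size.align_to(next_align), self.align)
    }
}
// -----------------------------------------------------------------------------
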
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index e1ce7f622e2ef..6f9d1625c73c6 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -769,17 +769,17 @@ fn copy_intrinsic(
     src: &'ll Value,
     count: &'ll Value,
 ) {
-    let (size, align) = bx.size_and_align_of(ty);
-    let size = bx.mul(bx.const_usize(size.bytes()), count);
+    let mem_pos = bx.mem_pos_of(ty);
+    let size = bx.mul(bx.const_usize(mem_pos.size.bytes()), count);
     let flags = if volatile {
         MemFlags::VOLATILE
     } else {
         MemFlags::empty()
     };
     if allow_overlap {
-        bx.memmove(dst, align, src, align, size, flags);
+        bx.memmove(dst, mem_pos.align, src, mem_pos.align, size, flags);
     } else {
-        bx.memcpy(dst, align, src, align, size, flags);
+        bx.memcpy(dst, mem_pos.align, src, mem_pos.align, size, flags);
     }
 }
 
@@ -791,14 +791,14 @@ fn memset_intrinsic(
     val: &'ll Value,
     count: &'ll Value
 ) {
-    let (size, align) = bx.size_and_align_of(ty);
-    let size = bx.mul(bx.const_usize(size.bytes()), count);
+    let mem_pos = bx.mem_pos_of(ty);
+    let size = bx.mul(bx.const_usize(mem_pos.size.bytes()), count);
     let flags = if volatile {
         MemFlags::VOLATILE
     } else {
         MemFlags::empty()
     };
-    bx.memset(dst, val, size, align, flags);
+    bx.memset(dst, val, size, mem_pos.align, flags);
 }
 
 fn try_intrinsic(
@@ -810,7 +810,7 @@ fn try_intrinsic(
 ) {
     if bx.sess().no_landing_pads() {
         bx.call(func, &[data], None);
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_align = bx.tcx().data_layout.pointer_pos.align.abi;
         bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, func, data, local_ptr, dest);
@@ -984,7 +984,7 @@ fn codegen_gnu_try(
         };
         catch.add_clause(vals, tydesc);
         let ptr = catch.extract_value(vals, 0);
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_align = bx.tcx().data_layout.pointer_pos.align.abi;
         let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
         catch.store(ptr, bitcast, ptr_align);
         catch.ret(bx.const_i32(1));
@@ -1262,11 +1262,11 @@ fn generic_simd_intrinsic(
         let (i_xn, in_elem_bitwidth) = match in_elem.kind {
             ty::Int(i) => (
                 args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
+                i.bit_width().unwrap_or(bx.data_layout().pointer_pos.size.bits() as _)
             ),
             ty::Uint(i) => (
                 args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _)
+                i.bit_width().unwrap_or(bx.data_layout().pointer_pos.size.bits() as _)
             ),
             _ => return_error!(
                 "vector argument `{}`'s element type `{}`, expected integer element type",
@@ -1876,7 +1876,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
         let lhs = args[0].immediate();
         let rhs = args[1].immediate();
         let is_add = name == "simd_saturating_add";
-        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+        let ptr_bits = bx.tcx().data_layout.pointer_pos.size.bits() as _;
         let (signed, elem_width, elem_ty) = match in_elem.kind {
             ty::Int(i) =>
                 (
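
// --- Illustrative sketch (not part of the patch) -----------------------------
// What `copy_intrinsic`/`memset_intrinsic` now compute from one `mem_pos`
// instead of a `(size, align)` tuple: total bytes = element size * count, with
// the element's alignment reused on both sides of the copy. Standalone model
// with plain integers (names are hypothetical):
fn total_copy_bytes(elem_size: u64, count: u64) -> u64 {
    elem_size.checked_mul(count).expect("copy size overflows u64")
}

fn main() {
    // Copying 16 `u32`s moves 64 bytes at the element's 4-byte alignment.
    assert_eq!(total_copy_bytes(4, 16), 64);
}
// -----------------------------------------------------------------------------
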
diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs
index c21e62e7562e3..74e025d59f8a1 100644
--- a/src/librustc_codegen_llvm/type_of.rs
+++ b/src/librustc_codegen_llvm/type_of.rs
@@ -2,7 +2,9 @@ use crate::abi::{FnAbi};
 use crate::common::*;
 use crate::type_::Type;
 use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, Align, LayoutOf, FnAbiExt, PointeeInfo, Size, TyLayout};
+use rustc::ty::layout::{
+    self, Align, MemoryPosition, LayoutOf, FnAbiExt, PointeeInfo, Size, TyLayout
+};
 use rustc_target::abi::TyLayoutMethods;
 use rustc::ty::print::obsolete::DefPathBasedNames;
 use rustc_codegen_ssa::traits::*;
@@ -23,7 +25,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
             // x86_mmx" type. In general there shouldn't be a need for other
             // one-element SIMD vectors, so it's assumed this won't clash with
             // much else.
-            let use_x86_mmx = count == 1 && layout.size.bits() == 64 &&
+            let use_x86_mmx = count == 1 && layout.pref_pos.size.bits() == 64 &&
                 (cx.sess().target.target.arch == "x86" ||
                  cx.sess().target.target.arch == "x86_64");
             if use_x86_mmx {
@@ -74,7 +76,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
 
     match layout.fields {
         layout::FieldPlacement::Union(_) => {
-            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+            let fill = cx.type_padding_filler(layout.pref_pos.size, layout.pref_pos.align.abi);
             let packed = false;
             match name {
                 None => {
@@ -114,15 +116,15 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
 
     let mut packed = false;
     let mut offset = Size::ZERO;
-    let mut prev_effective_align = layout.align.abi;
+    let mut prev_effective_align = layout.pref_pos.align.abi;
     let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
     for i in layout.fields.index_by_increasing_offset() {
         let target_offset = layout.fields.offset(i as usize);
         let field = layout.field(cx, i);
-        let effective_field_align = layout.align.abi
-            .min(field.align.abi)
+        let effective_field_align = layout.pref_pos.align.abi
+            .min(field.pref_pos.align.abi)
             .restrict_for_offset(target_offset);
-        packed |= effective_field_align < field.align.abi;
+        packed |= effective_field_align < field.pref_pos.align.abi;
 
         debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                 effective_field_align: {}",
@@ -135,24 +137,24 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
         debug!("    padding before: {:?}", padding);
 
         result.push(field.llvm_type(cx));
-        offset = target_offset + field.size;
+        offset = target_offset + field.pref_pos.size;
         prev_effective_align = effective_field_align;
     }
     if !layout.is_unsized() && field_count > 0 {
-        if offset > layout.size {
+        if offset > layout.pref_pos.size {
             bug!("layout: {:#?} stride: {:?} offset: {:?}",
-                 layout, layout.size, offset);
+                 layout, layout.pref_pos.size, offset);
         }
-        let padding = layout.size - offset;
+        let padding = layout.pref_pos.size - offset;
         let padding_align = prev_effective_align;
-        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+        assert_eq!(offset.align_to(padding_align) + padding, layout.pref_pos.size);
         debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
-               padding, offset, layout.size);
+               padding, offset, layout.pref_pos.size);
         result.push(cx.type_padding_filler(padding, padding_align));
         assert_eq!(result.len(), 1 + field_count * 2);
     } else {
         debug!("struct_llfields: offset: {:?} stride: {:?}",
-               offset, layout.size);
+               offset, layout.pref_pos.size);
     }
 
     (result, packed)
@@ -160,16 +162,15 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
 
 impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
     pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
-        self.layout_of(ty).align.abi
+        self.layout_of(ty).pref_pos.align.abi
     }
 
     pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
-        self.layout_of(ty).size
+        self.layout_of(ty).pref_pos.size
     }
 
-    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
-        let layout = self.layout_of(ty);
-        (layout.size, layout.align.abi)
+    pub fn mem_pos_of(&self, ty: Ty<'tcx>) -> MemoryPosition {
+        self.layout_of(ty).pref_pos.mem_pos()
     }
 }
 
@@ -305,7 +306,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             layout::Pointer => {
                 // If we know the alignment, pick something better than i8.
                 let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.type_pointee_for_align(pointee.align)
+                    cx.type_pointee_for_align(pointee.mem_pos.align)
                 } else {
                     cx.type_i8()
                 };
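
// --- Illustrative sketch (not part of the patch) -----------------------------
// Worked example of the trailing-padding invariant `struct_llfields` asserts
// above: after the last field, `offset.align_to(prev_align) + padding` must
// land exactly on the layout's size. Plain-integer model (byte units assumed):
fn main() {
    let align_to = |x: u64, a: u64| (x + a - 1) / a * a;
    // A `{ u32, u8 }` struct: the last field ends at offset 5, the type's
    // size is 8, so 3 bytes of trailing padding are emitted.
    let (size, offset, prev_align) = (8u64, 5u64, 1u64);
    let padding = size - offset;
    assert_eq!(align_to(offset, prev_align) + padding, size);
    println!("trailing padding: {} bytes", padding);
}
// -----------------------------------------------------------------------------
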
diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs
index 86b0ad761af6a..dda0370956d08 100644
--- a/src/librustc_codegen_llvm/va_arg.rs
+++ b/src/librustc_codegen_llvm/va_arg.rs
@@ -4,7 +4,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods};
-use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size};
+use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, MemoryPosition};
 use rustc::ty::Ty;
 
 #[allow(dead_code)]
@@ -24,8 +24,7 @@ fn emit_direct_ptr_va_arg(
     bx: &mut Builder<'a, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
     llty: &'ll Type,
-    size: Size,
-    align: Align,
+    mem_pos: MemoryPosition,
     slot_size: Align,
     allow_higher_align: bool
 ) -> (&'ll Value, Align) {
@@ -36,23 +35,23 @@ fn emit_direct_ptr_va_arg(
         list.immediate()
     };
 
-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_pos.align.abi);
 
-    let (addr, addr_align) = if allow_higher_align && align > slot_size {
-        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+    let (addr, addr_align) = if allow_higher_align && mem_pos.align > slot_size {
+        (round_pointer_up_to_alignment(bx, ptr, mem_pos.align, bx.cx().type_i8p()), mem_pos.align)
     } else {
         (ptr, slot_size)
     };
 
 
-    let aligned_size = size.align_to(slot_size).bytes() as i32;
+    let aligned_size = mem_pos.size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
     let next = bx.inbounds_gep(addr, &[full_direct_size]);
-    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_pos.align.abi);
 
-    if size.bytes() < slot_size.bytes() &&
+    if mem_pos.size.bytes() < slot_size.bytes() &&
             &*bx.tcx().sess.target.target.target_endian == "big" {
-        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - mem_pos.size.bytes()) as i32);
         let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
         (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
     } else {
@@ -69,20 +68,18 @@ fn emit_ptr_va_arg(
     allow_higher_align: bool
 ) -> &'ll Value {
     let layout = bx.cx.layout_of(target_ty);
-    let (llty, size, align) = if indirect {
+    let (llty, pref_pos) = if indirect {
         (bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
-         bx.cx.data_layout().pointer_size,
-         bx.cx.data_layout().pointer_align)
+         bx.cx.data_layout().pointer_pos)
     } else {
         (layout.llvm_type(bx.cx),
-         layout.size,
-         layout.align)
+         layout.pref_pos)
     };
-    let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi,
+    let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, pref_pos.mem_pos(),
                                                     slot_size, allow_higher_align);
     if indirect {
         let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        bx.load(tmp_ret, pref_pos.align.abi)
     } else {
         bx.load(addr, addr_align)
     }
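
// --- Illustrative sketch (not part of the patch) -----------------------------
// The slot arithmetic in `emit_direct_ptr_va_arg` above, as a standalone
// model: the va_list advances by the argument's size rounded up to the slot
// size, and big-endian targets read a small value from the high end of its
// slot. Function name and byte units are hypothetical:
fn va_slot(size: u64, slot: u64, big_endian: bool) -> (u64, u64) {
    let align_to = |x: u64, a: u64| (x + a - 1) / a * a;
    let advance = align_to(size, slot); // `full_direct_size`
    let in_slot = if big_endian && size < slot {
        slot - size // `adjusted_size`
    } else {
        0
    };
    (advance, in_slot)
}

fn main() {
    assert_eq!(va_slot(4, 8, true), (8, 4));  // a u32 in an 8-byte slot, BE
    assert_eq!(va_slot(4, 8, false), (8, 0)); // same argument, little-endian
}
// -----------------------------------------------------------------------------
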
diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs
index c3f2a5161ae1c..601b10a6c3219 100644
--- a/src/librustc_codegen_ssa/base.rs
+++ b/src/librustc_codegen_ssa/base.rs
@@ -181,7 +181,7 @@ pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 if src_f.is_zst() {
                     continue;
                 }
-                assert_eq!(src_layout.size, src_f.size);
+                assert_eq!(src_layout.pref_pos.size, src_f.pref_pos.size);
 
                 let dst_f = dst_layout.field(bx.cx(), i);
                 assert_ne!(src_f.ty, dst_f.ty);
@@ -348,7 +348,7 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     layout: TyLayout<'tcx>,
     flags: MemFlags,
 ) {
-    let size = layout.size.bytes();
+    let size = layout.pref_pos.size.bytes();
     if size == 0 {
         return;
     }
diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs
index 9818bb78e757b..9ddf3a0db3b3b 100644
--- a/src/librustc_codegen_ssa/glue.rs
+++ b/src/librustc_codegen_ssa/glue.rs
@@ -16,8 +16,8 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
            t, info, layout);
     if !layout.is_unsized() {
-        let size = bx.const_usize(layout.size.bytes());
-        let align = bx.const_usize(layout.align.abi.bytes());
+        let size = bx.const_usize(layout.pref_pos.size.bytes());
+        let align = bx.const_usize(layout.pref_pos.align.abi.bytes());
         return (size, align);
     }
     match t.kind {
@@ -30,8 +30,8 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let unit = layout.field(bx, 0);
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            (bx.mul(info.unwrap(), bx.const_usize(unit.size.bytes())),
-             bx.const_usize(unit.align.abi.bytes()))
+            (bx.mul(info.unwrap(), bx.const_usize(unit.pref_pos.size.bytes())),
+             bx.const_usize(unit.pref_pos.align.abi.bytes()))
         }
         _ => {
             // First get the size of all statically known fields.
@@ -42,7 +42,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
             let i = layout.fields.count() - 1;
             let sized_size = layout.fields.offset(i).bytes();
-            let sized_align = layout.align.abi.bytes();
+            let sized_align = layout.pref_pos.align.abi.bytes();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
             let sized_size = bx.const_usize(sized_size);
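
// --- Illustrative sketch (not part of the patch) -----------------------------
// The str/slice arm of `size_and_align_of_dst` above: the dynamic size is the
// element count carried in the fat-pointer metadata (`info`) times the
// element's per-index spacing, and the alignment is just the element's.
fn dst_slice_size(elem_stride: u64, len: u64) -> Option<u64> {
    elem_stride.checked_mul(len)
}

fn main() {
    // A `&[u64]` of length 5 is 40 bytes at 8-byte alignment.
    assert_eq!(dst_slice_size(8, 5), Some(40));
}
// -----------------------------------------------------------------------------
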
diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs
index 692027390ec1b..da95d676a11ac 100644
--- a/src/librustc_codegen_ssa/meth.rs
+++ b/src/librustc_codegen_ssa/meth.rs
@@ -29,7 +29,7 @@ impl<'a, 'tcx> VirtualIndex {
             llvtable,
             bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi))
         );
-        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let ptr_align = bx.tcx().data_layout.pointer_pos.align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, ptr_align);
         bx.nonnull_metadata(ptr);
@@ -47,7 +47,7 @@ impl<'a, 'tcx> VirtualIndex {
         debug!("get_int({:?}, {:?})", llvtable, self);
 
         let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
-        let usize_align = bx.tcx().data_layout.pointer_align.abi;
+        let usize_align = bx.tcx().data_layout.pointer_pos.align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, usize_align);
         // Vtable loads are invariant
@@ -109,12 +109,12 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
     // /////////////////////////////////////////////////////////////////////////////////////////////
     let components: Vec<_> = [
         cx.get_fn_addr(Instance::resolve_drop_in_place(cx.tcx(), ty)),
-        cx.const_usize(layout.size.bytes()),
-        cx.const_usize(layout.align.abi.bytes())
+        cx.const_usize(layout.pref_pos.size.bytes()),
+        cx.const_usize(layout.pref_pos.align.abi.bytes())
     ].iter().cloned().chain(methods).collect();
 
     let vtable_const = cx.const_struct(&components, false);
-    let align = cx.data_layout().pointer_align.abi;
+    let align = cx.data_layout().pointer_pos.align.abi;
     let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
 
     cx.create_vtable_metadata(ty, vtable);
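
// --- Illustrative sketch (not part of the patch) -----------------------------
// The vtable shape `get_vtable` above emits, modeled as raw slots (this is an
// internal rustc layout, not a stable ABI): a drop-in-place pointer, then
// size and align, then the trait methods, all pointer-sized - which is why
// the `VirtualIndex` loads use pointer alignment.
fn main() {
    const SIZE: usize = 1;
    const ALIGN: usize = 2;
    const FIRST_METHOD: usize = 3;
    // Slot 0 = drop_in_place, then size 16 / align 8, then two methods.
    let vtable: [usize; 5] = [0xd409, 16, 8, 0x1000, 0x1008];
    assert_eq!(vtable[SIZE], 16);
    assert_eq!(vtable[ALIGN], 8);
    assert_eq!(vtable[FIRST_METHOD], 0x1000);
}
// -----------------------------------------------------------------------------
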
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index 13cd202158b77..7f9e71d69fe25 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -301,7 +301,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         scratch.llval
                     }
                     Ref(llval, _, align) => {
-                        assert_eq!(align, op.layout.align.abi,
+                        assert_eq!(align, op.layout.pref_pos.align.abi,
                                    "return place is unaligned!");
                         llval
                     }
@@ -309,7 +309,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let addr = bx.pointercast(llslot, bx.type_ptr_to(
                     bx.cast_backend_type(&cast_ty)
                 ));
-                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+                bx.load(addr, self.fn_abi.ret.layout.pref_pos.align.abi)
             }
         };
         bx.ret(llval);
@@ -915,12 +915,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         (scratch.llval, scratch.align, true)
                     }
                     _ => {
-                        (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false)
+                        (op.immediate_or_packed_pair(bx), arg.layout.pref_pos.align.abi, false)
                     }
                 }
             }
             Ref(llval, _, align) => {
-                if arg.is_indirect() && align < arg.layout.align.abi {
+                if arg.is_indirect() && align < arg.layout.pref_pos.align.abi {
                     // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                     // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                     // have scary latent bugs around.
@@ -941,7 +941,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let addr = bx.pointercast(llval, bx.type_ptr_to(
                     bx.cast_backend_type(&ty))
                 );
-                llval = bx.load(addr, align.min(arg.layout.align.abi));
+                llval = bx.load(addr, align.min(arg.layout.pref_pos.align.abi));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
@@ -1139,7 +1139,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             })
         };
         if fn_ret.is_indirect() {
-            if dest.align < dest.layout.align.abi {
+            if dest.align < dest.layout.pref_pos.align.abi {
                 // Currently, MIR code generation does not create calls
                 // that store directly to fields of packed structs (in
                 // fact, the calls it creates write only to temps).
@@ -1195,7 +1195,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let src = self.codegen_operand(bx, src);
         let llty = bx.backend_type(src.layout);
         let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-        let align = src.layout.align.abi.min(dst.align);
+        let align = src.layout.pref_pos.align.abi.min(dst.align);
         src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
     }
 
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index ba5e47aeede1b..25c9eeaae6a67 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -146,7 +146,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             llval: llptr,
             llextra,
             layout,
-            align: layout.align.abi,
+            align: layout.pref_pos.align.abi,
         }
     }
 
@@ -210,7 +210,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
             // Newtype of a scalar, scalar pair or vector.
             (OperandValue::Immediate(_), _) |
-            (OperandValue::Pair(..), _) if field.size == self.layout.size => {
+            (OperandValue::Pair(..), _) if field.pref_pos.size == self.layout.pref_pos.size => {
                 assert_eq!(offset.bytes(), 0);
                 self.val
             }
@@ -218,12 +218,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             // Extract a scalar component from a pair.
             (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
                 if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bx.cx()));
+                    assert_eq!(field.pref_pos.size, a.value.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
                 } else {
                     assert_eq!(offset, a.value.size(bx.cx())
                         .align_to(b.value.align(bx.cx()).abi));
-                    assert_eq!(field.size, b.value.size(bx.cx()));
+                    assert_eq!(field.pref_pos.size, b.value.size(bx.cx()));
                     OperandValue::Immediate(b_llval)
                 }
             }
diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs
index 3e7c4ef49fb5a..e406b96a22377 100644
--- a/src/librustc_codegen_ssa/mir/place.rs
+++ b/src/librustc_codegen_ssa/mir/place.rs
@@ -36,7 +36,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             llval,
             llextra: None,
             layout,
-            align: layout.align.abi
+            align: layout.pref_pos.align.abi
         }
     }
 
@@ -64,7 +64,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             llval,
             llextra: None,
             layout,
-            align: layout.align.abi
+            align: layout.pref_pos.align.abi
         }
     }
 
@@ -75,7 +75,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         layout: TyLayout<'tcx>,
     ) -> Self {
         assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
-        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.pref_pos.align.abi);
         Self::new_sized(tmp, layout)
     }
 
@@ -160,7 +160,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 if def.repr.packed() {
                     // FIXME(eddyb) generalize the adjustment when we
                     // start supporting packing to larger alignments.
-                    assert_eq!(self.layout.align.abi.bytes(), 1);
+                    assert_eq!(self.layout.pref_pos.align.abi.bytes(), 1);
                     return simple();
                 }
             }
@@ -368,7 +368,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                         // FIXME(#34427): as workaround for LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
                         let fill_byte = bx.cx().const_u8(0);
-                        let size = bx.cx().const_usize(self.layout.size.bytes());
+                        let size = bx.cx().const_usize(self.layout.pref_pos.size.bytes());
                         bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                     }
 
@@ -399,9 +399,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         // as this will yield the lowest alignment.
         let layout = self.layout.field(bx, 0);
         let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
-            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+            layout.pref_pos.checked_mul(llindex, bx).unwrap_or(layout.pref_pos).size
         } else {
-            layout.size
+            layout.pref_pos.size
         };
 
         PlaceRef {
@@ -428,11 +428,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     }
 
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_start(self.llval, self.layout.size);
+        bx.lifetime_start(self.llval, self.layout.pref_pos.size);
     }
 
     pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_end(self.llval, self.layout.size);
+        bx.lifetime_end(self.llval, self.layout.pref_pos.size);
     }
 }
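
// --- Illustrative sketch (not part of the patch) -----------------------------
// The constant-index arm of `project_index` above: the byte offset is the
// element's per-index spacing times the index, falling back to a single
// element's spacing if the multiplication overflows (mirroring `unwrap_or`):
fn const_index_offset(elem_stride: u64, index: u64) -> u64 {
    elem_stride.checked_mul(index).unwrap_or(elem_stride)
}

fn main() {
    // `[u32; N]` indexed at 3 starts 12 bytes into the array.
    assert_eq!(const_index_offset(4, 3), 12);
}
// -----------------------------------------------------------------------------
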
 
diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs
index 981fdf2298419..fb6e8d0c7e357 100644
--- a/src/librustc_codegen_ssa/mir/rvalue.rs
+++ b/src/librustc_codegen_ssa/mir/rvalue.rs
@@ -91,7 +91,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if let OperandValue::Immediate(v) = cg_elem.val {
                     let zero = bx.const_usize(0);
                     let start = dest.project_index(&mut bx, zero).llval;
-                    let size = bx.const_usize(dest.layout.size.bytes());
+                    let size = bx.const_usize(dest.layout.pref_pos.size.bytes());
 
                     // Use llvm.memset.p0i8.* to initialize all zero arrays
                     if bx.cx().const_to_opt_uint(v) == Some(0) {
@@ -472,7 +472,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                 assert!(bx.cx().type_is_sized(ty));
-                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
+                let val = bx.cx().const_usize(bx.cx().layout_of(ty).pref_pos.size.bytes());
                 let tcx = self.cx.tcx();
                 (bx, OperandRef {
                     val: OperandValue::Immediate(val),
@@ -483,8 +483,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty = self.monomorphize(&content_ty);
                 let content_layout = bx.cx().layout_of(content_ty);
-                let llsize = bx.cx().const_usize(content_layout.size.bytes());
-                let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
+                let llsize = bx.cx().const_usize(content_layout.pref_pos.size.bytes());
+                let llalign = bx.cx().const_usize(content_layout.pref_pos.align.abi.bytes());
                 let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                 let llty_ptr = bx.cx().backend_type(box_layout);
 
diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs
index 65e0940920bd7..f7daa7bf184a8 100644
--- a/src/librustc_lint/types.rs
+++ b/src/librustc_lint/types.rs
@@ -1077,14 +1077,14 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
             let discr_size = tag.value.size(&cx.tcx).bytes();
 
             debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
-                   t, layout.size.bytes(), layout);
+                   t, layout.pref_pos.size.bytes(), layout);
 
             let (largest, slargest, largest_index) = enum_definition.variants
                 .iter()
                 .zip(variants)
                 .map(|(variant, variant_layout)| {
                     // Subtract the size of the enum discriminant.
-                    let bytes = variant_layout.size.bytes().saturating_sub(discr_size);
+                    let bytes = variant_layout.pref_pos.size.bytes().saturating_sub(discr_size);
 
                     debug!("- variant `{}` is {} bytes large",
                            variant.ident,
diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs
index 4f1ac8e51dc20..7fb0c86414871 100644
--- a/src/librustc_mir/build/expr/as_rvalue.rs
+++ b/src/librustc_mir/build/expr/as_rvalue.rs
@@ -583,7 +583,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     // Helper to get a `-1` value of the appropriate type
     fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
         let param_ty = ty::ParamEnv::empty().and(ty);
-        let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits();
+        let bits = self.hir.tcx().layout_of(param_ty).unwrap().pref_pos.size.bits();
         let n = (!0u128) >> (128 - bits);
         let literal = ty::Const::from_bits(self.hir.tcx(), n, param_ty);
 
@@ -594,7 +594,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
         assert!(ty.is_signed());
         let param_ty = ty::ParamEnv::empty().and(ty);
-        let bits = self.hir.tcx().layout_of(param_ty).unwrap().size.bits();
+        let bits = self.hir.tcx().layout_of(param_ty).unwrap().pref_pos.size.bits();
         let n = 1 << (bits - 1);
         let literal = ty::Const::from_bits(self.hir.tcx(), n, param_ty);
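
// --- Illustrative sketch (not part of the patch) -----------------------------
// Check of the `-1` literal construction in `neg_1_literal` above: shifting
// all-ones right by `128 - bits` keeps exactly the type's low bits set, which
// is the two's-complement bit pattern of -1 at that width.
fn main() {
    let bits = 16u32;
    let n = (!0u128) >> (128 - bits);
    assert_eq!(n, 0xFFFF); // `-1i16` as raw bits
}
// -----------------------------------------------------------------------------
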
 
diff --git a/src/librustc_mir/hair/constant.rs b/src/librustc_mir/hair/constant.rs
index b9e75a576cad8..5b716c501d6f8 100644
--- a/src/librustc_mir/hair/constant.rs
+++ b/src/librustc_mir/hair/constant.rs
@@ -19,7 +19,7 @@ crate fn lit_to_const<'tcx>(
 
     let trunc = |n| {
         let param_ty = ParamEnv::reveal_all().and(ty);
-        let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
+        let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.pref_pos.size;
         trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
         let result = truncate(n, width);
         trace!("trunc result: {}", result);
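
// --- Illustrative sketch (not part of the patch) -----------------------------
// The truncation that `lit_to_const` above delegates to `truncate(n, width)`:
// the literal is masked down to the layout's bit-width. A mask-based sketch,
// guarded so the 128-bit case stays well-defined:
fn truncate(n: u128, bits: u32) -> u128 {
    if bits == 128 { n } else { n & ((1u128 << bits) - 1) }
}

fn main() {
    assert_eq!(truncate(300, 8), 44); // a `u8`-width literal of 300 wraps to 44
}
// -----------------------------------------------------------------------------
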
diff --git a/src/librustc_mir/hair/pattern/_match.rs b/src/librustc_mir/hair/pattern/_match.rs
index 982330baf9c95..50f257ac2542d 100644
--- a/src/librustc_mir/hair/pattern/_match.rs
+++ b/src/librustc_mir/hair/pattern/_match.rs
@@ -2287,8 +2287,8 @@ fn specialize_one_pattern<'p, 'a: 'p, 'q: 'p, 'tcx>(
                 let ptr = Pointer::new(AllocId(0), offset);
                 (0..n)
                     .map(|i| {
-                        let ptr = ptr.offset(layout.size * i, &cx.tcx).ok()?;
-                        let scalar = alloc.read_scalar(&cx.tcx, ptr, layout.size).ok()?;
+                        let ptr = ptr.offset((layout.pref_pos * i).size, &cx.tcx).ok()?;
+                        let scalar = alloc.read_scalar(&cx.tcx, ptr, layout.pref_pos.size).ok()?;
                         let scalar = scalar.not_undef().ok()?;
                         let value = ty::Const::from_scalar(cx.tcx, scalar, ty);
                         let pattern =
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs
index 1fb8b3ca63fcf..01fc0ae31eba3 100644
--- a/src/librustc_mir/interpret/cast.rs
+++ b/src/librustc_mir/interpret/cast.rs
@@ -130,7 +130,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     src.layout.ty.discriminant_for_variant(*self.tcx, index)
                 {
                     assert!(src.layout.is_zst());
-                    return Ok(Scalar::from_uint(discr.val, dest_layout.size).into());
+                    return Ok(Scalar::from_uint(discr.val, dest_layout.pref_pos.size).into());
                 }
             }
             layout::Variants::Multiple { .. } => {},
@@ -138,10 +138,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         // Handle casting the metadata away from a fat pointer.
         if src.layout.ty.is_unsafe_ptr() && dest_layout.ty.is_unsafe_ptr() &&
-            dest_layout.size != src.layout.size
+            dest_layout.pref_pos.size != src.layout.pref_pos.size
         {
-            assert_eq!(src.layout.size, 2*self.memory.pointer_size());
-            assert_eq!(dest_layout.size, self.memory.pointer_size());
+            assert_eq!(src.layout.pref_pos.size, (self.memory.pointer_pos() * 2).size);
+            assert_eq!(dest_layout.pref_pos.size, self.memory.pointer_pos().size);
             assert!(dest_layout.ty.is_unsafe_ptr());
             match *src {
                 Immediate::ScalarPair(data, _) =>
@@ -158,7 +158,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         if src.layout.ty.is_any_ptr() && dest_layout.ty.is_unsafe_ptr()
         {
             // The only possible size-unequal case was handled above.
-            assert_eq!(src.layout.size, dest_layout.size);
+            assert_eq!(src.layout.pref_pos.size, dest_layout.pref_pos.size);
             return Ok(*src);
         }
 
@@ -166,7 +166,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // (a) cast a raw ptr to usize, or
         // (b) cast from an integer-like (including bool, char, enums).
         // In both cases we want the bits.
-        let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
+        let bits = self.force_bits(src.to_scalar()?, src.layout.pref_pos.size)?;
         Ok(self.cast_from_int(bits, src.layout, dest_layout)?.into())
     }
 
@@ -188,7 +188,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match dest_layout.ty.kind {
             Int(_) | Uint(_) | RawPtr(_) => {
                 let v = self.truncate(v, dest_layout);
-                Ok(Scalar::from_uint(v, dest_layout.size))
+                Ok(Scalar::from_uint(v, dest_layout.pref_pos.size))
             }
 
             Float(FloatTy::F32) if signed => Ok(Scalar::from_f32(
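
// --- Illustrative sketch (not part of the patch) -----------------------------
// Plain-Rust check of the size assertions in the pointer-cast hunks above: a
// fat raw pointer is two pointers wide (data + metadata), and casting it to a
// thin pointer keeps only the data half.
fn main() {
    use std::mem::{size_of, size_of_val};
    let fat: *const [u8] = &[1u8, 2, 3][..];
    assert_eq!(size_of_val(&fat), 2 * size_of::<usize>());
    let thin = fat as *const u8; // the length metadata is dropped
    assert_eq!(size_of_val(&thin), size_of::<usize>());
}
// -----------------------------------------------------------------------------
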
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index 8e901068a8d26..d1972fc0730bc 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -7,7 +7,7 @@ use rustc::hir::def_id::DefId;
 use rustc::hir::def::DefKind;
 use rustc::mir;
 use rustc::ty::layout::{
-    self, Size, Align, HasDataLayout, LayoutOf, TyLayout
+    self, Size, MemoryPosition, HasDataLayout, LayoutOf, TyLayout
 };
 use rustc::ty::subst::SubstsRef;
 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
@@ -262,12 +262,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     #[inline(always)]
     pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
         assert!(ty.abi.is_signed());
-        sign_extend(value, ty.size)
+        sign_extend(value, ty.pref_pos.size)
     }
 
     #[inline(always)]
     pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
-        truncate(value, ty.size)
+        truncate(value, ty.pref_pos.size)
     }
 
     #[inline]
@@ -369,13 +369,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Returns the actual dynamic size and alignment of the place at the given type.
     /// Only the "meta" (metadata) part of the place matters.
     /// This can fail to provide an answer for extern types.
-    pub(super) fn size_and_align_of(
+    pub(super) fn mem_pos_of(
         &self,
         metadata: Option<Scalar<M::PointerTag>>,
         layout: TyLayout<'tcx>,
-    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+    ) -> InterpResult<'tcx, Option<MemoryPosition>> {
         if !layout.is_unsized() {
-            return Ok(Some((layout.size, layout.align.abi)));
+            return Ok(Some(layout.pref_pos.mem_pos()));
         }
         match layout.ty.kind {
             ty::Adt(..) | ty::Tuple(..) => {
@@ -387,7 +387,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 trace!("DST layout: {:?}", layout);
 
                 let sized_size = layout.fields.offset(layout.fields.count() - 1);
-                let sized_align = layout.align.abi;
+                let sized_align = layout.pref_pos.align.abi;
                 trace!(
                     "DST {} statically sized prefix size: {:?} align: {:?}",
                     layout.ty,
@@ -399,7 +399,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // the last field).  Can't have foreign types here, how would we
                 // adjust alignment and size for them?
                 let field = layout.field(self, layout.fields.count() - 1)?;
-                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
+                let unsized_mem_pos = match self.mem_pos_of(metadata, field)? {
-                    Some(size_and_align) => size_and_align,
+                    Some(mem_pos) => mem_pos,
                     None => {
                         // A field with extern type.  If this field is at offset 0, we behave
@@ -415,6 +415,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     }
                 };
 
+                let unsized_size = unsized_mem_pos.size;
+                let unsized_align = unsized_mem_pos.align;
+
                 // FIXME (#26403, #27023): We should be adding padding
                 // to `sized_size` (to accommodate the `unsized_align`
                 // required of the unsized field that follows) before
@@ -438,12 +441,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     throw_ub_format!("wide pointer metadata contains invalid information: \
                         total size is bigger than largest supported object");
                 }
-                Ok(Some((size, align)))
-            }
+                let mem_pos = MemoryPosition::new(size, align).strided();
+
+                Ok(Some(mem_pos))
+            }
             ty::Dynamic(..) => {
                 let vtable = metadata.expect("dyn trait fat ptr must have vtable");
                 // Read size and align from vtable (already checks size).
-                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
+                let mem_pos = self.read_mem_pos_from_vtable(vtable)?;
+
+                Ok(Some(mem_pos))
             }
 
             ty::Slice(_) | ty::Str => {
@@ -451,25 +458,25 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let elem = layout.field(self, 0)?;
 
                 // Make sure the slice is not too big.
-                let size = elem.size.checked_mul(len, &*self.tcx)
+                let pref_pos = elem.pref_pos.checked_mul(len, &*self.tcx)
                     .ok_or_else(|| err_ub_format!("invalid slice: \
                         total size is bigger than largest supported object"))?;
-                Ok(Some((size, elem.align.abi)))
+                Ok(Some(pref_pos.mem_pos()))
             }
 
             ty::Foreign(_) => {
                 Ok(None)
             }
 
-            _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
+            _ => bug!("mem_pos_of::<{:?}> not supported", layout.ty),
         }
     }
     #[inline]
-    pub fn size_and_align_of_mplace(
+    pub fn mem_pos_of_mplace(
         &self,
         mplace: MPlaceTy<'tcx, M::PointerTag>
-    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
-        self.size_and_align_of(mplace.meta, mplace.layout)
+    ) -> InterpResult<'tcx, Option<MemoryPosition>> {
+        self.mem_pos_of(mplace.meta, mplace.layout)
     }
 
     pub fn push_stack_frame(
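
// --- Illustrative sketch (not part of the patch) -----------------------------
// The unsized-struct arithmetic in `mem_pos_of` above, assuming `.strided()`
// rounds the size up to the alignment: the sized prefix is followed by the
// unsized tail, the alignments combine with `max`, then the total is aligned.
fn dst_mem_pos(sized_size: u64, sized_align: u64,
               unsized_size: u64, unsized_align: u64) -> (u64, u64) {
    let align_to = |x: u64, a: u64| (x + a - 1) / a * a;
    let align = sized_align.max(unsized_align);
    let size = align_to(sized_size + unsized_size, align); // `.strided()`
    (size, align)
}

fn main() {
    // struct { header: u32, tail: [u16] } with a 3-element tail:
    // 4-byte prefix + 6-byte tail at align 4 -> 10 rounded up to 12.
    assert_eq!(dst_mem_pos(4, 4, 6, 2), (12, 4));
}
// -----------------------------------------------------------------------------
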
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs
index 39f10d8e6045d..9837d5865d9de 100644
--- a/src/librustc_mir/interpret/intrinsics.rs
+++ b/src/librustc_mir/interpret/intrinsics.rs
@@ -68,9 +68,9 @@ crate fn eval_nullary_intrinsic<'tcx>(
         "pref_align_of" => {
             let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
             let n = match name {
-                "pref_align_of" => layout.align.pref.bytes(),
-                "min_align_of" => layout.align.abi.bytes(),
-                "size_of" => layout.size.bytes(),
+                "pref_align_of" => layout.pref_pos.align.pref.bytes(),
+                "min_align_of" => layout.pref_pos.align.abi.bytes(),
+                "size_of" => layout.pref_pos.stride().bytes(),
                 _ => bug!(),
             };
             ty::Const::from_usize(tcx, n)
@@ -133,7 +133,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let ty = substs.type_at(0);
                 let layout_of = self.layout_of(ty)?;
                 let val = self.read_scalar(args[0])?.not_undef()?;
-                let bits = self.force_bits(val, layout_of.size)?;
+                let bits = self.force_bits(val, layout_of.pref_pos.size)?;
                 let kind = match layout_of.abi {
                     ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                     _ => throw_unsup!(TypeNotPrimitive(ty)),
@@ -181,13 +181,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     BinOp::Sub
                 }, l, r)?;
                 let val = if overflowed {
-                    let num_bits = l.layout.size.bits();
+                    let num_bits = l.layout.pref_pos.size.bits();
                     if l.layout.abi.is_signed() {
                         // For signed ints the saturated value depends on the sign of the first
                         // term since the sign of the second term can be inferred from this and
                         // the fact that the operation has overflowed (if either is 0 no
                         // overflow can occur)
-                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
+                        let first_term: u128 = self.force_bits(l.to_scalar()?,
+                                                                l.layout.pref_pos.size)?;
                         let first_term_positive = first_term & (1 << (num_bits-1)) == 0;
                         if first_term_positive {
                             // Negative overflow not possible since the positive first term
@@ -225,7 +226,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                 if overflowed {
                     let layout = self.layout_of(substs.type_at(0))?;
-                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
+                    let r_val = self.force_bits(r.to_scalar()?, layout.pref_pos.size)?;
                     throw_ub_format!("Overflowing shift by {} in `{}`", r_val, intrinsic_name);
                 }
                 self.write_scalar(val, dest)?;
@@ -235,10 +236,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                 let layout = self.layout_of(substs.type_at(0))?;
                 let val = self.read_scalar(args[0])?.not_undef()?;
-                let val_bits = self.force_bits(val, layout.size)?;
+                let val_bits = self.force_bits(val, layout.pref_pos.size)?;
                 let raw_shift = self.read_scalar(args[1])?.not_undef()?;
-                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
-                let width_bits = layout.size.bits() as u128;
+                let raw_shift_bits = self.force_bits(raw_shift, layout.pref_pos.size)?;
+                let width_bits = layout.pref_pos.size.bits() as u128;
                 let shift_bits = raw_shift_bits % width_bits;
                 let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                 let result_bits = if intrinsic_name == "rotate_left" {
@@ -247,7 +248,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                 };
                 let truncated_bits = self.truncate(result_bits, layout);
-                let result = Scalar::from_uint(truncated_bits, layout.size);
+                let result = Scalar::from_uint(truncated_bits, layout.pref_pos.size);
                 self.write_scalar(result, dest)?;
             }
 
@@ -266,7 +267,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     let a = a.to_machine_usize(self)?;
                     let b = b.to_machine_usize(self)?;
                     if a == b && a != 0 {
-                        self.write_scalar(Scalar::from_int(0, isize_layout.size), dest)?;
+                        self.write_scalar(Scalar::from_int(0, isize_layout.pref_pos.size), dest)?;
                         return Ok(true);
                     }
                 }
@@ -288,7 +289,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 )?;
                 let pointee_layout = self.layout_of(substs.type_at(0))?;
                 let val = ImmTy::from_scalar(val, isize_layout);
-                let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
+                let size = ImmTy::from_int(pointee_layout.pref_pos.size.bytes(), isize_layout);
                 self.exact_div(val, size, dest)?;
             }
 
@@ -409,7 +410,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // First, check x % y != 0.
         if self.binary_op(BinOp::Rem, a, b)?.to_bits()? != 0 {
             // Then, check if `b` is -1, which is the "min_value / -1" case.
-            let minus1 = Scalar::from_int(-1, dest.layout.size);
+            let minus1 = Scalar::from_int(-1, dest.layout.pref_pos.size);
             let b = b.to_scalar().unwrap();
             if b == minus1 {
                 throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index e929b0855834e..495b1265b9251 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -11,7 +11,7 @@ use std::ptr;
 use std::borrow::Cow;
 
 use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
-use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
+use rustc::ty::layout::{Align, MemoryPosition, TargetDataLayout, Size, HasDataLayout};
 use rustc_data_structures::fx::{FxHashSet, FxHashMap};
 
 use syntax::ast::Mutability;
@@ -95,7 +95,7 @@ pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
-    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
-    /// that do not exist any more.
+    /// to ZSTs (where pointers may dangle), we keep track of the memory position even for
+    /// allocations that do not exist any more.
     // FIXME: this should not be public, but interning currently needs access to it
-    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
+    pub(super) dead_alloc_map: FxHashMap<AllocId, MemoryPosition>,
 
     /// Extra data added by the machine.
     pub extra: M::MemoryExtra,
@@ -165,11 +165,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
     pub fn allocate(
         &mut self,
-        size: Size,
-        align: Align,
+        mem_pos: MemoryPosition,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> Pointer<M::PointerTag> {
-        let alloc = Allocation::undef(size, align);
+        let alloc = Allocation::undef(mem_pos);
         self.allocate_with(alloc, kind)
     }
 
@@ -196,9 +195,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     pub fn reallocate(
         &mut self,
         ptr: Pointer<M::PointerTag>,
-        old_size_and_align: Option<(Size, Align)>,
-        new_size: Size,
-        new_align: Align,
+        old_mem_pos: Option<MemoryPosition>,
+        new_mem_pos: MemoryPosition,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
         if ptr.offset.bytes() != 0 {
@@ -207,18 +205,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
-        // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc".
-        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
-        let new_ptr = self.allocate(new_size, new_align, kind);
-        let old_size = match old_size_and_align {
-            Some((size, _align)) => size,
-            None => self.get_raw(ptr.alloc_id)?.size,
+        let new_ptr = self.allocate(new_mem_pos, kind);
+        let old_size = match old_mem_pos {
+            Some(old_mem_pos) => old_mem_pos.size,
+            None => self.get_raw(ptr.alloc_id)?.mem_pos.size,
         };
         self.copy(
             ptr,
             new_ptr,
-            old_size.min(new_size),
+            old_size.min(new_mem_pos.size),
             /*nonoverlapping*/ true,
         )?;
-        self.deallocate(ptr, old_size_and_align, kind)?;
+        self.deallocate(ptr, old_mem_pos, kind)?;
 
         Ok(new_ptr)
     }
@@ -237,7 +235,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     pub fn deallocate(
         &mut self,
         ptr: Pointer<M::PointerTag>,
-        old_size_and_align: Option<(Size, Align)>,
+        old_mem_pos: Option<MemoryPosition>,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> InterpResult<'tcx> {
         trace!("deallocating: {}", ptr.alloc_id);
@@ -270,21 +268,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 format!("{:?}", kind),
             ))
         }
-        if let Some((size, align)) = old_size_and_align {
-            if size != alloc.size || align != alloc.align {
-                let bytes = alloc.size;
-                throw_unsup!(IncorrectAllocationInformation(size, bytes, align, alloc.align))
+
+        let bytes_mem_pos = alloc.mem_pos;
+
+        if let Some(mem_pos) = old_mem_pos {
+            if mem_pos != bytes_mem_pos {
+                throw_unsup!(IncorrectAllocationInformation(mem_pos, bytes_mem_pos))
             }
         }
 
         // Let the machine take some extra action
-        let size = alloc.size;
-        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;
+        AllocationExtra::memory_deallocated(&mut alloc, ptr, bytes_mem_pos.size)?;
 
-        // Don't forget to remember size and align of this now-dead allocation
+        // Don't forget to remember the memory position of this now-dead allocation
         let old = self.dead_alloc_map.insert(
             ptr.alloc_id,
-            (alloc.size, alloc.align)
+            bytes_mem_pos
         );
         if old.is_some() {
             bug!("Nothing can be deallocated twice");
@@ -310,11 +309,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     pub fn check_ptr_access(
         &self,
         sptr: Scalar<M::PointerTag>,
-        size: Size,
-        align: Align,
+        mem_pos: MemoryPosition,
     ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
-        let align = if M::CHECK_ALIGN { Some(align) } else { None };
-        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
+        let align = if M::CHECK_ALIGN { Some(mem_pos.align) } else { None };
+        self.check_ptr_access_align(sptr, mem_pos.size, align, CheckInAllocMsg::MemoryAccessTest)
     }
 
     /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
@@ -364,8 +362,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 None
             }
             Err(ptr) => {
-                let (allocation_size, alloc_align) =
+                let alloc_mem_pos =
                     self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferencable)?;
+                let (allocation_size, alloc_align) = (alloc_mem_pos.size, alloc_mem_pos.align);
                 // Test bounds. This also ensures non-NULL.
                 // It is sufficient to check this for the end pointer. The addition
                 // checks for overflow.
@@ -400,9 +399,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         &self,
         ptr: Pointer<M::PointerTag>,
     ) -> bool {
-        let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
+        let mem_pos = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
             .expect("alloc info with MaybeDead cannot fail");
-        ptr.check_inbounds_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
+        ptr.check_inbounds_alloc(mem_pos.size, CheckInAllocMsg::NullPointerTest).is_err()
     }
 }
 
@@ -557,13 +556,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         &self,
         id: AllocId,
         liveness: AllocCheck,
-    ) -> InterpResult<'static, (Size, Align)> {
+    ) -> InterpResult<'static, MemoryPosition> {
         // # Regular allocations
         // Don't use `self.get_raw` here as that will
         // a) cause cycles in case `id` refers to a static
         // b) duplicate a static's allocation in miri
         if let Some((_, alloc)) = self.alloc_map.get(id) {
-            return Ok((alloc.size, alloc.align));
+            return Ok(alloc.mem_pos);
         }
 
         // # Function pointers
@@ -573,7 +572,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 // The caller requested no function pointers.
                 throw_unsup!(DerefFunctionPointer)
             } else {
-                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
+                Ok(MemoryPosition::new(Size::ZERO, Align::from_bytes(1).unwrap()))
             };
         }
 
@@ -586,12 +585,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 // Use size and align of the type.
                 let ty = self.tcx.type_of(did);
                 let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
-                Ok((layout.size, layout.align.abi))
+                Ok(layout.pref_pos.mem_pos())
             },
             Some(GlobalAlloc::Memory(alloc)) =>
                 // Need to duplicate the logic here, because the global allocations have
                 // different associated types than the interpreter-local ones.
-                Ok((alloc.size, alloc.align)),
+                Ok(alloc.mem_pos),
             Some(GlobalAlloc::Function(_)) =>
                 bug!("We already checked function pointers above"),
             // The rest must be dead.
@@ -653,7 +652,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let prefix_len = msg.len();
         let mut relocations = vec![];
 
-        for i in 0..alloc.size.bytes() {
+        for i in 0..alloc.mem_pos.size.bytes() {
             let i = Size::from_bytes(i);
             if let Some(&(_, target_id)) = alloc.relocations().get(&i) {
                 if allocs_seen.insert(target_id) {
@@ -677,8 +676,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         trace!(
             "{}({} bytes, alignment {}){}",
             msg,
-            alloc.size.bytes(),
-            alloc.align.bytes(),
+            alloc.mem_pos.size.bytes(),
+            alloc.mem_pos.align.bytes(),
             extra
         );
 
@@ -693,7 +692,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 let target = format!("({})", target_id);
                 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                 write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
-                pos = i + self.pointer_size();
+                pos = i + self.pointer_pos().stride();
             }
             trace!("{}", msg);
         }
@@ -776,7 +775,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         ptr: Scalar<M::PointerTag>,
         size: Size,
     ) -> InterpResult<'tcx, &[u8]> {
-        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+        let mem_pos = MemoryPosition::new(size, Align::from_bytes(1).unwrap());
+        let ptr = match self.check_ptr_access(ptr, mem_pos)? {
             Some(ptr) => ptr,
             None => return Ok(&[]), // zero-sized access
         };
@@ -803,7 +803,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let src = src.into_iter();
         let size = Size::from_bytes(src.size_hint().0 as u64);
-        // `write_bytes` checks that this lower bound matches the upper bound matches reality.
+        // `write_bytes` checks that this lower bound matches both the upper bound and reality.
-        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
+        let mem_pos = MemoryPosition::new(size, Align::from_bytes(1).unwrap());
+        let ptr = match self.check_ptr_access(ptr, mem_pos)? {
             Some(ptr) => ptr,
             None => return Ok(()), // zero-sized access
         };
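Since `MemoryPosition` is the type this patch threads through everywhere, here is a reduced standalone sketch of the shape its usages in this diff imply: a `new` constructor, public `size`/`align` fields, equality, and scaling by a slot count. The real type wraps `Size` and `Align` in `rustc::ty::layout`; the fields are simplified to `u64` so the sketch runs on its own and may differ from the actual definition:

```rust
// Illustrative stand-in for the PR's `MemoryPosition`; sizes and alignments
// are plain byte counts here instead of `Size`/`Align` newtypes.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct MemoryPosition {
    size: u64,  // size in bytes
    align: u64, // alignment in bytes (a power of two)
}

impl MemoryPosition {
    fn new(size: u64, align: u64) -> Self {
        assert!(align.is_power_of_two());
        MemoryPosition { size, align }
    }
}

// The vtable code scales a pointer-sized position by a slot count
// (`ptr_mem_pos * n`, `3 * ptr_pos`), keeping the alignment.
impl std::ops::Mul<u64> for MemoryPosition {
    type Output = MemoryPosition;
    fn mul(self, n: u64) -> MemoryPosition {
        MemoryPosition::new(self.size * n, self.align)
    }
}

impl std::ops::Mul<MemoryPosition> for u64 {
    type Output = MemoryPosition;
    fn mul(self, pos: MemoryPosition) -> MemoryPosition {
        pos * self
    }
}

fn main() {
    let ptr = MemoryPosition::new(8, 8); // a 64-bit pointer slot
    assert_eq!((3 * ptr).size, 24);      // three header slots of a vtable
    assert_eq!((ptr * 2).align, 8);      // scaling preserves alignment
}
```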
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index 79762b87b0a85..b66a5e7a99c03 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -54,7 +54,7 @@ impl<'tcx, Tag> Immediate<Tag> {
     ) -> Self {
         Immediate::ScalarPair(
             val.into(),
-            Scalar::from_uint(len, cx.data_layout().pointer_size).into(),
+            Scalar::from_uint(len, cx.data_layout().pointer_pos.size).into(),
         )
     }
 
@@ -175,17 +175,17 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
 
     #[inline]
     pub fn from_uint(i: impl Into<u128>, layout: TyLayout<'tcx>) -> Self {
-        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+        Self::from_scalar(Scalar::from_uint(i, layout.pref_pos.size), layout)
     }
 
     #[inline]
     pub fn from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Self {
-        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+        Self::from_scalar(Scalar::from_int(i, layout.pref_pos.size), layout)
     }
 
     #[inline]
     pub fn to_bits(self) -> InterpResult<'tcx, u128> {
-        self.to_scalar()?.to_bits(self.layout.size)
+        self.to_scalar()?.to_bits(self.layout.pref_pos.size)
     }
 }
 
@@ -249,7 +249,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             layout::Abi::Scalar(..) => {
                 let scalar = self.memory
                     .get_raw(ptr.alloc_id)?
-                    .read_scalar(self, ptr, mplace.layout.size)?;
+                    .read_scalar(self, ptr, mplace.layout.pref_pos.size)?;
                 Ok(Some(ImmTy {
                     imm: scalar.into(),
                     layout: mplace.layout,
@@ -371,7 +371,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let offset = op.layout.fields.offset(field);
         let immediate = match *base {
             // the field covers the entire type
-            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
+            _ if offset.bytes() == 0 &&
+                field_layout.pref_pos.size == op.layout.pref_pos.size => *base,
             // extract fields from types with `ScalarPair` ABI
             Immediate::ScalarPair(a, b) => {
                 let val = if offset.bytes() == 0 { a } else { b };
@@ -567,7 +568,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // We rely on mutability being set correctly in that allocation to prevent writes
                 // where none should happen.
                 let ptr = self.tag_static_base_pointer(Pointer::new(id, offset));
-                Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
+                Operand::Indirect(MemPlace::from_ptr(ptr, layout.pref_pos.align.abi))
             },
             ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x).into()),
             ConstValue::Slice { data, start, end } => {
@@ -626,12 +627,13 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             layout::DiscriminantKind::Tag => {
                 let bits_discr = raw_discr
                     .not_undef()
-                    .and_then(|raw_discr| self.force_bits(raw_discr, discr_val.layout.size))
+                    .and_then(|raw_discr| self.force_bits(raw_discr,
+                        discr_val.layout.pref_pos.size))
                     .map_err(|_| err_ub!(InvalidDiscriminant(raw_discr.erase_tag())))?;
                 let real_discr = if discr_val.layout.ty.is_signed() {
                     // going from layout tag type to typeck discriminant type
                     // requires first sign extending with the discriminant layout
-                    let sexted = sign_extend(bits_discr, discr_val.layout.size) as i128;
+                    let sexted = sign_extend(bits_discr, discr_val.layout.pref_pos.size) as i128;
                     // and then zeroing with the typeck discriminant type
                     let discr_ty = rval.layout.ty
                         .ty_adt_def().expect("tagged layout corresponds to adt")
@@ -671,7 +673,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let raw_discr = raw_discr.not_undef().map_err(|_| {
                     err_ub!(InvalidDiscriminant(ScalarMaybeUndef::Undef))
                 })?;
-                match raw_discr.to_bits_or_ptr(discr_val.layout.size, self) {
+                match raw_discr.to_bits_or_ptr(discr_val.layout.pref_pos.size, self) {
                     Err(ptr) => {
                         // The niche must be just 0 (which an inbounds pointer value never is)
                         let ptr_valid = niche_start == 0 && variants_start == variants_end &&
@@ -694,7 +696,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         )?;
                         let variant_index_relative = variant_index_relative_val
                             .to_scalar()?
-                            .assert_bits(discr_val.layout.size);
+                            .assert_bits(discr_val.layout.pref_pos.size);
                         // Check if this is in the range that indicates an actual discriminant.
                         if variant_index_relative <= u128::from(variants_end - variants_start) {
                             let variant_index_relative = u32::try_from(variant_index_relative)
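The discriminant hunks above sign-extend the raw tag bits from the tag's size before comparing against the typeck discriminant type. A standalone sketch of that step, same idea as the interpreter's `sign_extend` but simplified to take a bit width directly:

```rust
// Sign-extend the low `size_bits` bits of `value` to a full i128.
fn sign_extend(value: u128, size_bits: u32) -> i128 {
    let shift = 128 - size_bits;
    ((value << shift) as i128) >> shift
}

fn main() {
    // An 8-bit tag of 0xFF is the signed value -1 after extension.
    assert_eq!(sign_extend(0xFF, 8), -1);
    assert_eq!(sign_extend(0x7F, 8), 127);
}
```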
diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs
index 176b084f22587..ad6d78e578027 100644
--- a/src/librustc_mir/interpret/operator.rs
+++ b/src/librustc_mir/interpret/operator.rs
@@ -128,7 +128,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             let signed = left_layout.abi.is_signed();
             let mut oflo = (r as u32 as u128) != r;
             let mut r = r as u32;
-            let size = left_layout.size;
+            let size = left_layout.pref_pos.size;
             oflo |= r >= size.bits() as u32;
             if oflo {
                 r %= size.bits() as u32;
@@ -189,7 +189,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             if let Some(op) = op {
                 let l128 = self.sign_extend(l, left_layout) as i128;
                 let r = self.sign_extend(r, right_layout) as i128;
-                let size = left_layout.size;
+                let size = left_layout.pref_pos.size;
                 match bin_op {
                     Rem | Div => {
                         // int_min / -1
@@ -213,7 +213,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
         }
 
-        let size = left_layout.size;
+        let size = left_layout.pref_pos.size;
 
         let (val, ty) = match bin_op {
             Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
@@ -307,8 +307,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     left.layout.ty, bin_op, right.layout.ty
                 );
 
-                let l = self.force_bits(left.to_scalar()?, left.layout.size)?;
-                let r = self.force_bits(right.to_scalar()?, right.layout.size)?;
+                let l = self.force_bits(left.to_scalar()?, left.layout.pref_pos.size)?;
+                let r = self.force_bits(right.to_scalar()?, right.layout.pref_pos.size)?;
                 self.binary_int_op(bin_op, l, left.layout, r, right.layout)
             }
             _ if left.layout.ty.is_any_ptr() => {
@@ -367,7 +367,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             }
             _ => {
                 assert!(layout.ty.is_integral());
-                let val = self.force_bits(val, layout.size)?;
+                let val = self.force_bits(val, layout.pref_pos.size)?;
                 let res = match un_op {
                     Not => !val,
                     Neg => {
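On the shift hunk above: the operation overflows whenever the shift amount is at least the value's bit width, and the shift is then performed with the amount reduced modulo that width. A standalone sketch under those rules (the mask simply truncates the result back to `width_bits`):

```rust
fn overflowing_shl(l: u128, r: u128, width_bits: u32) -> (u128, bool) {
    // Overflow when the shift amount does not fit in the bit width.
    let oflo = r >= width_bits as u128;
    let r = (r % width_bits as u128) as u32;
    // Truncate the shifted value back to `width_bits`.
    let mask = if width_bits == 128 { u128::MAX } else { (1u128 << width_bits) - 1 };
    ((l << r) & mask, oflo)
}

fn main() {
    // Shifting an 8-bit value by 9 overflows and effectively shifts by 1,
    // matching `u8::overflowing_shl`.
    assert_eq!(overflowing_shl(1, 9, 8), (2, true));
}
```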
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index 0bd47edc04660..08930ecff962e 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -9,7 +9,7 @@ use rustc::mir;
 use rustc::mir::interpret::truncate;
 use rustc::ty::{self, Ty};
 use rustc::ty::layout::{
-    self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx, PrimitiveExt
+    self, Size, Align, MemoryPosition, LayoutOf, TyLayout, HasDataLayout, VariantIdx, PrimitiveExt
 };
 use rustc::ty::TypeFoldable;
 
@@ -154,8 +154,8 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
     pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
         MPlaceTy {
             mplace: MemPlace::from_scalar_ptr(
-                Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
-                layout.align.abi
+                Scalar::from_uint(layout.pref_pos.align.abi.bytes(), cx.pointer_size()),
+                layout.pref_pos.align.abi
             ),
             layout
         }
@@ -186,7 +186,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
 
     #[inline]
     fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
-        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
+        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.pref_pos.align.abi), layout }
     }
 
     #[inline]
@@ -302,7 +302,7 @@ where
             // the point of tracking the alignment here is to make sure that the *static*
             // alignment information emitted with the loads is correct. The run-time
             // alignment can only be more restrictive.
-            align: layout.align.abi,
+            align: layout.pref_pos.align.abi,
             meta,
         };
         Ok(MPlaceTy { mplace, layout })
@@ -332,12 +332,14 @@ where
         place: MPlaceTy<'tcx, M::PointerTag>,
         size: Option<Size>,
     ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
-        let size = size.unwrap_or_else(|| {
+        let mem_pos = if let Some(size) = size {
+            MemoryPosition::new(size, place.align)
+        } else {
             assert!(!place.layout.is_unsized());
             assert!(place.meta.is_none());
-            place.layout.size
-        });
-        self.memory.check_ptr_access(place.ptr, size, place.align)
+            place.layout.pref_pos.mem_pos()
+        };
+        self.memory.check_ptr_access(place.ptr, mem_pos)
     }
 
     /// Return the "access-checked" version of this `MPlace`, where for non-ZST
@@ -346,8 +348,9 @@ where
         &self,
         mut place: MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        let (size, align) = self.size_and_align_of_mplace(place)?
-            .unwrap_or((place.layout.size, place.layout.align.abi));
+        let mem_pos = self.mem_pos_of_mplace(place)?
+            .unwrap_or(place.layout.pref_pos.mem_pos());
+        let (size, align) = (mem_pos.size, mem_pos.align);
         assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
         place.mplace.align = align; // maximally strict checking
         // When dereferencing a pointer, it must be non-NULL, aligned, and live.
@@ -407,14 +410,14 @@ where
             // Re-use parent metadata to determine dynamic field layout.
             // With custom DSTS, this *will* execute user-defined code, but the same
             // happens at run-time so that's okay.
-            let align = match self.size_and_align_of(base.meta, field_layout)? {
-                Some((_, align)) => align,
+            let align = match self.mem_pos_of(base.meta, field_layout)? {
+                Some(mem_pos) => mem_pos.align,
                 None if offset == Size::ZERO =>
                     // An extern type at offset 0, we fall back to its static alignment.
                     // FIXME: Once we have made decisions for how to handle size and alignment
                     // of `extern type`, this should be adapted.  It is just a temporary hack
                     // to get some code to work that probably ought to work.
-                    field_layout.align.abi,
+                    field_layout.pref_pos.align.abi,
                 None =>
                     bug!("Cannot compute offset for extern type field at non-0 offset"),
             };
@@ -425,7 +428,7 @@ where
             (None, offset)
         };
 
-        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+        // We do not look at `base.layout.pref_pos.align` nor `field_layout.pref_pos.align`, unlike
         // codegen -- mostly to see if we can get away with that
         base.offset(offset, meta, field_layout, self)
     }
@@ -508,7 +511,7 @@ where
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.access_local(self.frame(), local, Some(layout))?;
                 let n = self.read_scalar(n)?;
-                let n = self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?;
+                let n = self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_pos.size)?;
                 self.mplace_field(base, u64::try_from(n).unwrap())?
             }
 
@@ -740,10 +743,10 @@ where
             assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
             match src {
                 Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Ptr(_))) =>
-                    assert_eq!(self.pointer_size(), dest.layout.size,
+                    assert_eq!(self.pointer_size(), dest.layout.pref_pos.size,
                         "Size mismatch when writing pointer"),
                 Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) =>
-                    assert_eq!(Size::from_bytes(size.into()), dest.layout.size,
+                    assert_eq!(Size::from_bytes(size.into()), dest.layout.pref_pos.size,
                         "Size mismatch when writing bits"),
                 Immediate::Scalar(ScalarMaybeUndef::Undef) => {}, // undef can have any size
                 Immediate::ScalarPair(_, _) => {
@@ -798,7 +801,7 @@ where
         };
 
         let tcx = &*self.tcx;
-        // FIXME: We should check that there are dest.layout.size many bytes available in
+        // FIXME: We should check that there are dest.layout.pref_pos.size many bytes available in
         // memory.  The code below is not sufficient, with enough padding it might not
         // cover all the bytes!
         match value {
@@ -809,7 +812,7 @@ where
                             dest.layout)
                 }
                 self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
-                    tcx, ptr, scalar, dest.layout.size
+                    tcx, ptr, scalar, dest.layout.pref_pos.size
                 )
             }
             Immediate::ScalarPair(a_val, b_val) => {
@@ -891,7 +894,7 @@ where
         let size = size.unwrap_or_else(|| {
             assert!(!dest.layout.is_unsized(),
                 "Cannot copy into already initialized unsized place");
-            dest.layout.size
+            dest.layout.pref_pos.size
         });
         assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
 
@@ -925,7 +928,7 @@ where
             return self.copy_op(src, dest);
         }
         // We still require the sizes to match.
-        assert!(src.layout.size == dest.layout.size,
+        assert!(src.layout.pref_pos.size == dest.layout.pref_pos.size,
             "Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
         // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
         // to avoid that here.
@@ -989,9 +992,10 @@ where
                         // that has different alignment than the outer field.
                         // We also need to support unsized types, and hence cannot use `allocate`.
                         let local_layout = self.layout_of_local(&self.stack[frame], local, None)?;
-                        let (size, align) = self.size_and_align_of(meta, local_layout)?
+                        let mem_pos = self.mem_pos_of(meta, local_layout)?
                             .expect("Cannot allocate for non-dyn-sized type");
-                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
+                        let align = mem_pos.align;
+                        let ptr = self.memory.allocate(mem_pos, MemoryKind::Stack);
                         let mplace = MemPlace { ptr: ptr.into(), align, meta };
                         if let Some(value) = old_val {
                             // Preserve old value.
@@ -1028,7 +1032,7 @@ where
         layout: TyLayout<'tcx>,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> MPlaceTy<'tcx, M::PointerTag> {
-        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
+        let ptr = self.memory.allocate(layout.pref_pos.mem_pos(), kind);
         MPlaceTy::from_aligned_ptr(ptr, layout)
     }
 
@@ -1126,10 +1130,9 @@ where
 
         // More sanity checks
         if cfg!(debug_assertions) {
-            let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
-            assert_eq!(size, layout.size);
+            let mem_pos = self.read_mem_pos_from_vtable(vtable)?;
             // only ABI alignment is preserved
-            assert_eq!(align, layout.align.abi);
+            assert_eq!(mem_pos, layout.pref_pos.mem_pos());
         }
 
         let mplace = MPlaceTy {
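The `MPlaceTy::dangling` hunk near the top of this file uses the type's alignment as the pointer's address, the same trick the standard library's `NonNull::dangling` uses. A quick demonstration; the exact address is an implementation detail of `NonNull`, but it holds in the current implementation:

```rust
use std::ptr::NonNull;

fn main() {
    // Dangling but well-aligned: the address equals the alignment.
    let p: NonNull<u64> = NonNull::dangling();
    assert_eq!(p.as_ptr() as usize, std::mem::align_of::<u64>());
}
```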
diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs
index 1df98f079cc10..dfb0e1b81e7da 100644
--- a/src/librustc_mir/interpret/snapshot.rs
+++ b/src/librustc_mir/interpret/snapshot.rs
@@ -15,7 +15,7 @@ use rustc::mir::interpret::{
 };
 
 use rustc::ty::{self, TyCtxt};
-use rustc::ty::layout::{Align, Size};
+use rustc::ty::layout::MemoryPosition;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_index::vec::IndexVec;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -275,8 +275,7 @@ struct AllocationSnapshot<'a> {
     bytes: &'a [u8],
     relocations: Relocations<(), AllocIdSnapshot<'a>>,
     undef_mask: &'a UndefMask,
-    align: &'a Align,
-    size: &'a Size,
+    mem_pos: &'a MemoryPosition,
     mutability: &'a Mutability,
 }
 
@@ -287,8 +286,7 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation
 
     fn snapshot(&self, ctx: &'a Ctx) -> Self::Item {
         let Allocation {
-            size,
-            align,
+            mem_pos,
             mutability,
             extra: (),
             ..
@@ -306,8 +304,7 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation
         AllocationSnapshot {
             bytes,
             undef_mask,
-            align,
-            size,
+            mem_pos,
             mutability,
             relocations: relocations.snapshot(ctx),
         }
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs
index daca7a25787ca..f9f828d3899d8 100644
--- a/src/librustc_mir/interpret/step.rs
+++ b/src/librustc_mir/interpret/step.rs
@@ -216,7 +216,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     self.copy_op(op, first.into())?;
 
                     if length > 1 {
-                        let elem_size = first.layout.size;
+                        let elem_size = first.layout.pref_pos.size;
                         // Copy the rest. This is performance-sensitive code
                         // for big static/const arrays!
                         let rest_ptr = first_ptr.offset(elem_size, self)?;
@@ -242,7 +242,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Ref(_, _, ref place) => {
                 let src = self.eval_place(place)?;
                 let place = self.force_allocation(src)?;
-                if place.layout.size.bytes() > 0 {
+                if place.layout.pref_pos.size.bytes() > 0 {
                     // definitely not a ZST
                     assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
                 }
@@ -260,7 +260,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         "SizeOf nullary MIR operator called for unsized type");
                 let size = self.pointer_size();
                 self.write_scalar(
-                    Scalar::from_uint(layout.size.bytes(), size),
+                    Scalar::from_uint(layout.pref_pos.size.bytes(), size),
                     dest,
                 )?;
             }
@@ -273,7 +273,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Discriminant(ref place) => {
                 let op = self.eval_place_to_op(place, None)?;
                 let discr_val = self.read_discriminant(op)?.0;
-                let size = dest.layout.size;
+                let size = dest.layout.pref_pos.size;
                 self.write_scalar(Scalar::from_uint(discr_val, size), dest)?;
             }
         }
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index e10bb85d52df8..0d27a7cab59ab 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -422,7 +422,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // cannot use the shim here, because that will only result in infinite recursion
             ty::InstanceDef::Virtual(_, idx) => {
                 let mut args = args.to_vec();
-                let ptr_size = self.pointer_size();
+                let ptr_pos = self.pointer_pos().mem_pos();
                 // We have to implement all "object safe receivers".  Currently we
                 // support built-in pointers (&, &mut, Box) as well as unsized-self.  We do
                 // not yet support custom self types.
@@ -439,11 +439,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 };
                 // Find and consult vtable
                 let vtable = receiver_place.vtable();
-                let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
+                let vtable_slot = vtable.ptr_offset((ptr_pos * (idx as u64 + 3)).size, self)?;
                 let vtable_slot = self.memory.check_ptr_access(
                     vtable_slot,
-                    ptr_size,
-                    self.tcx.data_layout.pointer_align.abi,
+                    ptr_pos,
                 )?.expect("cannot be a ZST");
                 let fn_ptr = self.memory.get_raw(vtable_slot.alloc_id)?
                     .read_ptr_sized(self, vtable_slot)?.not_undef()?;
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index c15425321ec01..3b279c31aab17 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -1,5 +1,5 @@
 use rustc::ty::{self, Ty, Instance, TypeFoldable};
-use rustc::ty::layout::{Size, Align, LayoutOf, HasDataLayout};
+use rustc::ty::layout::{Size, MemoryPosition, Align, LayoutOf, HasDataLayout};
 use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic,};
 
 use super::{InterpCx, Machine, MemoryKind, FnVal};
@@ -44,18 +44,17 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         let layout = self.layout_of(ty)?;
         assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
-        let size = layout.size.bytes();
-        let align = layout.align.abi.bytes();
+        let size = layout.pref_pos.size.bytes();
+        let align = layout.pref_pos.align.abi.bytes();
+
+        let ptr_mem_pos = self.tcx.data_layout.pointer_pos.mem_pos();
 
-        let ptr_size = self.pointer_size();
-        let ptr_align = self.tcx.data_layout.pointer_align.abi;
         // /////////////////////////////////////////////////////////////////////////////////////////
         // If you touch this code, be sure to also make the corresponding changes to
         // `get_vtable` in rust_codegen_llvm/meth.rs
         // /////////////////////////////////////////////////////////////////////////////////////////
         let vtable = self.memory.allocate(
-            ptr_size * (3 + methods.len() as u64),
-            ptr_align,
+            ptr_mem_pos * (3 + methods.len() as u64),
             MemoryKind::Vtable,
         );
         let tcx = &*self.tcx;
@@ -69,10 +68,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
         vtable_alloc.write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;
 
-        let size_ptr = vtable.offset(ptr_size, tcx)?;
-        vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
-        let align_ptr = vtable.offset(ptr_size * 2, tcx)?;
-        vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
+        let size_ptr = vtable.offset(ptr_mem_pos.size, tcx)?;
+        vtable_alloc.write_ptr_sized(tcx, size_ptr,
+            Scalar::from_uint(size, ptr_mem_pos.size).into())?;
+        let align_ptr = vtable.offset((ptr_mem_pos * 2).size, tcx)?;
+        vtable_alloc.write_ptr_sized(tcx, align_ptr,
+            Scalar::from_uint(align, ptr_mem_pos.size).into())?;
 
         for (i, method) in methods.iter().enumerate() {
             if let Some((def_id, substs)) = *method {
@@ -85,7 +86,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 ).ok_or_else(|| err_inval!(TooGeneric))?;
                 let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
-                // We cannot use `vtable_allic` as we are creating fn ptrs in this loop.
+                // We cannot use `vtable_alloc` as we are creating fn ptrs in this loop.
-                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?;
+                let method_ptr = vtable.offset((ptr_mem_pos * (3 + i as u64)).size, tcx)?;
                 self.memory.get_raw_mut(vtable.alloc_id)?
                     .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
             }
@@ -105,8 +106,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // we don't care about the pointee type, we just want a pointer
         let vtable = self.memory.check_ptr_access(
             vtable,
-            self.tcx.data_layout.pointer_size,
-            self.tcx.data_layout.pointer_align.abi,
+            self.tcx.data_layout.pointer_pos.mem_pos(),
         )?.expect("cannot be a ZST");
         let drop_fn = self.memory
             .get_raw(vtable.alloc_id)?
@@ -123,34 +123,33 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok((drop_instance, ty))
     }
 
-    pub fn read_size_and_align_from_vtable(
+    pub fn read_mem_pos_from_vtable(
         &self,
         vtable: Scalar<M::PointerTag>,
-    ) -> InterpResult<'tcx, (Size, Align)> {
-        let pointer_size = self.pointer_size();
+    ) -> InterpResult<'tcx, MemoryPosition> {
+        let ptr_pos = self.pointer_pos().mem_pos();
-        // We check for size = 3*ptr_size, that covers the drop fn (unused here),
+        // We check for size = 3 * ptr_size, which covers the drop fn (unused here),
         // the size, and the align (which we read below).
         let vtable = self.memory.check_ptr_access(
             vtable,
-            3*pointer_size,
-            self.tcx.data_layout.pointer_align.abi,
+            3 * ptr_pos,
         )?.expect("cannot be a ZST");
         let alloc = self.memory.get_raw(vtable.alloc_id)?;
         let size = alloc.read_ptr_sized(
             self,
-            vtable.offset(pointer_size, self)?
+            vtable.offset(ptr_pos.size, self)?
         )?.not_undef()?;
-        let size = self.force_bits(size, pointer_size)? as u64;
+        let size = self.force_bits(size, ptr_pos.size)? as u64;
         let align = alloc.read_ptr_sized(
             self,
-            vtable.offset(pointer_size * 2, self)?,
+            vtable.offset((ptr_pos * 2).size, self)?,
         )?.not_undef()?;
-        let align = self.force_bits(align, pointer_size)? as u64;
+        let align = self.force_bits(align, ptr_pos.size)? as u64;
 
         if size >= self.tcx.data_layout().obj_size_bound() {
             throw_ub_format!("invalid vtable: \
                 size is bigger than largest supported object");
         }
-        Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
+        Ok(MemoryPosition::new(Size::from_bytes(size), Align::from_bytes(align).unwrap()))
     }
 }
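To make the vtable arithmetic above concrete: the allocation holds three pointer-sized header slots (drop fn, size, align) followed by one slot per method, which is why both the allocation size and the method offsets scale the pointer position by `3 + n`. A standalone sketch of that layout:

```rust
// Byte size of a vtable with `num_methods` methods.
fn vtable_byte_size(ptr_size: u64, num_methods: u64) -> u64 {
    ptr_size * (3 + num_methods)
}

// Byte offset of method slot `idx` (the header occupies slots 0..3).
fn method_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    ptr_size * (3 + idx)
}

fn main() {
    // On a 64-bit target: 24-byte header, first method at offset 24.
    assert_eq!(vtable_byte_size(8, 2), 40);
    assert_eq!(method_slot_offset(8, 0), 24);
}
```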
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs
index d698b2e8d8f80..52ea4a8cdc46c 100644
--- a/src/librustc_mir/interpret/validity.rs
+++ b/src/librustc_mir/interpret/validity.rs
@@ -269,15 +269,14 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M
                 try_validation!(
                     self.ecx.memory.check_ptr_access(
                         vtable,
-                        3*self.ecx.tcx.data_layout.pointer_size, // drop, size, align
-                        self.ecx.tcx.data_layout.pointer_align.abi,
+                        3 * self.ecx.tcx.data_layout.pointer_pos.mem_pos(), // drop, size, align
                     ),
                     "dangling or unaligned vtable pointer in wide pointer or too small vtable",
                     self.path
                 );
                 try_validation!(self.ecx.read_drop_type_from_vtable(vtable),
                     "invalid drop fn in vtable", self.path);
-                try_validation!(self.ecx.read_size_and_align_from_vtable(vtable),
+                try_validation!(self.ecx.read_mem_pos_from_vtable(vtable),
                     "invalid size or align in vtable", self.path);
                 // FIXME: More checks for the vtable.
             }
@@ -376,7 +375,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
             ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
                 // NOTE: Keep this in sync with the array optimization for int/float
                 // types below!
-                let size = value.layout.size;
+                let size = value.layout.pref_pos.size;
                 let value = value.to_scalar_or_undef();
                 if self.ref_tracking_for_consts.is_some() {
                     // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
@@ -405,16 +404,16 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     self.check_wide_ptr_meta(place.meta, place.layout)?;
                 }
                 // Make sure this is dereferencable and all.
-                let (size, align) = self.ecx.size_and_align_of(place.meta, place.layout)?
+                let mem_pos = self.ecx.mem_pos_of(place.meta, place.layout)?
                     // for the purpose of validity, consider foreign types to have
                     // alignment and size determined by the layout (size will be 0,
                     // alignment should take attributes into account).
-                    .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+                    .unwrap_or_else(|| place.layout.pref_pos.mem_pos());
                 let ptr: Option<_> = match
                     self.ecx.memory.check_ptr_access_align(
                         place.ptr,
-                        size,
-                        Some(align),
+                        mem_pos.size,
+                        Some(mem_pos.align),
                         CheckInAllocMsg::InboundsTest,
                     )
                 {
@@ -422,7 +421,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     Err(err) => {
                         info!(
                             "{:?} did not pass access check for size {:?}, align {:?}",
-                            place.ptr, size, align
+                            place.ptr, mem_pos.size, mem_pos.align
                         );
                         match err.kind {
                             err_unsup!(InvalidNullPointerUsage) =>
@@ -463,7 +462,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     // `!` is a ZST and we want to validate it.
                     // Normalize before handing `place` to tracking because that will
                     // check for duplicates.
-                    let place = if size.bytes() > 0 {
+                    let place = if mem_pos.size.bytes() > 0 {
                         self.ecx.force_mplace_ptr(place)
                             .expect("we already bounds-checked")
                     } else {
@@ -508,7 +507,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         // Determine the allowed range
         let (lo, hi) = layout.valid_range.clone().into_inner();
         // `max_hi` is as big as the size fits
-        let max_hi = u128::max_value() >> (128 - op.layout.size.bits());
+        let max_hi = u128::max_value() >> (128 - op.layout.pref_pos.size.bits());
         assert!(hi <= max_hi);
         // We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
         if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
@@ -524,7 +523,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 wrapping_range_format(&layout.valid_range, max_hi),
             )
         );
-        let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
+        let bits = match value.to_bits_or_ptr(op.layout.pref_pos.size, self.ecx) {
             Err(ptr) => {
                 if lo == 1 && hi == max_hi {
                     // Only NULL is the niche.  So make sure the ptr is NOT NULL.
@@ -601,9 +600,9 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                     return Ok(());
                 }
-                // This is the element type size.
-                let ty_size = self.ecx.layout_of(tys)?.size;
+                // This is the element type's preferred position.
+                let ty_pref_pos = self.ecx.layout_of(tys)?.pref_pos;
-                // This is the size in bytes of the whole array.
-                let size = ty_size * len;
+                // This is the preferred position of the whole array.
+                let pref_pos = ty_pref_pos * len;
                 // Size is not 0, get a pointer.
                 let ptr = self.ecx.force_ptr(mplace.ptr)?;
 
@@ -620,7 +619,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes(
                     self.ecx,
                     ptr,
-                    size,
+                    pref_pos.size,
                     /*allow_ptr_and_undef*/ self.ref_tracking_for_consts.is_none(),
                 ) {
                     // In the happy case, we needn't check anything else.
@@ -633,7 +632,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                                 // Some byte was undefined, determine which
                                 // element that byte belongs to so we can
                                 // provide an index.
-                                let i = (offset.bytes() / ty_size.bytes()) as usize;
+                                let i = (offset.bytes() / ty_pref_pos.size.bytes()) as usize;
                                 self.path.push(PathElem::ArrayElem(i));
 
                                 throw_validation_failure!("undefined bytes", self.path)
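The `valid_range` check above allows ranges that wrap around the maximum value (e.g. the `NonZero*` niche `1..=MAX`, or a wrapping `254..=1`). A standalone sketch of the membership test this error path guards:

```rust
// Is `v` inside the wrapping range lo..=hi?
fn in_wrapping_range(v: u128, lo: u128, hi: u128) -> bool {
    if lo <= hi {
        lo <= v && v <= hi
    } else {
        // Wrapping: lo..=MAX plus 0..=hi.
        v <= hi || lo <= v
    }
}

fn main() {
    // NonZeroU8-style niche: 0 is the only invalid 8-bit value.
    assert!(!in_wrapping_range(0, 1, 255));
    // A wrapping range 254..=1 accepts 254, 255, 0, 1 and nothing else.
    assert!(in_wrapping_range(0, 254, 1));
    assert!(!in_wrapping_range(7, 254, 1));
}
```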
diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs
index a0d04bd593212..ed9ab9459aaca 100644
--- a/src/librustc_mir/transform/const_prop.rs
+++ b/src/librustc_mir/transform/const_prop.rs
@@ -480,7 +480,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                         let prim = this.ecx.read_immediate(arg)?;
                         // Need to do overflow check here: For actual CTFE, MIR
                         // generation emits code that does this before calling the op.
-                        if prim.to_bits()? == (1 << (prim.layout.size.bits() - 1)) {
+                        if prim.to_bits()? == (1 << (prim.layout.pref_pos.size.bits() - 1)) {
                             throw_panic!(OverflowNeg)
                         }
                     }
@@ -498,8 +498,8 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                     this.ecx.read_immediate(this.ecx.eval_operand(right, None)?)
                 })?;
                 if *op == BinOp::Shr || *op == BinOp::Shl {
-                    let left_bits = place_layout.size.bits();
-                    let right_size = r.layout.size;
+                    let left_bits = place_layout.pref_pos.size.bits();
+                    let right_size = r.layout.pref_pos.size;
                     let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size));
                     if r_bits.ok().map_or(false, |b| b >= left_bits as u128) {
                         let source_scope_local_data = match self.source_scope_local_data {
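On the `OverflowNeg` hunk above: for an n-bit signed integer, the only operand whose negation overflows is MIN, and its raw bit pattern is exactly `1 << (n - 1)`, which is what the comparison tests. A standalone sketch:

```rust
fn neg_overflows(raw_bits: u128, n_bits: u32) -> bool {
    raw_bits == 1u128 << (n_bits - 1)
}

fn main() {
    // i8::MIN is 0x80 == 1 << 7.
    assert!(neg_overflows(i8::MIN as u8 as u128, 8));
    assert!(!neg_overflows(5, 8));
}
```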
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 5a34e3f471f66..2ec45639b888e 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -354,7 +354,7 @@ impl Inliner<'tcx> {
-        // Count up the cost of local variables and temps, if we know the size
-        // use that, otherwise we use a moderately-large dummy cost.
+        // Count up the cost of local variables and temps; if we know the size,
+        // use that, otherwise use a moderately-large dummy cost.
 
-        let ptr_size = tcx.data_layout.pointer_size.bytes();
+        let ptr_size = tcx.data_layout.pointer_pos.size.bytes();
 
         for v in callee_body.vars_and_temps_iter() {
             let v = &callee_body.local_decls[v];
@@ -621,7 +621,7 @@ fn type_size_of<'tcx>(
     param_env: ty::ParamEnv<'tcx>,
     ty: Ty<'tcx>,
 ) -> Option<u64> {
-    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
+    tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.pref_pos.size.bytes())
 }
 
 /**
diff --git a/src/librustc_mir/util/alignment.rs b/src/librustc_mir/util/alignment.rs
index f949fcf0745f0..b4f854794683c 100644
--- a/src/librustc_mir/util/alignment.rs
+++ b/src/librustc_mir/util/alignment.rs
@@ -21,7 +21,7 @@ where
 
     let ty = place.ty(local_decls, tcx).ty;
     match tcx.layout_raw(param_env.and(ty)) {
-        Ok(layout) if layout.align.abi.bytes() == 1 => {
+        Ok(layout) if layout.pref_pos.align.abi.bytes() == 1 => {
             // if the alignment is 1, the type can't be further
             // disaligned.
             debug!("is_disaligned({:?}) - align = 1", place);
diff --git a/src/librustc_passes/layout_test.rs b/src/librustc_passes/layout_test.rs
index 06683c16e4a9b..0dd52814f8d39 100644
--- a/src/librustc_passes/layout_test.rs
+++ b/src/librustc_passes/layout_test.rs
@@ -65,13 +65,15 @@ impl VarianceTest<'tcx> {
                         sym::align => {
                             self.tcx
                                 .sess
-                                .span_err(item.span, &format!("align: {:?}", ty_layout.align));
+                                .span_err(item.span, &format!("align: {:?}",
+                                    ty_layout.pref_pos.align));
                         }
 
                         sym::size => {
                             self.tcx
                                 .sess
-                                .span_err(item.span, &format!("size: {:?}", ty_layout.size));
+                                .span_err(item.span, &format!("size: {:?}",
+                                    ty_layout.pref_pos.size));
                         }
 
                         sym::homogeneous_aggregate => {
diff --git a/src/librustc_target/abi/call/aarch64.rs b/src/librustc_target/abi/call/aarch64.rs
index 45fe4751a3dae..3847916b71e1c 100644
--- a/src/librustc_target/abi/call/aarch64.rs
+++ b/src/librustc_target/abi/call/aarch64.rs
@@ -7,7 +7,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
           C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
 {
     arg.layout.homogeneous_aggregate(cx).unit().and_then(|unit| {
-        let size = arg.layout.size;
+        let size = arg.layout.pref_pos.size;
 
         // Ensure we have at most four uniquely addressable members.
         if size > unit.size.checked_mul(4, cx).unwrap() {
@@ -43,7 +43,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
         ret.cast_to(uniform);
         return;
     }
-    let size = ret.layout.size;
+    let size = ret.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
@@ -77,7 +77,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
         arg.cast_to(uniform);
         return;
     }
-    let size = arg.layout.size;
+    let size = arg.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
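The homogeneous-aggregate check above encodes the AArch64 rule that such an aggregate may have at most four uniquely addressable members, expressed as a size comparison. A standalone sketch of that bound:

```rust
// A homogeneous aggregate of `unit_size`-byte members qualifies only if it
// fits in four units.
fn fits_in_four_units(total_size: u64, unit_size: u64) -> bool {
    total_size <= unit_size.checked_mul(4).expect("unit size overflow")
}

fn main() {
    // Four f64 members (32 bytes of 8-byte units) qualify; five do not.
    assert!(fits_in_four_units(32, 8));
    assert!(!fits_in_four_units(40, 8));
}
```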
diff --git a/src/librustc_target/abi/call/arm.rs b/src/librustc_target/abi/call/arm.rs
index ff929f33d8bc9..9119b54bb780d 100644
--- a/src/librustc_target/abi/call/arm.rs
+++ b/src/librustc_target/abi/call/arm.rs
@@ -8,7 +8,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
           C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
 {
     arg.layout.homogeneous_aggregate(cx).unit().and_then(|unit| {
-        let size = arg.layout.size;
+        let size = arg.layout.pref_pos.size;
 
         // Ensure we have at most four uniquely addressable members.
         if size > unit.size.checked_mul(4, cx).unwrap() {
@@ -48,7 +48,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, vfp: bool)
         }
     }
 
-    let size = ret.layout.size;
+    let size = ret.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 32 {
         let unit = if bits <= 8 {
@@ -83,8 +83,8 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, vfp: bool)
         }
     }
 
-    let align = arg.layout.align.abi.bytes();
-    let total = arg.layout.size;
+    let align = arg.layout.pref_pos.align.abi.bytes();
+    let total = arg.layout.pref_pos.size;
     arg.cast_to(Uniform {
         unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
         total
diff --git a/src/librustc_target/abi/call/hexagon.rs b/src/librustc_target/abi/call/hexagon.rs
index d3cae35f08924..534e43d4e9029 100644
--- a/src/librustc_target/abi/call/hexagon.rs
+++ b/src/librustc_target/abi/call/hexagon.rs
@@ -1,7 +1,7 @@
 use crate::abi::call::{FnAbi, ArgAbi};
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+    if ret.layout.is_aggregate() && ret.layout.pref_pos.size.bits() > 64 {
         ret.make_indirect();
     } else {
         ret.extend_integer_width_to(32);
@@ -9,7 +9,7 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+    if arg.layout.is_aggregate() && arg.layout.pref_pos.size.bits() > 64 {
         arg.make_indirect();
     } else {
         arg.extend_integer_width_to(32);
diff --git a/src/librustc_target/abi/call/mips.rs b/src/librustc_target/abi/call/mips.rs
index b2c8d26ff1f86..82fa1d3d12743 100644
--- a/src/librustc_target/abi/call/mips.rs
+++ b/src/librustc_target/abi/call/mips.rs
@@ -8,7 +8,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += cx.data_layout().pointer_size;
+        *offset += cx.data_layout().pointer_pos.size;
     }
 }
 
@@ -16,8 +16,8 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
     where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
 {
     let dl = cx.data_layout();
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+    let size = arg.layout.pref_pos.size;
+    let align = arg.layout.pref_pos.align.max(dl.i32_align).min(dl.i64_align).abi;
 
     if arg.layout.is_aggregate() {
         arg.cast_to(Uniform {
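The MIPS O32 hunk above clamps the effective argument alignment between the `i32` and `i64` alignments, i.e. into 4..=8 bytes. A standalone sketch of that clamp:

```rust
fn clamp_arg_align(align_bytes: u64) -> u64 {
    // Raise to at least the i32 alignment, cap at the i64 alignment.
    align_bytes.max(4).min(8)
}

fn main() {
    assert_eq!(clamp_arg_align(1), 4);
    assert_eq!(clamp_arg_align(16), 8);
}
```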
diff --git a/src/librustc_target/abi/call/mips64.rs b/src/librustc_target/abi/call/mips64.rs
index 18b121f9c5bef..6f237cfd0689d 100644
--- a/src/librustc_target/abi/call/mips64.rs
+++ b/src/librustc_target/abi/call/mips64.rs
@@ -40,7 +40,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
         return;
     }
 
-    let size = ret.layout.size;
+    let size = ret.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 128 {
         // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
@@ -83,7 +83,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
     }
 
     let dl = cx.data_layout();
-    let size = arg.layout.size;
+    let size = arg.layout.pref_pos.size;
     let mut prefix = [None; 8];
     let mut prefix_index = 0;
 
diff --git a/src/librustc_target/abi/call/mod.rs b/src/librustc_target/abi/call/mod.rs
index aced12aa32acb..451b9ff811d3a 100644
--- a/src/librustc_target/abi/call/mod.rs
+++ b/src/librustc_target/abi/call/mod.rs
@@ -291,7 +291,7 @@ impl<'a, Ty> TyLayout<'a, Ty> {
                 };
                 HomogeneousAggregate::Homogeneous(Reg {
                     kind,
-                    size: self.size
+                    size: self.pref_pos.size
                 })
             }
 
@@ -299,7 +299,7 @@ impl<'a, Ty> TyLayout<'a, Ty> {
                 assert!(!self.is_zst());
                 HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,
-                    size: self.size
+                    size: self.pref_pos.size
                 })
             }
 
@@ -348,7 +348,7 @@ impl<'a, Ty> TyLayout<'a, Ty> {
                     }
 
                     // Keep track of the offset (without padding).
-                    let size = field.size;
+                    let size = field.pref_pos.size;
                     if is_union {
                         total = total.max(size);
                     } else {
@@ -357,7 +357,7 @@ impl<'a, Ty> TyLayout<'a, Ty> {
                 }
 
                 // There needs to be no padding.
-                if total != self.size {
+                if total != self.pref_pos.size {
                     HomogeneousAggregate::Heterogeneous
                 } else {
                     match result {
@@ -409,10 +409,10 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         attrs.set(ArgAttribute::NoAlias)
              .set(ArgAttribute::NoCapture)
              .set(ArgAttribute::NonNull);
-        attrs.pointee_size = self.layout.size;
+        attrs.pointee_size = self.layout.pref_pos.size;
         // FIXME(eddyb) We should be doing this, but at least on
         // i686-pc-windows-msvc, it results in wrong stack offsets.
-        // attrs.pointee_align = Some(self.layout.align.abi);
+        // attrs.pointee_align = Some(self.layout.pref_pos.align.abi);
 
         let extra_attrs = if self.layout.is_unsized() {
             Some(ArgAttributes::new())
diff --git a/src/librustc_target/abi/call/msp430.rs b/src/librustc_target/abi/call/msp430.rs
index 3004bb9ff5d5b..3411332f9a9b7 100644
--- a/src/librustc_target/abi/call/msp430.rs
+++ b/src/librustc_target/abi/call/msp430.rs
@@ -10,7 +10,7 @@ use crate::abi::call::{ArgAbi, FnAbi};
 // places its address in the appropriate location: either in a register or on
 // the stack, according to its position in the argument list. (..)"
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+    if ret.layout.is_aggregate() && ret.layout.pref_pos.size.bits() > 32 {
         ret.make_indirect();
     } else {
         ret.extend_integer_width_to(16);
@@ -18,7 +18,7 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+    if arg.layout.is_aggregate() && arg.layout.pref_pos.size.bits() > 32 {
         arg.make_indirect();
     } else {
         arg.extend_integer_width_to(16);
diff --git a/src/librustc_target/abi/call/nvptx.rs b/src/librustc_target/abi/call/nvptx.rs
index 693337f0e52fd..cafa1f7e3633c 100644
--- a/src/librustc_target/abi/call/nvptx.rs
+++ b/src/librustc_target/abi/call/nvptx.rs
@@ -4,7 +4,7 @@
 use crate::abi::call::{ArgAbi, FnAbi};
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+    if ret.layout.is_aggregate() && ret.layout.pref_pos.size.bits() > 32 {
         ret.make_indirect();
     } else {
         ret.extend_integer_width_to(32);
@@ -12,7 +12,7 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+    if arg.layout.is_aggregate() && arg.layout.pref_pos.size.bits() > 32 {
         arg.make_indirect();
     } else {
         arg.extend_integer_width_to(32);
diff --git a/src/librustc_target/abi/call/nvptx64.rs b/src/librustc_target/abi/call/nvptx64.rs
index b9c9296dbacc7..31e4fe067e04f 100644
--- a/src/librustc_target/abi/call/nvptx64.rs
+++ b/src/librustc_target/abi/call/nvptx64.rs
@@ -4,7 +4,7 @@
 use crate::abi::call::{ArgAbi, FnAbi};
 
 fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+    if ret.layout.is_aggregate() && ret.layout.pref_pos.size.bits() > 64 {
         ret.make_indirect();
     } else {
         ret.extend_integer_width_to(64);
@@ -12,7 +12,7 @@ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
 }
 
 fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+    if arg.layout.is_aggregate() && arg.layout.pref_pos.size.bits() > 64 {
         arg.make_indirect();
     } else {
         arg.extend_integer_width_to(64);
diff --git a/src/librustc_target/abi/call/powerpc64.rs b/src/librustc_target/abi/call/powerpc64.rs
index f967a83d5f9b6..9b8dd1add06a2 100644
--- a/src/librustc_target/abi/call/powerpc64.rs
+++ b/src/librustc_target/abi/call/powerpc64.rs
@@ -21,21 +21,21 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: AB
     arg.layout.homogeneous_aggregate(cx).unit().and_then(|unit| {
         // ELFv1 only passes one-member aggregates transparently.
         // ELFv2 passes up to eight uniquely addressable members.
-        if (abi == ELFv1 && arg.layout.size > unit.size)
-                || arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
+        if (abi == ELFv1 && arg.layout.pref_pos.size > unit.size)
+                || arg.layout.pref_pos.size > unit.size.checked_mul(8, cx).unwrap() {
             return None;
         }
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
             RegKind::Float => true,
-            RegKind::Vector => arg.layout.size.bits() == 128
+            RegKind::Vector => arg.layout.pref_pos.size.bits() == 128
         };
 
         if valid_unit {
             Some(Uniform {
                 unit,
-                total: arg.layout.size
+                total: arg.layout.pref_pos.size
             })
         } else {
             None
@@ -63,7 +63,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, abi: ABI)
         return;
     }
 
-    let size = ret.layout.size;
+    let size = ret.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 128 {
         let unit = if cx.data_layout().endian == Endian::Big {
@@ -102,7 +102,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
         return;
     }
 
-    let size = arg.layout.size;
+    let size = arg.layout.pref_pos.size;
     let (unit, total) = if size.bits() <= 64 {
         // Aggregates smaller than a doubleword should appear in
         // the least-significant bits of the parameter doubleword.
diff --git a/src/librustc_target/abi/call/riscv.rs b/src/librustc_target/abi/call/riscv.rs
index 095e5aff74422..47fce547d7434 100644
--- a/src/librustc_target/abi/call/riscv.rs
+++ b/src/librustc_target/abi/call/riscv.rs
@@ -9,7 +9,7 @@ fn classify_ret<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     // "Aggregates larger than 2✕XLEN bits are passed by reference and are
     // replaced in the argument list with the address, as are C++ aggregates
     // with nontrivial copy constructors, destructors, or vtables."
-    if arg.layout.size.bits() > 2 * xlen {
+    if arg.layout.pref_pos.size.bits() > 2 * xlen {
         arg.make_indirect();
     }
 
@@ -25,7 +25,7 @@ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     // "Aggregates larger than 2✕XLEN bits are passed by reference and are
     // replaced in the argument list with the address, as are C++ aggregates
     // with nontrivial copy constructors, destructors, or vtables."
-    if arg.layout.size.bits() > 2 * xlen {
+    if arg.layout.pref_pos.size.bits() > 2 * xlen {
         arg.make_indirect();
     }
 
diff --git a/src/librustc_target/abi/call/s390x.rs b/src/librustc_target/abi/call/s390x.rs
index c3967cb3ff56a..cad199c27f218 100644
--- a/src/librustc_target/abi/call/s390x.rs
+++ b/src/librustc_target/abi/call/s390x.rs
@@ -7,7 +7,7 @@ use crate::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
 fn classify_ret<'a, Ty, C>(ret: &mut ArgAbi<'_, Ty>)
     where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
 {
-    if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+    if !ret.layout.is_aggregate() && ret.layout.pref_pos.size.bits() <= 64 {
         ret.extend_integer_width_to(64);
     } else {
         ret.make_indirect();
@@ -35,19 +35,19 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
     where Ty: TyLayoutMethods<'a, C> + Copy,
           C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
 {
-    if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+    if !arg.layout.is_aggregate() && arg.layout.pref_pos.size.bits() <= 64 {
         arg.extend_integer_width_to(64);
         return;
     }
 
     if is_single_fp_element(cx, arg.layout) {
-        match arg.layout.size.bytes() {
+        match arg.layout.pref_pos.size.bytes() {
             4 => arg.cast_to(Reg::f32()),
             8 => arg.cast_to(Reg::f64()),
             _ => arg.make_indirect()
         }
     } else {
-        match arg.layout.size.bytes() {
+        match arg.layout.pref_pos.size.bytes() {
             1 => arg.cast_to(Reg::i8()),
             2 => arg.cast_to(Reg::i16()),
             4 => arg.cast_to(Reg::i32()),
diff --git a/src/librustc_target/abi/call/sparc.rs b/src/librustc_target/abi/call/sparc.rs
index b2c8d26ff1f86..82fa1d3d12743 100644
--- a/src/librustc_target/abi/call/sparc.rs
+++ b/src/librustc_target/abi/call/sparc.rs
@@ -8,7 +8,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect();
-        *offset += cx.data_layout().pointer_size;
+        *offset += cx.data_layout().pointer_pos.size;
     }
 }
 
@@ -16,8 +16,8 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
     where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
 {
     let dl = cx.data_layout();
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+    let size = arg.layout.pref_pos.size;
+    let align = arg.layout.pref_pos.align.max(dl.i32_align).min(dl.i64_align).abi;
 
     if arg.layout.is_aggregate() {
         arg.cast_to(Uniform {
diff --git a/src/librustc_target/abi/call/sparc64.rs b/src/librustc_target/abi/call/sparc64.rs
index fe2c427f70310..97668da1ac9c8 100644
--- a/src/librustc_target/abi/call/sparc64.rs
+++ b/src/librustc_target/abi/call/sparc64.rs
@@ -10,20 +10,20 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
 {
     arg.layout.homogeneous_aggregate(cx).unit().and_then(|unit| {
         // Ensure we have at most eight uniquely addressable members.
-        if arg.layout.size > unit.size.checked_mul(8, cx).unwrap() {
+        if arg.layout.pref_pos.size > unit.size.checked_mul(8, cx).unwrap() {
             return None;
         }
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
             RegKind::Float => true,
-            RegKind::Vector => arg.layout.size.bits() == 128
+            RegKind::Vector => arg.layout.pref_pos.size.bits() == 128
         };
 
         if valid_unit {
             Some(Uniform {
                 unit,
-                total: arg.layout.size
+                total: arg.layout.pref_pos.size
             })
         } else {
             None
@@ -44,7 +44,7 @@ fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
         ret.cast_to(uniform);
         return;
     }
-    let size = ret.layout.size;
+    let size = ret.layout.pref_pos.size;
     let bits = size.bits();
     if bits <= 256 {
         let unit = Reg::i64();
@@ -73,7 +73,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
         return;
     }
 
-    let total = arg.layout.size;
+    let total = arg.layout.pref_pos.size;
     if total.bits() > 128 {
         arg.make_indirect();
         return;
diff --git a/src/librustc_target/abi/call/wasm32.rs b/src/librustc_target/abi/call/wasm32.rs
index 31b78337311ff..66e1591541df8 100644
--- a/src/librustc_target/abi/call/wasm32.rs
+++ b/src/librustc_target/abi/call/wasm32.rs
@@ -7,7 +7,7 @@ fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
 {
     if val.layout.is_aggregate() {
         if let Some(unit) = val.layout.homogeneous_aggregate(cx).unit() {
-            let size = val.layout.size;
+            let size = val.layout.pref_pos.size;
             if unit.size == size {
                 val.cast_to(Uniform {
                     unit,
diff --git a/src/librustc_target/abi/call/x86.rs b/src/librustc_target/abi/call/x86.rs
index be7574e799a99..fc471487f49b3 100644
--- a/src/librustc_target/abi/call/x86.rs
+++ b/src/librustc_target/abi/call/x86.rs
@@ -43,13 +43,13 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: F
                 // According to Clang, everyone but MSVC returns single-element
                 // float aggregates directly in a floating-point register.
                 if !t.options.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) {
-                    match fn_abi.ret.layout.size.bytes() {
+                    match fn_abi.ret.layout.pref_pos.size.bytes() {
                         4 => fn_abi.ret.cast_to(Reg::f32()),
                         8 => fn_abi.ret.cast_to(Reg::f64()),
                         _ => fn_abi.ret.make_indirect()
                     }
                 } else {
-                    match fn_abi.ret.layout.size.bytes() {
+                    match fn_abi.ret.layout.pref_pos.size.bytes() {
                         1 => fn_abi.ret.cast_to(Reg::i8()),
                         2 => fn_abi.ret.cast_to(Reg::i16()),
                         4 => fn_abi.ret.cast_to(Reg::i32()),
@@ -100,12 +100,12 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: F
 
             // At this point we know this must be a primitive of sorts.
             let unit = arg.layout.homogeneous_aggregate(cx).unit().unwrap();
-            assert_eq!(unit.size, arg.layout.size);
+            assert_eq!(unit.size, arg.layout.pref_pos.size);
             if unit.kind == RegKind::Float {
                 continue;
             }
 
-            let size_in_regs = (arg.layout.size.bits() + 31) / 32;
+            let size_in_regs = (arg.layout.pref_pos.size.bits() + 31) / 32;
 
             if size_in_regs == 0 {
                 continue;
@@ -117,7 +117,7 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: F
 
             free_regs -= size_in_regs;
 
-            if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
+            if arg.layout.pref_pos.size.bits() <= 32 && unit.kind == RegKind::Integer {
                 attrs.set(ArgAttribute::InReg);
             }
 
diff --git a/src/librustc_target/abi/call/x86_64.rs b/src/librustc_target/abi/call/x86_64.rs
index 452ca024e61b4..e67b733e0d07e 100644
--- a/src/librustc_target/abi/call/x86_64.rs
+++ b/src/librustc_target/abi/call/x86_64.rs
@@ -31,7 +31,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgAbi<'a, Ty>)
         where Ty: TyLayoutMethods<'a, C> + Copy,
             C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
     {
-        if !off.is_aligned(layout.align.abi) {
+        if !off.is_aligned(layout.pref_pos.align.abi) {
             if !layout.is_zst() {
                 return Err(Memory);
             }
@@ -69,7 +69,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgAbi<'a, Ty>)
 
         // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
         let first = (off.bytes() / 8) as usize;
-        let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
+        let last = ((off.bytes() + layout.pref_pos.size.bytes() - 1) / 8) as usize;
         for cls in &mut cls[first..=last] {
             *cls = Some(cls.map_or(c, |old| old.min(c)));
 
@@ -83,7 +83,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgAbi<'a, Ty>)
         Ok(())
     }
 
-    let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
+    let n = ((arg.layout.pref_pos.size.bytes() + 7) / 8) as usize;
     if n > MAX_EIGHTBYTES {
         return Err(Memory);
     }
@@ -225,7 +225,7 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
             Ok(ref cls) => {
                 // split into sized chunks passed individually
                 if arg.layout.is_aggregate() {
-                    let size = arg.layout.size;
+                    let size = arg.layout.pref_pos.size;
                     arg.cast_to(cast_target(cls, size))
                 } else {
                     arg.extend_integer_width_to(32);
diff --git a/src/librustc_target/abi/call/x86_win64.rs b/src/librustc_target/abi/call/x86_win64.rs
index 3c27d18a744ca..c76acd3b03b6a 100644
--- a/src/librustc_target/abi/call/x86_win64.rs
+++ b/src/librustc_target/abi/call/x86_win64.rs
@@ -9,7 +9,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
             Abi::Uninhabited => {}
             Abi::ScalarPair(..) |
             Abi::Aggregate { .. } => {
-                match a.layout.size.bits() {
+                match a.layout.pref_pos.size.bits() {
                     8 => a.cast_to(Reg::i8()),
                     16 => a.cast_to(Reg::i16()),
                     32 => a.cast_to(Reg::i32()),
@@ -22,7 +22,7 @@ pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
                 // (probably what clang calls "illegal vectors").
             }
             Abi::Scalar(_) => {
-                if a.layout.size.bytes() > 8 {
+                if a.layout.pref_pos.size.bytes() > 8 {
                     a.make_indirect();
                 } else {
                     a.extend_integer_width_to(32);
diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs
index 2d7e05037ba0d..87ed5a9a28115 100644
--- a/src/librustc_target/abi/mod.rs
+++ b/src/librustc_target/abi/mod.rs
@@ -22,12 +22,11 @@ pub struct TargetDataLayout {
     pub i128_align: AbiAndPrefAlign,
     pub f32_align: AbiAndPrefAlign,
     pub f64_align: AbiAndPrefAlign,
-    pub pointer_size: Size,
-    pub pointer_align: AbiAndPrefAlign,
+    pub pointer_pos: LayoutPositionPref,
     pub aggregate_align: AbiAndPrefAlign,
 
     /// Alignments for vector types.
-    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+    pub vector_align: Vec<LayoutPositionPref>,
 
     pub instruction_address_space: u32,
 }
@@ -46,12 +45,11 @@ impl Default for TargetDataLayout {
             i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
             f32_align: AbiAndPrefAlign::new(align(32)),
             f64_align: AbiAndPrefAlign::new(align(64)),
-            pointer_size: Size::from_bits(64),
-            pointer_align: AbiAndPrefAlign::new(align(64)),
+            pointer_pos: LayoutPositionPref::from_bits(64),
             aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
             vector_align: vec![
-                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
-                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
+                LayoutPositionPref::from_bits(64),
+                LayoutPositionPref::from_bits(128),
             ],
             instruction_address_space: 0,
         }
@@ -121,8 +119,9 @@ impl TargetDataLayout {
                     dl.f64_align = align(a, "f64")?
                 }
                 [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
-                    dl.pointer_size = size(s, p)?;
-                    dl.pointer_align = align(a, p)?;
+                    let pointer_size = size(s, p)?;
+                    let pointer_align = align(a, p)?;
+                    dl.pointer_pos = LayoutPositionPref::new(pointer_size, pointer_align);
                 }
                 [s, ref a @ ..] if s.starts_with("i") => {
                     let bits = match s[1..].parse::<u64>() {
@@ -151,12 +150,12 @@ impl TargetDataLayout {
                 [s, ref a @ ..] if s.starts_with("v") => {
                     let v_size = size(&s[1..], "v")?;
                     let a = align(a, s)?;
-                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
-                        v.1 = a;
+                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.size == v_size) {
+                        v.align = a;
                         continue;
                     }
                     // No existing entry, add a new one.
-                    dl.vector_align.push((v_size, a));
+                    dl.vector_align.push(LayoutPositionPref::new(v_size, a));
                 }
                 _ => {} // Ignore everything else.
             }
@@ -173,10 +172,10 @@ impl TargetDataLayout {
                                endian_str, target.target_endian));
         }
 
-        if dl.pointer_size.bits().to_string() != target.target_pointer_width {
+        if dl.pointer_pos.size.bits().to_string() != target.target_pointer_width {
             return Err(format!("inconsistent target specification: \"data-layout\" claims \
                                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
-                               dl.pointer_size.bits(), target.target_pointer_width));
+                               dl.pointer_pos.size.bits(), target.target_pointer_width));
         }
 
         Ok(dl)
@@ -194,7 +193,7 @@ impl TargetDataLayout {
     /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
     /// address space on 64-bit ARMv8 and x86_64.
     pub fn obj_size_bound(&self) -> u64 {
-        match self.pointer_size.bits() {
+        match self.pointer_pos.size.bits() {
             16 => 1 << 15,
             32 => 1 << 31,
             64 => 1 << 47,
@@ -203,7 +202,7 @@ impl TargetDataLayout {
     }
 
     pub fn ptr_sized_integer(&self) -> Integer {
-        match self.pointer_size.bits() {
+        match self.pointer_pos.size.bits() {
             16 => I16,
             32 => I32,
             64 => I64,
@@ -212,7 +211,7 @@ impl TargetDataLayout {
     }
 
     pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
-        for &(size, align) in &self.vector_align {
+        for &LayoutPositionPref { size, align } in &self.vector_align {
             if size == vec_size {
                 return align;
             }
@@ -451,6 +450,298 @@ impl AbiAndPrefAlign {
     }
 }
 
+/// A size, not necessarily rounded up to its alignment, together with the
+/// ABI-mandated and preferred alignment of its start.
+/// Better name appreciated.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+pub struct LayoutPositionPref {
+    /// The size, not rounded up to the alignment.
+    pub size: Size,
+    /// The alignment of the start of this range of bytes.
+    pub align: AbiAndPrefAlign,
+}
+
+impl LayoutPositionPref {
+    pub fn new_simple(size: Size, align: Align) -> LayoutPositionPref {
+        LayoutPositionPref {
+            size,
+            align: AbiAndPrefAlign::new(align),
+        }
+    }
+
+    pub fn new(size: Size, align: AbiAndPrefAlign) -> LayoutPositionPref {
+        LayoutPositionPref {
+            size,
+            align,
+        }
+    }
+
+    #[inline]
+    pub fn from_bits(bits: u64) -> LayoutPositionPref {
+        // Avoid potential overflow from `bits + 7`.
+        LayoutPositionPref::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
+    }
+
+    #[inline]
+    pub fn from_bytes(bytes: u64) -> LayoutPositionPref {
+        LayoutPositionPref::new_simple(Size::from_bytes(bytes), Align::from_bytes(bytes).unwrap())
+    }
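+
+    // For illustration: `LayoutPositionPref::from_bits(64)` (as in the
+    // default `pointer_pos` above) is an 8-byte position aligned to 8 bytes.
+    // `Align::from_bytes` only accepts powers of two, so these constructors
+    // are only meaningful for power-of-two byte counts.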
+
+    #[inline]
+    pub fn stride_to(self, align: Align) -> LayoutPositionPref {
+        LayoutPositionPref::new(self.size.align_to(align), self.align)
+    }
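+
+    // Sketch of the semantics, with illustrative values: a 5-byte position
+    // aligned to 4 strides to 8 bytes when rounded to an 8-byte boundary,
+    // while its own alignment stays untouched.
+    //
+    //     let pos = LayoutPositionPref::new_simple(
+    //         Size::from_bytes(5), Align::from_bytes(4).unwrap());
+    //     let strided = pos.stride_to(Align::from_bytes(8).unwrap());
+    //     assert_eq!(strided.size.bytes(), 8);
+    //     assert_eq!(strided.align.abi.bytes(), 4);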
+
+    #[inline]
+    pub fn max(self, other: LayoutPositionPref) -> LayoutPositionPref {
+        LayoutPositionPref::new(self.size.max(other.size), self.align.max(other.align))
+    }
+
+    pub fn padding_needed_for(self, align: Align) -> Size {
+        self.stride_to(align).size - self.size
+    }
+
+    pub fn repeat(self, count: u64) -> Self {
+        self * count
+    }
+
+    pub fn extend(self, other: LayoutPositionPref) -> (Self, Size) {
+        let p2 = self.stride_to(other.align.abi).size;
+        (LayoutPositionPref::new(p2 + other.size, self.align), p2)
+    }
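+
+    // Sketch of `extend` as one struct-layout step (illustrative sizes):
+    // placing a 4-byte/4-aligned field after a 1-byte/1-aligned one gives
+    // field offset 4 and a combined 8-byte position. The alignment is kept
+    // from `self`; raising it is the caller's job (via `align_to`).
+    //
+    //     let a = LayoutPositionPref::new_simple(
+    //         Size::from_bytes(1), Align::from_bytes(1).unwrap());
+    //     let b = LayoutPositionPref::new_simple(
+    //         Size::from_bytes(4), Align::from_bytes(4).unwrap());
+    //     let (combined, offset) = a.extend(b);
+    //     assert_eq!((offset.bytes(), combined.size.bytes()), (4, 8));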
+
+    #[inline]
+    pub fn align_to(self, align: AbiAndPrefAlign) -> LayoutPositionPref {
+        LayoutPositionPref::new(self.size, self.align.max(align))
+    }
+
+    #[inline]
+    pub fn pack_to(self, align: AbiAndPrefAlign) -> LayoutPositionPref {
+        LayoutPositionPref::new(self.size, self.align.min(align))
+    }
+
+    #[inline]
+    pub fn align_and_stride_to(self, align: AbiAndPrefAlign) -> LayoutPositionPref {
+        self.align_to(align).stride_to(align.abi)
+    }
+
+    /// Rounds the size up to the ABI alignment.
+    pub fn strided(self) -> LayoutPositionPref {
+        self.stride_to(self.align.abi)
+    }
+
+    /// Rounds the size up to the preferred alignment.
+    pub fn strided_pref(self) -> LayoutPositionPref {
+        self.stride_to(self.align.pref)
+    }
+
+    /// The size rounded up to the ABI alignment, i.e. the distance between
+    /// consecutive elements in an array of this layout.
+    pub fn stride(self) -> Size {
+        self.strided().size
+    }
+
+    /// The size rounded up to the preferred alignment.
+    pub fn pref_stride(self) -> Size {
+        self.strided_pref().size
+    }
+
+    #[inline]
+    pub fn is_aligned(self, align: Align) -> bool {
+        self.size.is_aligned(align)
+    }
+
+    #[inline]
+    pub fn checked_add<C: HasDataLayout>(self, other: LayoutPositionPref, cx: &C)
+    -> Option<LayoutPositionPref> {
+        let size = self.stride_to(other.align.abi).size.checked_add(other.size, cx)?;
+        Some(LayoutPositionPref::new(size, self.align))
+    }
+
+    #[inline]
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<LayoutPositionPref> {
+        Some(if count == 0 {
+            LayoutPositionPref::new(Size::ZERO, self.align)
+        } else {
+            LayoutPositionPref::new(self.stride().checked_mul(count - 1, cx)?, self.align) + self
+        })
+    }
+}
+
+impl Add for LayoutPositionPref {
+    type Output = LayoutPositionPref;
+    #[inline]
+    fn add(self, other: LayoutPositionPref) -> LayoutPositionPref {
+        self.extend(other).0
+    }
+}
+
+impl Mul<LayoutPositionPref> for u64 {
+    type Output = LayoutPositionPref;
+    #[inline]
+    fn mul(self, pos: LayoutPositionPref) -> LayoutPositionPref {
+        pos * self
+    }
+}
+
+impl Mul<u64> for LayoutPositionPref {
+    type Output = LayoutPositionPref;
+    #[inline]
+    fn mul(self, count: u64) -> LayoutPositionPref {
+        if count == 0 {
+            LayoutPositionPref::new(Size::ZERO, self.align)
+        } else {
+            LayoutPositionPref::new(self.stride() * (count - 1), self.align) + self
+        }
+    }
+}
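+
+// Sketch of the array semantics of `Mul`/`repeat` (illustrative sizes):
+// `count` elements occupy `stride * (count - 1) + size` bytes, i.e. the
+// trailing padding of the last element is not counted.
+//
+//     let elem = LayoutPositionPref::new_simple(
+//         Size::from_bytes(5), Align::from_bytes(4).unwrap());
+//     assert_eq!(elem.stride().bytes(), 8);
+//     assert_eq!((elem * 3).size.bytes(), 21); // 8 + 8 + 5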
+
+impl AddAssign for LayoutPositionPref {
+    #[inline]
+    fn add_assign(&mut self, other: LayoutPositionPref) {
+        *self = *self + other;
+    }
+}
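+
+// Illustrative use of `AddAssign` (`field_positions` is a hypothetical
+// iterator of `LayoutPositionPref` values): fields can be appended in
+// sequence, each placed at the next multiple of its own ABI alignment.
+//
+//     let mut pos = LayoutPositionPref::new_simple(
+//         Size::ZERO, Align::from_bytes(1).unwrap());
+//     for field_pos in field_positions {
+//         pos += field_pos;
+//     }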
+
+/// A size together with the (single) alignment of its start.
+/// Better name appreciated.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
+// I am opposed to adding these impls; hence the separate derive and this note.
+#[derive(PartialOrd, Ord)]
+pub struct MemoryPosition {
+    /// The size, not rounded up to the alignment.
+    pub size: Size,
+    /// The alignment of the start of this range of bytes.
+    pub align: Align,
+}
+
+impl LayoutPositionPref {
+    pub fn mem_pos(self) -> MemoryPosition {
+        MemoryPosition::new(self.size, self.align.abi)
+    }
+}
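+
+// Note the asymmetry of the conversions: `mem_pos` drops the preferred
+// alignment, and `pref_pos` (below) re-uses the ABI alignment for it, so for
+// any `pp: LayoutPositionPref`:
+//
+//     assert_eq!(pp.mem_pos().pref_pos().align.pref, pp.align.abi);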
+
+impl MemoryPosition {
+    pub fn new(size: Size, align: Align) -> MemoryPosition {
+        MemoryPosition {
+            size,
+            align,
+        }
+    }
+
+    pub fn pref_pos(self) -> LayoutPositionPref {
+        LayoutPositionPref::new_simple(self.size, self.align)
+    }
+
+    #[inline]
+    pub fn from_bits(bits: u64) -> MemoryPosition {
+        // Avoid potential overflow from `bits + 7`.
+        MemoryPosition::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
+    }
+
+    #[inline]
+    pub fn from_bytes(bytes: u64) -> MemoryPosition {
+        MemoryPosition::new(Size::from_bytes(bytes), Align::from_bytes(bytes).unwrap())
+    }
+
+    #[inline]
+    pub fn stride_to(self, align: Align) -> MemoryPosition {
+        MemoryPosition::new(self.size.align_to(align), self.align)
+    }
+
+    #[inline]
+    pub fn pack_to(self, align: Align) -> MemoryPosition {
+        MemoryPosition::new(self.size, self.align.min(align))
+    }
+
+    #[inline]
+    pub fn max(self, other: MemoryPosition) -> MemoryPosition {
+        MemoryPosition::new(self.size.max(other.size), self.align.max(other.align))
+    }
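+
+    // Sketch: `max` combines two positions roughly the way a union's layout
+    // would, taking the larger size and the stricter alignment:
+    //
+    //     let a = MemoryPosition::new(
+    //         Size::from_bytes(5), Align::from_bytes(1).unwrap());
+    //     let b = MemoryPosition::new(
+    //         Size::from_bytes(2), Align::from_bytes(4).unwrap());
+    //     let u = a.max(b);
+    //     assert_eq!((u.size.bytes(), u.align.bytes()), (5, 4));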
+
+    pub fn padding_needed_for(self, align: Align) -> Size {
+        self.stride_to(align).size - self.size
+    }
+
+    pub fn repeat(self, count: u64) -> Self {
+        self * count
+    }
+
+    pub fn extend(self, other: MemoryPosition) -> (Self, Size) {
+        let p2 = self.stride_to(other.align).size;
+        (MemoryPosition::new(p2 + other.size, self.align), p2)
+    }
+
+    #[inline]
+    pub fn align_to(self, align: Align) -> MemoryPosition {
+        MemoryPosition::new(self.size, self.align.max(align))
+    }
+
+    #[inline]
+    pub fn align_and_stride_to(self, align: Align) -> MemoryPosition {
+        self.align_to(align).stride_to(align)
+    }
+
+    /// Rounds the size up to the alignment.
+    pub fn strided(self) -> MemoryPosition {
+        self.stride_to(self.align)
+    }
+
+    /// The size rounded up to the alignment, i.e. the distance between
+    /// consecutive elements in an array of this layout.
+    pub fn stride(self) -> Size {
+        self.strided().size
+    }
+
+    #[inline]
+    pub fn is_aligned(self, align: Align) -> bool {
+        self.size.is_aligned(align)
+    }
+
+    #[inline]
+    pub fn checked_add<C: HasDataLayout>(self, other: Self, cx: &C) -> Option<Self> {
+        let size = self.stride_to(other.align).size.checked_add(other.size, cx)?;
+        Some(MemoryPosition::new(size, self.align))
+    }
+
+    #[inline]
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<MemoryPosition> {
+        if count == 0 {
+            Some(MemoryPosition::new(Size::ZERO, self.align))
+        } else {
+            Some(MemoryPosition::new(self.stride().checked_mul(count - 1, cx)?, self.align) + self)
+        }
+    }
+}
+
+impl Add for MemoryPosition {
+    type Output = MemoryPosition;
+    #[inline]
+    fn add(self, other: MemoryPosition) -> MemoryPosition {
+        self.extend(other).0
+    }
+}
+
+impl Mul<MemoryPosition> for u64 {
+    type Output = MemoryPosition;
+    #[inline]
+    fn mul(self, pos: MemoryPosition) -> MemoryPosition {
+        pos * self
+    }
+}
+
+impl Mul<u64> for MemoryPosition {
+    type Output = MemoryPosition;
+    #[inline]
+    fn mul(self, count: u64) -> MemoryPosition {
+        if count == 0 {
+            MemoryPosition::new(Size::ZERO, self.align)
+        } else {
+            MemoryPosition::new(self.stride() * (count - 1), self.align) + self
+        }
+    }
+}
+
+impl AddAssign for MemoryPosition {
+    #[inline]
+    fn add_assign(&mut self, other: MemoryPosition) {
+        *self = *self + other;
+    }
+}
+
 /// Integers, also used for enum discriminants.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
 pub enum Integer {
@@ -549,6 +840,17 @@ pub enum Primitive {
 }
 
 impl Primitive {
+    pub fn pref_pos<C: HasDataLayout>(self, cx: &C) -> LayoutPositionPref {
+        let dl = cx.data_layout();
+
+        match self {
+            Int(i, _) => LayoutPositionPref::new(i.size(), i.align(dl)),
+            F32 => LayoutPositionPref::new(Size::from_bits(32), dl.f32_align),
+            F64 => LayoutPositionPref::new(Size::from_bits(64), dl.f64_align),
+            Pointer => dl.pointer_pos
+        }
+    }
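+
+    // For illustration (with any `cx` that has a data layout): `pref_pos`
+    // bundles what the `size` and `align` methods below compute separately.
+    //
+    //     assert_eq!(F64.pref_pos(cx).size, F64.size(cx));
+    //     assert_eq!(F64.pref_pos(cx).align, F64.align(cx));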
+
     pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
         let dl = cx.data_layout();
 
@@ -556,7 +858,7 @@ impl Primitive {
             Int(i, _) => i.size(),
             F32 => Size::from_bits(32),
             F64 => Size::from_bits(64),
-            Pointer => dl.pointer_size
+            Pointer => dl.pointer_pos.size
         }
     }
 
@@ -567,7 +869,7 @@ impl Primitive {
             Int(i, _) => i.align(dl),
             F32 => dl.f32_align,
             F64 => dl.f64_align,
-            Pointer => dl.pointer_align
+            Pointer => dl.pointer_pos.align
         }
     }
 
@@ -916,22 +1218,19 @@ pub struct LayoutDetails {
     /// (i.e. outside of its `valid_range`), if it exists.
     pub largest_niche: Option<Niche>,
 
-    pub align: AbiAndPrefAlign,
-    pub size: Size
+    pub pref_pos: LayoutPositionPref
 }
 
 impl LayoutDetails {
     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
         let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar.clone());
-        let size = scalar.value.size(cx);
-        let align = scalar.value.align(cx);
+        let pref_pos = scalar.value.pref_pos(cx);
         LayoutDetails {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldPlacement::Union(0),
             abi: Abi::Scalar(scalar),
             largest_niche,
-            size,
-            align,
+            pref_pos,
         }
     }
 }
@@ -983,8 +1282,7 @@ pub enum PointerKind {
 
 #[derive(Copy, Clone)]
 pub struct PointeeInfo {
-    pub size: Size,
-    pub align: Align,
+    pub mem_pos: MemoryPosition,
     pub safe: Option<PointerKind>,
 }
 
@@ -1029,8 +1327,8 @@ impl<'a, Ty> TyLayout<'a, Ty> {
             Abi::Scalar(_) |
             Abi::ScalarPair(..) |
             Abi::Vector { .. } => false,
-            Abi::Uninhabited => self.size.bytes() == 0,
-            Abi::Aggregate { sized } => sized && self.size.bytes() == 0
+            Abi::Uninhabited => self.pref_pos.size.bytes() == 0,
+            Abi::Aggregate { sized } => sized && self.pref_pos.size.bytes() == 0
         }
     }
 }
diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs
index 72b5018589cf8..0bbf4d7ba1b42 100644
--- a/src/librustc_typeck/check/mod.rs
+++ b/src/librustc_typeck/check/mod.rs
@@ -2252,7 +2252,7 @@ fn check_transparent(tcx: TyCtxt<'_>, sp: Span, def_id: DefId) {
         // We are currently checking the type this field came from, so it must be local
         let span = tcx.hir().span_if_local(field.did).unwrap();
         let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
-        let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false);
+        let align1 = layout.map(|layout| layout.pref_pos.align.abi.bytes() == 1).unwrap_or(false);
         (span, zst, align1)
     });
 
diff --git a/src/librustc_typeck/coherence/builtin.rs b/src/librustc_typeck/coherence/builtin.rs
index d613abc435288..a2e51bda4319b 100644
--- a/src/librustc_typeck/coherence/builtin.rs
+++ b/src/librustc_typeck/coherence/builtin.rs
@@ -225,7 +225,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: DefId) {
                         let ty_b = field.ty(tcx, substs_b);
 
                         if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
-                            if layout.is_zst() && layout.details.align.abi.bytes() == 1 {
+                            if layout.is_zst() && layout.details.pref_pos.align.abi.bytes() == 1 {
                                 // ignore ZST fields with alignment of 1 byte
                                 return None;
                             }