Merge pull request #22008 from xedin/rdar-47334176

[Sema] Fix `resolveDependentMemberType` to properly handle nested typ…
diff --git a/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp b/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
index e7bced7..2407511 100644
--- a/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
+++ b/lib/SILOptimizer/Analysis/AccessSummaryAnalysis.cpp
@@ -32,7 +32,7 @@
     FunctionSummary &functionSummary = info->getSummary();
     ArgumentSummary &argSummary =
         functionSummary.getAccessForArgument(index);
-    index++;
+    ++index;
 
     auto *functionArg = cast<SILFunctionArgument>(arg);
     // Only summarize @inout_aliasable arguments.
@@ -426,7 +426,7 @@
       os << ", ";
     }
     os << subAccess.getDescription(BaseType, M);
-    index++;
+    ++index;
   }
   os << "]";
 
@@ -597,7 +597,7 @@
 
   const IndexTrieNode *iter = subPath;
   while (iter) {
-    length++;
+    ++length;
     iter = iter->getParent();
   }
 
@@ -631,7 +631,7 @@
   unsigned argCount = getArgumentCount();
   os << "(";
 
-  for (unsigned i = 0; i < argCount; i++) {
+  for (unsigned i = 0; i < argCount; ++i) {
     if (i > 0) {
       os << ",  ";
     }
diff --git a/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp b/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
index dc87f64..ecf3801 100644
--- a/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
+++ b/lib/SILOptimizer/Mandatory/PMOMemoryUseCollector.cpp
@@ -238,13 +238,6 @@
       continue;
     }
 
-#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...)             \
-  if (isa<Load##Name##Inst>(User)) {                                           \
-    Uses.emplace_back(User, PMOUseKind::Load);                                 \
-    continue;                                                                  \
-  }
-#include "swift/AST/ReferenceStorage.def"
-
     // Stores *to* the allocation are writes.
     if (isa<StoreInst>(User) && UI->getOperandNumber() == 1) {
       if (PointeeType.is<TupleType>()) {
@@ -263,20 +256,6 @@
       continue;
     }
 
-#define NEVER_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...)             \
-  if (auto *SI = dyn_cast<Store##Name##Inst>(User)) {                          \
-    if (UI->getOperandNumber() == 1) {                                         \
-      PMOUseKind Kind;                                                         \
-      if (SI->isInitializationOfDest())                                        \
-        Kind = PMOUseKind::Initialization;                                     \
-      else                                                                     \
-        Kind = PMOUseKind::Assign;                                             \
-      Uses.emplace_back(User, Kind);                                           \
-      continue;                                                                \
-    }                                                                          \
-  }
-#include "swift/AST/ReferenceStorage.def"
-
     if (auto *CAI = dyn_cast<CopyAddrInst>(User)) {
       // If this is a copy of a tuple, we should scalarize it so that we don't
       // have an access that crosses elements.
@@ -294,6 +273,8 @@
       auto Kind = ([&]() -> PMOUseKind {
         if (UI->getOperandNumber() == CopyAddrInst::Src)
           return PMOUseKind::Load;
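+        // For trivial types there is no old value to destroy, so an assigning
+        // copy_addr is equivalent to an initializing one. Collect it as
+        // InitOrAssign so that promotion can treat it either way.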
+        if (PointeeType.isTrivial(CAI->getModule()))
+          return PMOUseKind::InitOrAssign;
         if (CAI->isInitializationOfDest())
           return PMOUseKind::Initialization;
         return PMOUseKind::Assign;
diff --git a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
index 283f43f..3a36c73 100644
--- a/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
+++ b/lib/SILOptimizer/Mandatory/PredictableMemOpt.cpp
@@ -911,7 +911,7 @@
   assert((LoadUse.isValid() || StoreUse.isValid()) &&
          "we should have a load or a store, possibly both");
   assert(StoreUse.isInvalid() || StoreUse.Kind == Assign ||
-         StoreUse.Kind == Initialization);
+         StoreUse.Kind == Initialization || StoreUse.Kind == InitOrAssign);
 
   // Now that we've emitted a bunch of instructions, including a load and store
   // but also including other stuff, update the internal state of
@@ -930,6 +930,12 @@
       // something else), track it as an access.
       if (StoreUse.isValid()) {
         StoreUse.Inst = NewInst;
+        // If the store use from the copy_addr is an assign, then we know that
+        // before storing the new value we loaded the old value, so the store
+        // is actually initializing uninitialized memory when it occurs.
+        // Change the kind to Initialization.
+        if (StoreUse.Kind == Assign)
+          StoreUse.Kind = Initialization;
         NonLoadUses[NewInst] = Uses.size();
         Uses.push_back(StoreUse);
       }
@@ -948,11 +954,6 @@
       }
       continue;
 
-#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
-    case SILInstructionKind::Name##RetainInst: \
-    case SILInstructionKind::Name##ReleaseInst: \
-    case SILInstructionKind::StrongRetain##Name##Inst:
-#include "swift/AST/ReferenceStorage.def"
     case SILInstructionKind::RetainValueInst:
     case SILInstructionKind::StrongRetainInst:
     case SILInstructionKind::StrongReleaseInst:
@@ -1286,6 +1287,9 @@
 
     switch (u.Kind) {
     case PMOUseKind::Assign:
+      // Until we can promote the value being destroyed by the assign, we
+      // cannot remove deallocations that contain such assigns.
+      return false;
     case PMOUseKind::InitOrAssign:
       break;    // These don't prevent removal.
     case PMOUseKind::Initialization:
diff --git a/test/Interpreter/SDK/libc.swift b/test/Interpreter/SDK/libc.swift
index 0e1dd7d..e052e5a 100644
--- a/test/Interpreter/SDK/libc.swift
+++ b/test/Interpreter/SDK/libc.swift
@@ -13,6 +13,8 @@
   import Darwin
 #elseif os(Linux) || os(FreeBSD) || os(PS4) || os(Android)
   import Glibc
+#elseif os(Windows)
+  import MSVCRT
 #endif
 
 let sourcePath = CommandLine.arguments[1]
diff --git a/test/Interpreter/dynamic_replacement.swift b/test/Interpreter/dynamic_replacement.swift
index c358aa9..58f26c5 100644
--- a/test/Interpreter/dynamic_replacement.swift
+++ b/test/Interpreter/dynamic_replacement.swift
@@ -55,10 +55,11 @@
 
 #if os(Linux)
   import Glibc
-  let dylibSuffix = "so"
+#elseif os(Windows)
+  import MSVCRT
+  import WinSDK
 #else
   import Darwin
-  let dylibSuffix = "dylib"
 #endif
 
 var DynamicallyReplaceable = TestSuite("DynamicallyReplaceable")
@@ -108,6 +109,16 @@
             expectedResult(useOrig, "public_enum_generic_func"))
 }
 
+private func target_library_name(_ name: String) -> String {
+#if os(iOS) || os(macOS) || os(tvOS) || os(watchOS)
+  return "lib\(name).dylib"
+#elseif os(Windows)
+  return "\(name).dll"
+#else
+  return "lib\(name).so"
+#endif
+}
+
 DynamicallyReplaceable.test("DynamicallyReplaceable") {
   var executablePath = CommandLine.arguments[0]
   executablePath.removeLast(4)
@@ -118,9 +129,11 @@
   // Now, test with the module containing the replacements.
 
 #if os(Linux)
-	_ = dlopen("libModule2."+dylibSuffix, RTLD_NOW)
+	_ = dlopen(target_library_name("Module2"), RTLD_NOW)
+#elseif os(Windows)
+	_ = LoadLibraryA(target_library_name("Module2"))
 #else
-	_ = dlopen(executablePath+"libModule2."+dylibSuffix, RTLD_NOW)
+	_ = dlopen(target_library_name("Module2"), RTLD_NOW)
 #endif
 	checkExpectedResults(forOriginalLibrary: false)
 }
diff --git a/test/Interpreter/dynamic_replacement_chaining.swift b/test/Interpreter/dynamic_replacement_chaining.swift
index 2b30230..027fe41 100644
--- a/test/Interpreter/dynamic_replacement_chaining.swift
+++ b/test/Interpreter/dynamic_replacement_chaining.swift
@@ -24,25 +24,38 @@
 
 #if os(Linux)
   import Glibc
-  let dylibSuffix = "so"
+#elseif os(Windows)
+  import MSVCRT
+  import WinSDK
 #else
   import Darwin
-  let dylibSuffix = "dylib"
 #endif
 
 var DynamicallyReplaceable = TestSuite("DynamicallyReplaceableChaining")
 
+func target_library_name(_ name: String) -> String {
+#if os(iOS) || os(macOS) || os(tvOS) || os(watchOS)
+  return "lib\(name).dylib"
+#elseif os(Windows)
+  return "\(name).dll"
+#else
+  return "lib\(name).so"
+#endif
+}
 
 DynamicallyReplaceable.test("DynamicallyReplaceable") {
   var executablePath = CommandLine.arguments[0]
   executablePath.removeLast(4)
 
 #if os(Linux)
-	_ = dlopen("libB."+dylibSuffix, RTLD_NOW)
-	_ = dlopen("libC."+dylibSuffix, RTLD_NOW)
+	_ = dlopen(target_library_name("B"), RTLD_NOW)
+	_ = dlopen(target_library_name("C"), RTLD_NOW)
+#elseif os(Windows)
+	_ = LoadLibraryA(target_library_name("B"))
+	_ = LoadLibraryA(target_library_name("C"))
 #else
-	_ = dlopen(executablePath+"libB."+dylibSuffix, RTLD_NOW)
-	_ = dlopen(executablePath+"libC."+dylibSuffix, RTLD_NOW)
+	_ = dlopen(executablePath+target_library_name("B"), RTLD_NOW)
+	_ = dlopen(executablePath+target_library_name("C"), RTLD_NOW)
 #endif
 
 #if CHAINING
diff --git a/test/Interpreter/extended_grapheme_cluster_literal.swift b/test/Interpreter/extended_grapheme_cluster_literal.swift
index 3577596..a4fc2c8 100644
--- a/test/Interpreter/extended_grapheme_cluster_literal.swift
+++ b/test/Interpreter/extended_grapheme_cluster_literal.swift
@@ -17,7 +17,7 @@
   }
 }
 
-private func string(_ characters: UInt32...) -> String {
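+/// Builds a String from the given Unicode scalar values.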
+public func string(_ characters: UInt32...) -> String {
   return String(characters.map { Character(UnicodeScalar($0)!) })
 }
 private func expressible<T>(_ literal: Expressible<T>, as type: T.Type)
diff --git a/test/Interpreter/unions-and-bitfields.swift b/test/Interpreter/unions-and-bitfields.swift
index b4830fc..e553650 100644
--- a/test/Interpreter/unions-and-bitfields.swift
+++ b/test/Interpreter/unions-and-bitfields.swift
@@ -1,7 +1,9 @@
-// RUN: %target-build-swift %s -import-objc-header %S/Inputs/unions-and-bitfields.h -disable-bridging-pch -o %t
+// RUN: %target-build-swift %s -Xfrontend -enable-objc-interop -Xfrontend -disable-objc-attr-requires-foundation-module -import-objc-header %S/Inputs/unions-and-bitfields.h -disable-bridging-pch -o %t
 // RUN: %target-codesign %t
 // RUN: %target-run %t
+
 // REQUIRES: executable_test
+// REQUIRES: objc_interop
 
 // The -disable-bridging-pch above isn't actually relevant to the test; however,
 // precompiled headers don't play nice with the way we include the platform
diff --git a/test/SILOptimizer/predictable_deadalloc_elim.sil b/test/SILOptimizer/predictable_deadalloc_elim.sil
new file mode 100644
index 0000000..7e40d5a
--- /dev/null
+++ b/test/SILOptimizer/predictable_deadalloc_elim.sil
@@ -0,0 +1,267 @@
+// RUN: %target-sil-opt -enable-sil-verify-all %s -predictable-deadalloc-elim | %FileCheck %s
+
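+// This file checks which dead allocations the pass can eliminate by promoting
+// their stored values, and documents patterns it cannot yet handle (assigns
+// of non-trivial values, boxes released without a destroy_addr, and available
+// values that differ across basic blocks).
+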
+sil_stage canonical
+
+import Swift
+import Builtin
+
+// CHECK-LABEL: sil @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_trivial_stack'
+sil @simple_trivial_stack : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+  %1 = alloc_stack $Builtin.Int32
+  store %0 to %1 : $*Builtin.Int32
+  dealloc_stack %1 : $*Builtin.Int32
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_init_box'
+sil @simple_trivial_init_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+  %1 = alloc_box ${ var Builtin.Int32 }
+  %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+  store %0 to %2 : $*Builtin.Int32
+  strong_release %1 : ${ var Builtin.Int32 }
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+// CHECK-NOT: alloc_box
+// CHECK: } // end sil function 'simple_trivial_uninit_box'
+sil @simple_trivial_uninit_box : $@convention(thin) (Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32):
+  %1 = alloc_box ${ var Builtin.Int32 }
+  %2 = project_box %1 : ${ var Builtin.Int32 }, 0
+  store %0 to %2 : $*Builtin.Int32
+  dealloc_box %1 : ${ var Builtin.Int32 }
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_stack'
+sil @simple_nontrivial_stack : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+  %1 = alloc_stack $Builtin.NativeObject
+  store %0 to %1 : $*Builtin.NativeObject
+  destroy_addr %1 : $*Builtin.NativeObject
+  dealloc_stack %1 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// We do not handle this today, since we do not yet model the strong_release
+// of the alloc_box as a destroy_addr of the entire value.
+//
+// FIXME: We should be able to handle this.
+//
+// CHECK-LABEL: sil @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: alloc_box
+// CHECK: } // end sil function 'simple_nontrivial_init_box'
+sil @simple_nontrivial_init_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+  %1 = alloc_box ${ var Builtin.NativeObject }
+  %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+  store %0 to %2 : $*Builtin.NativeObject
+  strong_release %1 : ${ var Builtin.NativeObject }
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK-NEXT: tuple
+// CHECK-NEXT: return
+// CHECK: } // end sil function 'simple_nontrivial_uninit_box'
+sil @simple_nontrivial_uninit_box : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+  %1 = alloc_box ${ var Builtin.NativeObject }
+  %2 = project_box %1 : ${ var Builtin.NativeObject }, 0
+  store %0 to %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_box %1 : ${ var Builtin.NativeObject }
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+//////////////////
+// Assign Tests //
+//////////////////
+
+// Make sure that we do eliminate this allocation.
+// CHECK-LABEL: sil @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+// CHECK-NOT: alloc_stack
+// CHECK: } // end sil function 'simple_assign_take_trivial'
+sil @simple_assign_take_trivial : $@convention(thin) (Builtin.Int32, @in Builtin.Int32) -> () {
+bb0(%0 : $Builtin.Int32, %1 : $*Builtin.Int32):
+  %2 = alloc_stack $Builtin.Int32
+  store %0 to %2 : $*Builtin.Int32
+  copy_addr [take] %1 to %2 : $*Builtin.Int32
+  dealloc_stack %2 : $*Builtin.Int32
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// In this case, we perform an init followed by a copy. Since we do not want
+// to lose the +1 on the argument, we do not eliminate this (even though with
+// time perhaps we could).
+// CHECK-LABEL: sil @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_init_copy'
+sil @simple_init_copy : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  copy_addr %1 to [initialization] %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// This we can promote successfully.
+// CHECK-LABEL: sil @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG0:%.*]] : $Builtin.NativeObject, [[ARG1:%.*]] : $*Builtin.NativeObject):
+// CHECK-NOT: alloc_stack
+// CHECK:  strong_release [[ARG0]]
+// CHECK:  [[ARG1_LOADED:%.*]] = load [[ARG1]]
+// CHECK:  strong_release [[ARG1_LOADED]]
+// CHECK: } // end sil function 'simple_init_take'
+sil @simple_init_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// Since we are copying the input argument, we cannot get rid of the copy_addr,
+// meaning we shouldn't eliminate the allocation here.
+// CHECK-LABEL: sil @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_no_take'
+sil @simple_assign_no_take : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  copy_addr %1 to %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// If PMO understood how to promote assigns, we would be able to handle this
+// case.
+// CHECK-LABEL: sil @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_assign_take'
+sil @simple_assign_take : $@convention(thin) (@owned Builtin.NativeObject, @in Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// CHECK-LABEL: sil @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+// CHECK: bb0([[ARG:%.*]] :
+// CHECK-NOT: alloc_stack
+// CHECK-NOT: store
+// CHECK: bb3:
+// CHECK-NEXT: strong_release [[ARG]]
+// CHECK: } // end sil function 'simple_diamond_without_assign'
+sil @simple_diamond_without_assign : $@convention(thin) (@owned Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject):
+  %1 = alloc_stack $Builtin.NativeObject
+  store %0 to %1 : $*Builtin.NativeObject
+  cond_br undef, bb1, bb2
+
+bb1:
+  br bb3
+
+bb2:
+  br bb3
+
+bb3:
+  destroy_addr %1 : $*Builtin.NativeObject
+  dealloc_stack %1 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// We should not promote this because the copy_addr in bb1 assigns to %2.
+// CHECK-LABEL: sil @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign'
+sil @simple_diamond_with_assign : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  cond_br undef, bb1, bb2
+
+bb1:
+  copy_addr [take] %1 to %2 : $*Builtin.NativeObject
+  br bb3
+
+bb2:
+  br bb3
+
+bb3:
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}
+
+// Today PMO cannot handle different available values coming from different
+// BBs, though with time it could be taught to do so if necessary. That being
+// said, this test shows that we /tried/ and failed with the available value
+// test instead of failing earlier due to the copy_addr being an assign, since
+// we explode the copy_addr.
+// CHECK-LABEL: sil @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+// CHECK: alloc_stack
+// CHECK-NOT: copy_addr
+// CHECK: } // end sil function 'simple_diamond_with_assign_remove'
+sil @simple_diamond_with_assign_remove : $@convention(thin) (@owned Builtin.NativeObject, @in_guaranteed Builtin.NativeObject) -> () {
+bb0(%0 : $Builtin.NativeObject, %1 : $*Builtin.NativeObject):
+  %2 = alloc_stack $Builtin.NativeObject
+  store %0 to %2 : $*Builtin.NativeObject
+  cond_br undef, bb1, bb2
+
+bb1:
+  destroy_addr %2 : $*Builtin.NativeObject
+  copy_addr [take] %1 to [initialization] %2 : $*Builtin.NativeObject
+  br bb3
+
+bb2:
+  br bb3
+
+bb3:
+  destroy_addr %2 : $*Builtin.NativeObject
+  dealloc_stack %2 : $*Builtin.NativeObject
+  %9999 = tuple()
+  return %9999 : $()
+}