|  | ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py | 
|  | ; RUN: llc < %s -mtriple=x86_64-- -verify-machineinstrs                           | FileCheck %s --check-prefix=X64 | 
|  | ; RUN: llc < %s -mtriple=i686-- -verify-machineinstrs           -mattr=+sse2      | FileCheck %s --check-prefixes=X86,X86-GENERIC,X86-SSE2 | 
|  | ; RUN: llc < %s -mtriple=i686-- -verify-machineinstrs -mcpu=slm -mattr=-sse2      | FileCheck %s --check-prefixes=X86,X86-GENERIC,X86-SLM | 
|  | ; RUN: llc < %s -mtriple=i686-- -verify-machineinstrs -mcpu=goldmont -mattr=-sse2 | FileCheck %s --check-prefixes=X86,X86-GENERIC,X86-SLM | 
|  | ; RUN: llc < %s -mtriple=i686-- -verify-machineinstrs -mcpu=knl -mattr=-sse2      | FileCheck %s --check-prefixes=X86,X86-GENERIC,X86-SLM | 
|  | ; RUN: llc < %s -mtriple=i686-- -verify-machineinstrs -mcpu=atom -mattr=-sse2     | FileCheck %s --check-prefixes=X86,X86-ATOM | 
|  |  | 
|  | ; On x86, an atomic rmw operation that does not modify the value in memory | 
|  | ; (such as atomic add 0) can be replaced by an mfence followed by a mov. | 
|  | ; This is explained (with the motivation for such an optimization) in | 
|  | ; http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf | 
|  |  | 
define i8 @add8(ptr %p) #0 {
; X64-LABEL: add8:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    movzbl (%rdi), %eax
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: add8:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    movzbl (%eax), %eax
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: add8:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    movzbl (%eax), %eax
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW: add 0 does not change memory, so it lowers to a barrier
; marker plus a plain byte load instead of a locked instruction.
%1 = atomicrmw add ptr %p, i8 0 syncscope("singlethread") monotonic
ret i8 %1
}
|  |  | 
define i16 @or16(ptr %p) #0 {
; X64-LABEL: or16:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    movzwl (%rdi), %eax
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or16:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    movzwl (%eax), %eax
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or16:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    movzwl (%eax), %eax
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW: or 0 leaves memory unchanged, so the same barrier-plus-load
; lowering applies (here with acquire ordering and a 16-bit access).
%1 = atomicrmw or ptr %p, i16 0 syncscope("singlethread") acquire
ret i16 %1
}
|  |  | 
define i32 @xor32(ptr %p) #0 {
; X64-LABEL: xor32:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: xor32:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    movl (%eax), %eax
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: xor32:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    movl (%eax), %eax
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW: xor 0 leaves memory unchanged; barrier-plus-load lowering
; with release ordering and a 32-bit access.
%1 = atomicrmw xor ptr %p, i32 0 syncscope("singlethread") release
ret i32 %1
}
|  |  | 
define i64 @sub64(ptr %p) #0 {
; X64-LABEL: sub64:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    movq (%rdi), %rax
; X64-NEXT:    retq
;
; X86-LABEL: sub64:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl (%esi), %eax
; X86-NEXT:    movl 4(%esi), %edx
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB3_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b (%esi)
; X86-NEXT:    jne .LBB3_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
; Idempotent RMW: sub 0. 64-bit x86 gets the barrier-plus-load lowering, but
; 32-bit x86 instead emits a lock cmpxchg8b loop for the 8-byte access (the
; checks above show the loop; see also the TODO on or64_nouse_seq_cst).
%1 = atomicrmw sub ptr %p, i64 0 syncscope("singlethread") seq_cst
ret i64 %1
}
|  |  | 
define i128 @or128(ptr %p) #0 {
; X64-LABEL: or128:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    xorl %esi, %esi
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    xorl %ecx, %ecx
; X64-NEXT:    callq __atomic_fetch_or_16@PLT
; X64-NEXT:    popq %rcx
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or128:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    pushl %ebp
; X86-GENERIC-NEXT:    movl %esp, %ebp
; X86-GENERIC-NEXT:    pushl %ebx
; X86-GENERIC-NEXT:    pushl %edi
; X86-GENERIC-NEXT:    pushl %esi
; X86-GENERIC-NEXT:    andl $-16, %esp
; X86-GENERIC-NEXT:    subl $48, %esp
; X86-GENERIC-NEXT:    movl 12(%ebp), %edi
; X86-GENERIC-NEXT:    movl 12(%edi), %ecx
; X86-GENERIC-NEXT:    movl 8(%edi), %edx
; X86-GENERIC-NEXT:    movl (%edi), %ebx
; X86-GENERIC-NEXT:    movl 4(%edi), %esi
; X86-GENERIC-NEXT:    .p2align 4
; X86-GENERIC-NEXT:  .LBB4_1: # %atomicrmw.start
; X86-GENERIC-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-GENERIC-NEXT:    movl %ebx, (%esp)
; X86-GENERIC-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    pushl $0
; X86-GENERIC-NEXT:    pushl $0
; X86-GENERIC-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    pushl %eax
; X86-GENERIC-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    pushl %eax
; X86-GENERIC-NEXT:    pushl %edi
; X86-GENERIC-NEXT:    pushl $16
; X86-GENERIC-NEXT:    calll __atomic_compare_exchange@PLT
; X86-GENERIC-NEXT:    addl $24, %esp
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-GENERIC-NEXT:    movl (%esp), %ebx
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-GENERIC-NEXT:    testb %al, %al
; X86-GENERIC-NEXT:    je .LBB4_1
; X86-GENERIC-NEXT:  # %bb.2: # %atomicrmw.end
; X86-GENERIC-NEXT:    movl 8(%ebp), %eax
; X86-GENERIC-NEXT:    movl %ebx, (%eax)
; X86-GENERIC-NEXT:    movl %esi, 4(%eax)
; X86-GENERIC-NEXT:    movl %edx, 8(%eax)
; X86-GENERIC-NEXT:    movl %ecx, 12(%eax)
; X86-GENERIC-NEXT:    leal -12(%ebp), %esp
; X86-GENERIC-NEXT:    popl %esi
; X86-GENERIC-NEXT:    popl %edi
; X86-GENERIC-NEXT:    popl %ebx
; X86-GENERIC-NEXT:    popl %ebp
; X86-GENERIC-NEXT:    retl $4
;
; X86-ATOM-LABEL: or128:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    pushl %ebp
; X86-ATOM-NEXT:    movl %esp, %ebp
; X86-ATOM-NEXT:    pushl %ebx
; X86-ATOM-NEXT:    pushl %edi
; X86-ATOM-NEXT:    pushl %esi
; X86-ATOM-NEXT:    andl $-16, %esp
; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
; X86-ATOM-NEXT:    movl 12(%ebp), %edi
; X86-ATOM-NEXT:    movl 12(%edi), %ecx
; X86-ATOM-NEXT:    movl 8(%edi), %edx
; X86-ATOM-NEXT:    movl (%edi), %esi
; X86-ATOM-NEXT:    movl 4(%edi), %ebx
; X86-ATOM-NEXT:    .p2align 4
; X86-ATOM-NEXT:  .LBB4_1: # %atomicrmw.start
; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-ATOM-NEXT:    movl %esi, (%esp)
; X86-ATOM-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %esi, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    pushl $0
; X86-ATOM-NEXT:    pushl $0
; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    pushl %eax
; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    pushl %eax
; X86-ATOM-NEXT:    pushl %edi
; X86-ATOM-NEXT:    pushl $16
; X86-ATOM-NEXT:    calll __atomic_compare_exchange@PLT
; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-ATOM-NEXT:    testb %al, %al
; X86-ATOM-NEXT:    movl (%esp), %esi
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-ATOM-NEXT:    je .LBB4_1
; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
; X86-ATOM-NEXT:    movl 8(%ebp), %eax
; X86-ATOM-NEXT:    movl %esi, (%eax)
; X86-ATOM-NEXT:    movl %ebx, 4(%eax)
; X86-ATOM-NEXT:    movl %edx, 8(%eax)
; X86-ATOM-NEXT:    movl %ecx, 12(%eax)
; X86-ATOM-NEXT:    leal -12(%ebp), %esp
; X86-ATOM-NEXT:    popl %esi
; X86-ATOM-NEXT:    popl %edi
; X86-ATOM-NEXT:    popl %ebx
; X86-ATOM-NEXT:    popl %ebp
; X86-ATOM-NEXT:    retl $4
; Idempotent RMW at i128: the barrier-plus-load optimization does not fire.
; The checks show x86-64 emitting a __atomic_fetch_or_16 libcall and 32-bit
; x86 emitting a __atomic_compare_exchange loop (see the or128_nouse_seq_cst
; TODO below for the related missed optimization).
%1 = atomicrmw or ptr %p, i128 0 syncscope("singlethread") monotonic
ret i128 %1
}
|  |  | 
|  | ; For 'and', the idempotent value is (-1) | 
define i32 @and32 (ptr %p) #0 {
; X64-LABEL: and32:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    movl (%rdi), %eax
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: and32:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    movl (%eax), %eax
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: and32:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    movl (%eax), %eax
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; and with all-ones leaves memory unchanged, so the barrier-plus-load
; lowering applies just as it does for the zero-operand tests above.
%1 = atomicrmw and ptr %p, i32 -1 syncscope("singlethread") acq_rel
ret i32 %1
}
|  |  | 
define void @or32_nouse_monotonic(ptr %p) #0 {
; X64-LABEL: or32_nouse_monotonic:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or32_nouse_monotonic:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or32_nouse_monotonic:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW whose result is unused: even the load is dropped and only
; the barrier marker remains.
atomicrmw or ptr %p, i32 0 syncscope("singlethread") monotonic
ret void
}
|  |  | 
|  |  | 
define void @or32_nouse_acquire(ptr %p) #0 {
; X64-LABEL: or32_nouse_acquire:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or32_nouse_acquire:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or32_nouse_acquire:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW, result unused, acquire ordering: only the barrier marker
; is emitted.
; NOTE(review): this is the only test here without syncscope("singlethread")
; (i.e. system scope) — presumably intentional coverage; confirm.
atomicrmw or ptr %p, i32 0 acquire
ret void
}
|  |  | 
define void @or32_nouse_release(ptr %p) #0 {
; X64-LABEL: or32_nouse_release:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or32_nouse_release:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or32_nouse_release:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW, result unused, release ordering: only the barrier marker
; is emitted.
atomicrmw or ptr %p, i32 0 syncscope("singlethread") release
ret void
}
|  |  | 
define void @or32_nouse_acq_rel(ptr %p) #0 {
; X64-LABEL: or32_nouse_acq_rel:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or32_nouse_acq_rel:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or32_nouse_acq_rel:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW, result unused, acq_rel ordering: only the barrier marker
; is emitted.
atomicrmw or ptr %p, i32 0 syncscope("singlethread") acq_rel
ret void
}
|  |  | 
define void @or32_nouse_seq_cst(ptr %p) #0 {
; X64-LABEL: or32_nouse_seq_cst:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or32_nouse_seq_cst:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or32_nouse_seq_cst:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent RMW, result unused, seq_cst ordering at singlethread scope:
; only the barrier marker is emitted (no mfence needed for a
; single-thread-scope fence, per the checks).
atomicrmw or ptr %p, i32 0 syncscope("singlethread") seq_cst
ret void
}
|  |  | 
|  | ; TODO: The value isn't used on 32 bit, so the cmpxchg8b is unneeded | 
; TODO: The value isn't used on 32 bit, so the cmpxchg8b is unneeded
define void @or64_nouse_seq_cst(ptr %p) #0 {
; X64-LABEL: or64_nouse_seq_cst:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-LABEL: or64_nouse_seq_cst:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl (%esi), %eax
; X86-NEXT:    movl 4(%esi), %edx
; X86-NEXT:    .p2align 4
; X86-NEXT:  .LBB11_1: # %atomicrmw.start
; X86-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    lock cmpxchg8b (%esi)
; X86-NEXT:    jne .LBB11_1
; X86-NEXT:  # %bb.2: # %atomicrmw.end
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %ebx
; X86-NEXT:    retl
; Idempotent i64 RMW, result unused: x86-64 emits only the barrier marker,
; but 32-bit x86 still emits a lock cmpxchg8b loop (see TODO above).
atomicrmw or ptr %p, i64 0 syncscope("singlethread") seq_cst
ret void
}
|  |  | 
|  | ; TODO: Don't need to lower as sync_and_fetch call | 
; TODO: Don't need to lower as sync_and_fetch call
define void @or128_nouse_seq_cst(ptr %p) #0 {
; X64-LABEL: or128_nouse_seq_cst:
; X64:       # %bb.0:
; X64-NEXT:    pushq %rax
; X64-NEXT:    xorl %esi, %esi
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    movl $5, %ecx
; X64-NEXT:    callq __atomic_fetch_or_16@PLT
; X64-NEXT:    popq %rax
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or128_nouse_seq_cst:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    pushl %ebp
; X86-GENERIC-NEXT:    movl %esp, %ebp
; X86-GENERIC-NEXT:    pushl %ebx
; X86-GENERIC-NEXT:    pushl %edi
; X86-GENERIC-NEXT:    pushl %esi
; X86-GENERIC-NEXT:    andl $-16, %esp
; X86-GENERIC-NEXT:    subl $48, %esp
; X86-GENERIC-NEXT:    movl 8(%ebp), %esi
; X86-GENERIC-NEXT:    movl 12(%esi), %ecx
; X86-GENERIC-NEXT:    movl 8(%esi), %edi
; X86-GENERIC-NEXT:    movl (%esi), %edx
; X86-GENERIC-NEXT:    movl 4(%esi), %ebx
; X86-GENERIC-NEXT:    .p2align 4
; X86-GENERIC-NEXT:  .LBB12_1: # %atomicrmw.start
; X86-GENERIC-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-GENERIC-NEXT:    movl %edx, (%esp)
; X86-GENERIC-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %ebx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-GENERIC-NEXT:    pushl $5
; X86-GENERIC-NEXT:    pushl $5
; X86-GENERIC-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    pushl %eax
; X86-GENERIC-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-GENERIC-NEXT:    pushl %eax
; X86-GENERIC-NEXT:    pushl %esi
; X86-GENERIC-NEXT:    pushl $16
; X86-GENERIC-NEXT:    calll __atomic_compare_exchange@PLT
; X86-GENERIC-NEXT:    addl $24, %esp
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-GENERIC-NEXT:    movl (%esp), %edx
; X86-GENERIC-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-GENERIC-NEXT:    testb %al, %al
; X86-GENERIC-NEXT:    je .LBB12_1
; X86-GENERIC-NEXT:  # %bb.2: # %atomicrmw.end
; X86-GENERIC-NEXT:    leal -12(%ebp), %esp
; X86-GENERIC-NEXT:    popl %esi
; X86-GENERIC-NEXT:    popl %edi
; X86-GENERIC-NEXT:    popl %ebx
; X86-GENERIC-NEXT:    popl %ebp
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or128_nouse_seq_cst:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    pushl %ebp
; X86-ATOM-NEXT:    movl %esp, %ebp
; X86-ATOM-NEXT:    pushl %ebx
; X86-ATOM-NEXT:    pushl %edi
; X86-ATOM-NEXT:    pushl %esi
; X86-ATOM-NEXT:    andl $-16, %esp
; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
; X86-ATOM-NEXT:    movl 8(%ebp), %esi
; X86-ATOM-NEXT:    movl %esp, %ebx
; X86-ATOM-NEXT:    movl 12(%esi), %ecx
; X86-ATOM-NEXT:    movl 8(%esi), %edx
; X86-ATOM-NEXT:    movl (%esi), %eax
; X86-ATOM-NEXT:    movl 4(%esi), %edi
; X86-ATOM-NEXT:    .p2align 4
; X86-ATOM-NEXT:  .LBB12_1: # %atomicrmw.start
; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
; X86-ATOM-NEXT:    movl %eax, (%esp)
; X86-ATOM-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %edx, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %edi, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X86-ATOM-NEXT:    pushl $5
; X86-ATOM-NEXT:    pushl $5
; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %eax
; X86-ATOM-NEXT:    pushl %eax
; X86-ATOM-NEXT:    pushl %ebx
; X86-ATOM-NEXT:    pushl %esi
; X86-ATOM-NEXT:    pushl $16
; X86-ATOM-NEXT:    calll __atomic_compare_exchange@PLT
; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
; X86-ATOM-NEXT:    testb %al, %al
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-ATOM-NEXT:    movl (%esp), %eax
; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-ATOM-NEXT:    je .LBB12_1
; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
; X86-ATOM-NEXT:    leal -12(%ebp), %esp
; X86-ATOM-NEXT:    popl %esi
; X86-ATOM-NEXT:    popl %edi
; X86-ATOM-NEXT:    popl %ebx
; X86-ATOM-NEXT:    popl %ebp
; X86-ATOM-NEXT:    retl
; Idempotent i128 RMW, result unused: still lowered to a libcall on x86-64
; and a __atomic_compare_exchange loop on 32-bit x86 (see TODO above). The
; $5 constants in the checks correspond to seq_cst memory order.
atomicrmw or ptr %p, i128 0 syncscope("singlethread") seq_cst
ret void
}
|  |  | 
|  |  | 
define void @or16_nouse_seq_cst(ptr %p) #0 {
; X64-LABEL: or16_nouse_seq_cst:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or16_nouse_seq_cst:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or16_nouse_seq_cst:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent i16 RMW, result unused: only the barrier marker is emitted.
atomicrmw or ptr %p, i16 0 syncscope("singlethread") seq_cst
ret void
}
|  |  | 
define void @or8_nouse_seq_cst(ptr %p) #0 {
; X64-LABEL: or8_nouse_seq_cst:
; X64:       # %bb.0:
; X64-NEXT:    #MEMBARRIER
; X64-NEXT:    retq
;
; X86-GENERIC-LABEL: or8_nouse_seq_cst:
; X86-GENERIC:       # %bb.0:
; X86-GENERIC-NEXT:    #MEMBARRIER
; X86-GENERIC-NEXT:    retl
;
; X86-ATOM-LABEL: or8_nouse_seq_cst:
; X86-ATOM:       # %bb.0:
; X86-ATOM-NEXT:    #MEMBARRIER
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    nop
; X86-ATOM-NEXT:    retl
; Idempotent i8 RMW, result unused: only the barrier marker is emitted.
atomicrmw or ptr %p, i8 0 syncscope("singlethread") seq_cst
ret void
}
|  |  | 
|  | attributes #0 = { nounwind } | 
|  |  | 
|  | ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: | 
|  | ; X86-SLM: {{.*}} | 
|  | ; X86-SSE2: {{.*}} |