diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp
index e37ee264398e9..4096bba08d0c7 100644
--- a/src/intrinsics.cpp
+++ b/src/intrinsics.cpp
@@ -584,25 +584,21 @@ static jl_cgval_t emit_pointerref(jl_codectx_t &ctx, jl_cgval_t *argv)
     jl_value_t *ety = jl_tparam0(aty);
     if (jl_is_typevar(ety))
         return emit_runtime_pointerref(ctx, argv);
-    if (!jl_is_datatype(ety))
-        ety = (jl_value_t*)jl_any_type;
+    if (!is_valid_intrinsic_elptr(ety)) {
+        emit_error(ctx, "pointerref: invalid pointer type");
+        return jl_cgval_t();
+    }
 
     Value *idx = emit_unbox(ctx, T_size, i, (jl_value_t*)jl_long_type);
     Value *im1 = ctx.builder.CreateSub(idx, ConstantInt::get(T_size, 1));
 
     if (ety == (jl_value_t*)jl_any_type) {
         Value *thePtr = emit_unbox(ctx, T_pprjlvalue, e, e.typ);
-        return mark_julia_type(
-            ctx,
-            ctx.builder.CreateAlignedLoad(ctx.builder.CreateInBoundsGEP(T_prjlvalue, thePtr, im1), Align(align_nb)),
-            true,
-            ety);
+        LoadInst *load = ctx.builder.CreateAlignedLoad(ctx.builder.CreateInBoundsGEP(T_prjlvalue, thePtr, im1), Align(align_nb));
+        tbaa_decorate(tbaa_data, load);
+        return mark_julia_type(ctx, load, true, ety);
     }
     else if (!jl_isbits(ety)) {
-        if (!jl_is_structtype(ety) || jl_is_array_type(ety) || !jl_is_concrete_type(ety)) {
-            emit_error(ctx, "pointerref: invalid pointer type");
-            return jl_cgval_t();
-        }
         assert(jl_is_datatype(ety));
         uint64_t size = jl_datatype_size(ety);
         Value *strct = emit_allocobj(ctx, size,
@@ -656,8 +652,8 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, jl_cgval_t *argv)
         return emit_runtime_pointerset(ctx, argv);
     if (align.constant == NULL || !jl_is_long(align.constant))
         return emit_runtime_pointerset(ctx, argv);
-    if (!jl_is_datatype(ety))
-        ety = (jl_value_t*)jl_any_type;
+    if (!is_valid_intrinsic_elptr(ety))
+        emit_error(ctx, "pointerset: invalid pointer type");
     emit_typecheck(ctx, x, ety, "pointerset");
 
     Value *idx = emit_unbox(ctx, T_size, i, (jl_value_t*)jl_long_type);
@@ -673,10 +669,6 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, jl_cgval_t *argv)
         tbaa_decorate(tbaa_data, store);
     }
     else if (!jl_isbits(ety)) {
-        if (!jl_is_structtype(ety) || jl_is_array_type(ety) || !jl_is_concrete_type(ety)) {
-            emit_error(ctx, "pointerset: invalid pointer type");
-            return jl_cgval_t();
-        }
         thePtr = emit_unbox(ctx, T_pint8, e, e.typ);
         uint64_t size = jl_datatype_size(ety);
         im1 = ctx.builder.CreateMul(im1, ConstantInt::get(T_size,
@@ -696,6 +688,170 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, jl_cgval_t *argv)
     return e;
 }
 
+static jl_cgval_t emit_atomicfence(jl_codectx_t &ctx, jl_cgval_t *argv)
+{
+    const jl_cgval_t &ord = argv[0];
+    if (ord.constant && jl_is_symbol(ord.constant)) {
+        enum jl_memory_order order = jl_get_atomic_order((jl_sym_t*)ord.constant, false, false);
+        if (order == jl_memory_order_invalid) {
+            emit_atomic_error(ctx, "invalid atomic ordering");
+            return jl_cgval_t(); // unreachable
+        }
+        if (order > jl_memory_order_monotonic)
+            ctx.builder.CreateFence(get_llvm_atomic_order(order));
+        return ghostValue(jl_nothing_type);
+    }
+    return emit_runtime_call(ctx, atomic_fence, argv, 1);
+}
+
+static jl_cgval_t emit_atomic_pointerref(jl_codectx_t &ctx, jl_cgval_t *argv)
+{
+    const jl_cgval_t &e = argv[0];
+    const jl_cgval_t &ord = argv[1];
+    jl_value_t *aty = e.typ;
+    if (!jl_is_cpointer_type(aty) || !ord.constant || !jl_is_symbol(ord.constant))
+        return emit_runtime_call(ctx, atomic_pointerref, argv, 2);
+    jl_value_t *ety = jl_tparam0(aty);
+    if (jl_is_typevar(ety))
+        return emit_runtime_call(ctx, atomic_pointerref, argv, 2);
+    enum jl_memory_order order = jl_get_atomic_order((jl_sym_t*)ord.constant, true, false);
+    if (order == jl_memory_order_invalid) {
+        emit_atomic_error(ctx, "invalid atomic ordering");
+        return jl_cgval_t(); // unreachable
+    }
+    AtomicOrdering llvm_order = get_llvm_atomic_order(order);
+
+    if (ety == (jl_value_t*)jl_any_type) {
+        Value *thePtr = emit_unbox(ctx, T_pprjlvalue, e, e.typ);
+        LoadInst *load = ctx.builder.CreateAlignedLoad(thePtr, Align(sizeof(jl_value_t*)));
+        tbaa_decorate(tbaa_data, load);
+        load->setOrdering(llvm_order);
+        return mark_julia_type(ctx, load, true, ety);
+    }
+
+    if (!is_valid_intrinsic_elptr(ety)) {
+        emit_error(ctx, "atomic_pointerref: invalid pointer type");
+        return jl_cgval_t();
+    }
+
+    size_t nb = jl_datatype_size(ety);
+    if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE) {
+        emit_error(ctx, "atomic_pointerref: invalid pointer for atomic operation");
+        return jl_cgval_t();
+    }
+
+    if (!jl_isbits(ety)) {
+        assert(jl_is_datatype(ety));
+        uint64_t size = jl_datatype_size(ety);
+        Value *strct = emit_allocobj(ctx, size,
+                                     literal_pointer_val(ctx, ety));
+        Value *thePtr = emit_unbox(ctx, T_pint8, e, e.typ);
+        Type *loadT = Type::getIntNTy(jl_LLVMContext, nb * 8);
+        thePtr = emit_bitcast(ctx, thePtr, loadT->getPointerTo());
+        MDNode *tbaa = best_tbaa(ety);
+        LoadInst *load = ctx.builder.CreateAlignedLoad(loadT, thePtr, Align(nb));
+        tbaa_decorate(tbaa, load);
+        load->setOrdering(llvm_order);
+        thePtr = emit_bitcast(ctx, strct, thePtr->getType());
+        StoreInst *store = ctx.builder.CreateAlignedStore(load, thePtr, Align(julia_alignment(ety)));
+        tbaa_decorate(tbaa, store);
+        return mark_julia_type(ctx, strct, true, ety);
+    }
+    else {
+        bool isboxed;
+        Type *ptrty = julia_type_to_llvm(ctx, ety, &isboxed);
+        assert(!isboxed);
+        if (!type_is_ghost(ptrty)) {
+            Value *thePtr = emit_unbox(ctx, ptrty->getPointerTo(), e, e.typ);
+            return typed_load(ctx, thePtr, nullptr, ety, tbaa_data, nullptr, isboxed, llvm_order, true, nb);
+        }
+        else {
+            if (order > jl_memory_order_monotonic)
+                ctx.builder.CreateFence(llvm_order);
+            return ghostValue(ety);
+        }
+    }
+}
+
+// e[i] = x (set)
+// e[i] <= x (swap)
+// e[i] y => x (replace)
+static jl_cgval_t emit_atomic_pointerset(jl_codectx_t &ctx, intrinsic f, const jl_cgval_t *argv, int nargs)
+{
+    bool issetfield = f == atomic_pointerset;
+    bool isreplacefield = f == atomic_pointerreplace;
+    const jl_cgval_t undefval;
+    const jl_cgval_t &e = argv[0];
+    const jl_cgval_t &x = isreplacefield ? argv[2] : argv[1];
+    const jl_cgval_t &y = isreplacefield ? argv[1] : undefval;
+    const jl_cgval_t &ord = isreplacefield ? argv[3] : argv[2];
+    const jl_cgval_t &failord = isreplacefield ? argv[4] : undefval;
+
+    jl_value_t *aty = e.typ;
+    if (!jl_is_cpointer_type(aty) || !ord.constant || !jl_is_symbol(ord.constant))
+        return emit_runtime_call(ctx, f, argv, nargs);
+    if (isreplacefield) {
+        if (!failord.constant || !jl_is_symbol(failord.constant))
+            return emit_runtime_call(ctx, f, argv, nargs);
+    }
+    jl_value_t *ety = jl_tparam0(aty);
+    if (jl_is_typevar(ety))
+        return emit_runtime_call(ctx, f, argv, nargs);
+    enum jl_memory_order order = jl_get_atomic_order((jl_sym_t*)ord.constant, !issetfield, true);
+    enum jl_memory_order failorder = isreplacefield ?
+        jl_get_atomic_order((jl_sym_t*)failord.constant, true, false) : order;
+    if (order == jl_memory_order_invalid || failorder == jl_memory_order_invalid || failorder > order) {
+        emit_atomic_error(ctx, "invalid atomic ordering");
+        return jl_cgval_t(); // unreachable
+    }
+    AtomicOrdering llvm_order = get_llvm_atomic_order(order);
+    AtomicOrdering llvm_failorder = get_llvm_atomic_order(failorder);
+
+    if (ety == (jl_value_t*)jl_any_type) {
+        // unsafe_store to Ptr{Any} is allowed to implicitly drop GC roots.
+        // n.b.: the expected value (y) must be rooted, but not the others
+        Value *thePtr = emit_unbox(ctx, T_pprjlvalue, e, e.typ);
+        bool isboxed = true;
+        jl_cgval_t ret = typed_store(ctx, thePtr, nullptr, x, y, ety, tbaa_data, nullptr, nullptr, isboxed,
+                    llvm_order, llvm_failorder, sizeof(jl_value_t*), false, issetfield, isreplacefield, false);
+        if (issetfield)
+            ret = e;
+        return ret;
+    }
+
+    if (!is_valid_intrinsic_elptr(ety)) {
+        std::string msg(StringRef(jl_intrinsic_name((int)f)));
+        msg += ": invalid pointer type";
+        emit_error(ctx, msg);
+        return jl_cgval_t();
+    }
+    emit_typecheck(ctx, x, ety, std::string(jl_intrinsic_name((int)f)));
+
+    size_t nb = jl_datatype_size(ety);
+    if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE) {
+        std::string msg(StringRef(jl_intrinsic_name((int)f)));
+        msg += ": invalid pointer for atomic operation";
+        emit_error(ctx, msg);
+        return jl_cgval_t();
+    }
+
+    if (!jl_isbits(ety)) {
+        //Value *thePtr = emit_unbox(ctx, T_pint8, e, e.typ);
+        //uint64_t size = jl_datatype_size(ety);
+        return emit_runtime_call(ctx, f, argv, nargs); // TODO: optimizations
+    }
+    else {
+        bool isboxed;
+        Type *ptrty = julia_type_to_llvm(ctx, ety, &isboxed);
+        assert(!isboxed);
+        Value *thePtr = emit_unbox(ctx, ptrty->getPointerTo(), e, e.typ);
+        jl_cgval_t ret = typed_store(ctx, thePtr, nullptr, x, y, ety, tbaa_data, nullptr, nullptr, isboxed,
+                    llvm_order, llvm_failorder, nb, false, issetfield, isreplacefield, false);
+        if (issetfield)
+            ret = e;
+        return ret;
+    }
+}
+
 static Value *emit_checked_srem_int(jl_codectx_t &ctx, Value *x, Value *den)
 {
     Type *t = den->getType();
@@ -924,11 +1080,14 @@ static jl_cgval_t emit_intrinsic(jl_codectx_t &ctx, intrinsic f, jl_value_t **ar
     case pointerset:
         return emit_pointerset(ctx, argv);
     case atomic_fence:
+        return emit_atomicfence(ctx, argv);
     case atomic_pointerref:
+        return emit_atomic_pointerref(ctx, argv);
     case atomic_pointerset:
     case atomic_pointerswap:
-    case atomic_pointermodify:
     case atomic_pointerreplace:
+        return emit_atomic_pointerset(ctx, f, argv, nargs);
+    case atomic_pointermodify:
        return emit_runtime_call(ctx, f, argv, nargs);
     case bitcast:
         return generic_bitcast(ctx, argv);
diff --git a/src/julia_internal.h b/src/julia_internal.h
index e044ecc1bce61..44bab66b083f9 100644
--- a/src/julia_internal.h
+++ b/src/julia_internal.h
@@ -1128,6 +1128,10 @@ extern JL_DLLEXPORT jl_value_t *jl_segv_exception;
 JL_DLLEXPORT const char *jl_intrinsic_name(int f) JL_NOTSAFEPOINT;
 unsigned jl_intrinsic_nargs(int f) JL_NOTSAFEPOINT;
 
+STATIC_INLINE int is_valid_intrinsic_elptr(jl_value_t *ety)
+{
+    return ety == (jl_value_t*)jl_any_type || (jl_is_concrete_type(ety) && !jl_is_layout_opaque(((jl_datatype_t*)ety)->layout));
+}
 JL_DLLEXPORT jl_value_t *jl_bitcast(jl_value_t *ty, jl_value_t *v);
 JL_DLLEXPORT jl_value_t *jl_pointerref(jl_value_t *p, jl_value_t *i, jl_value_t *align);
 JL_DLLEXPORT jl_value_t *jl_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t *align, jl_value_t *i);
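Illustrative note (not from the patch): `is_valid_intrinsic_elptr` accepts `Ptr{Any}` and pointers whose element type is concrete with a known (non-opaque) layout; anything else is rejected with the "invalid pointer" errors used throughout this change. A minimal sketch from the Julia side, with `Ptr{Integer}` chosen purely as an example of an abstract element type:

```julia
r = Ref{Int}(1)
p_ok  = Base.unsafe_convert(Ptr{Int}, r)  # concrete isbits element type: accepted
p_bad = convert(Ptr{Integer}, p_ok)       # abstract element type: assumed to hit the new error path
GC.@preserve r begin
    Core.Intrinsics.pointerref(p_ok, 1, 1)       # === 1
    try
        Core.Intrinsics.pointerref(p_bad, 1, 1)  # expected: "pointerref: invalid pointer"
    catch err
        err isa ErrorException || rethrow()
    end
end
```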
diff --git a/src/runtime_intrinsics.c b/src/runtime_intrinsics.c
index c7c31bb98a86d..7cb58bc230294 100644
--- a/src/runtime_intrinsics.c
+++ b/src/runtime_intrinsics.c
@@ -43,7 +43,7 @@ JL_DLLEXPORT jl_value_t *jl_pointerref(jl_value_t *p, jl_value_t *i, jl_value_t
         return *pp;
     }
     else {
-        if (!jl_is_datatype(ety))
+        if (!is_valid_intrinsic_elptr(ety))
             jl_error("pointerref: invalid pointer");
         size_t nb = LLT_ALIGN(jl_datatype_size(ety), jl_datatype_align(ety));
         char *pp = (char*)jl_unbox_long(p) + (jl_unbox_long(i)-1)*nb;
@@ -56,14 +56,14 @@ JL_DLLEXPORT jl_value_t *jl_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t
 {
     JL_TYPECHK(pointerset, pointer, p);
     JL_TYPECHK(pointerset, long, i);
-    JL_TYPECHK(pointerref, long, align);
+    JL_TYPECHK(pointerset, long, align);
     jl_value_t *ety = jl_tparam0(jl_typeof(p));
     if (ety == (jl_value_t*)jl_any_type) {
         jl_value_t **pp = (jl_value_t**)(jl_unbox_long(p) + (jl_unbox_long(i)-1)*sizeof(void*));
         *pp = x;
     }
     else {
-        if (!jl_is_datatype(ety))
+        if (!is_valid_intrinsic_elptr(ety))
             jl_error("pointerset: invalid pointer");
         if (jl_typeof(x) != ety)
             jl_type_error("pointerset", ety, x);
@@ -77,8 +77,8 @@ JL_DLLEXPORT jl_value_t *jl_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t
 
 JL_DLLEXPORT jl_value_t *jl_atomic_pointerref(jl_value_t *p, jl_value_t *order)
 {
-    JL_TYPECHK(pointerref, pointer, p);
-    JL_TYPECHK(pointerref, symbol, order)
+    JL_TYPECHK(atomic_pointerref, pointer, p);
+    JL_TYPECHK(atomic_pointerref, symbol, order)
     (void)jl_get_atomic_order_checked((jl_sym_t*)order, 1, 0);
     jl_value_t *ety = jl_tparam0(jl_typeof(p));
     char *pp = (char*)jl_unbox_long(p);
@@ -86,19 +86,19 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointerref(jl_value_t *p, jl_value_t *order)
         return jl_atomic_load((jl_value_t**)pp);
     }
     else {
-        if (!jl_is_datatype(ety))
-            jl_error("pointerref: invalid pointer");
+        if (!is_valid_intrinsic_elptr(ety))
+            jl_error("atomic_pointerref: invalid pointer");
         size_t nb = jl_datatype_size(ety);
         if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE)
-            jl_error("pointerref: invalid pointer for atomic operation");
+            jl_error("atomic_pointerref: invalid pointer for atomic operation");
         return jl_atomic_new_bits(ety, pp);
     }
 }
 
 JL_DLLEXPORT jl_value_t *jl_atomic_pointerset(jl_value_t *p, jl_value_t *x, jl_value_t *order)
 {
-    JL_TYPECHK(pointerset, pointer, p);
-    JL_TYPECHK(pointerset, symbol, order);
+    JL_TYPECHK(atomic_pointerset, pointer, p);
+    JL_TYPECHK(atomic_pointerset, symbol, order);
     (void)jl_get_atomic_order_checked((jl_sym_t*)order, 0, 1);
     jl_value_t *ety = jl_tparam0(jl_typeof(p));
     char *pp = (char*)jl_unbox_long(p);
@@ -106,13 +106,13 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointerset(jl_value_t *p, jl_value_t *x, jl_v
         jl_atomic_store((jl_value_t**)pp, x);
     }
     else {
-        if (!jl_is_datatype(ety))
-            jl_error("pointerset: invalid pointer");
+        if (!is_valid_intrinsic_elptr(ety))
+            jl_error("atomic_pointerset: invalid pointer");
         if (jl_typeof(x) != ety)
-            jl_type_error("pointerset", ety, x);
+            jl_type_error("atomic_pointerset", ety, x);
         size_t nb = jl_datatype_size(ety);
         if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE)
-            jl_error("pointerset: invalid pointer for atomic operation");
+            jl_error("atomic_pointerset: invalid pointer for atomic operation");
         jl_atomic_store_bits(pp, x, nb);
     }
     return p;
@@ -120,8 +120,8 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointerset(jl_value_t *p, jl_value_t *x, jl_v
 
 JL_DLLEXPORT jl_value_t *jl_atomic_pointerswap(jl_value_t *p, jl_value_t *x, jl_value_t *order)
 {
-    JL_TYPECHK(pointerswap, pointer, p);
-    JL_TYPECHK(pointerswap, symbol, order);
+    JL_TYPECHK(atomic_pointerswap, pointer, p);
+    JL_TYPECHK(atomic_pointerswap, symbol, order);
     (void)jl_get_atomic_order_checked((jl_sym_t*)order, 1, 1);
     jl_value_t *ety = jl_tparam0(jl_typeof(p));
     jl_value_t *y;
@@ -130,13 +130,13 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointerswap(jl_value_t *p, jl_value_t *x, jl_
         y = jl_atomic_exchange((jl_value_t**)pp, x);
     }
     else {
-        if (!jl_is_datatype(ety))
-            jl_error("pointerswap: invalid pointer");
+        if (!is_valid_intrinsic_elptr(ety))
+            jl_error("atomic_pointerswap: invalid pointer");
         if (jl_typeof(x) != ety)
-            jl_type_error("pointerswap", ety, x);
+            jl_type_error("atomic_pointerswap", ety, x);
         size_t nb = jl_datatype_size(ety);
         if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE)
-            jl_error("pointerswap: invalid pointer for atomic operation");
+            jl_error("atomic_pointerswap: invalid pointer for atomic operation");
         y = jl_atomic_swap_bits(ety, pp, x, nb);
     }
     return y;
@@ -163,8 +163,10 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointermodify(jl_value_t *p, jl_value_t *f, j
             break;
         }
         else {
+            //if (!is_valid_intrinsic_elptr(ety)) // handled by jl_atomic_pointerref earlier
+            //    jl_error("atomic_pointermodify: invalid pointer");
             if (jl_typeof(y) != ety)
-                jl_type_error("pointermodify", ety, y);
+                jl_type_error("atomic_pointermodify", ety, y);
             size_t nb = jl_datatype_size(ety);
             if (jl_atomic_bool_cmpswap_bits(pp, expected, y, nb))
                 break;
@@ -181,13 +183,13 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointermodify(jl_value_t *p, jl_value_t *f, j
 
 JL_DLLEXPORT jl_value_t *jl_atomic_pointerreplace(jl_value_t *p, jl_value_t *expected, jl_value_t *x, jl_value_t *success_order_sym, jl_value_t *failure_order_sym)
 {
-    JL_TYPECHK(pointerreplace, pointer, p);
-    JL_TYPECHK(pointerreplace, symbol, success_order_sym);
-    JL_TYPECHK(pointerreplace, symbol, failure_order_sym);
+    JL_TYPECHK(atomic_pointerreplace, pointer, p);
+    JL_TYPECHK(atomic_pointerreplace, symbol, success_order_sym);
+    JL_TYPECHK(atomic_pointerreplace, symbol, failure_order_sym);
     enum jl_memory_order success_order = jl_get_atomic_order_checked((jl_sym_t*)success_order_sym, 1, 1);
     enum jl_memory_order failure_order = jl_get_atomic_order_checked((jl_sym_t*)failure_order_sym, 1, 0);
     if (failure_order > success_order)
-        jl_atomic_error("pointerreplace: invalid atomic ordering");
+        jl_atomic_error("atomic_pointerreplace: invalid atomic ordering");
     // TODO: filter other invalid orderings
     jl_value_t *ety = jl_tparam0(jl_typeof(p));
     char *pp = (char*)jl_unbox_long(p);
@@ -207,22 +209,23 @@ JL_DLLEXPORT jl_value_t *jl_atomic_pointerreplace(jl_value_t *p, jl_value_t *exp
         return result[0];
     }
     else {
-        if (!jl_is_datatype(ety))
-            jl_error("pointerreplace: invalid pointer");
+        if (!is_valid_intrinsic_elptr(ety))
+            jl_error("atomic_pointerreplace: invalid pointer");
         if (jl_typeof(x) != ety)
-            jl_type_error("pointerreplace", ety, x);
+            jl_type_error("atomic_pointerreplace", ety, x);
         size_t nb = jl_datatype_size(ety);
         if ((nb & (nb - 1)) != 0 || nb > MAX_POINTERATOMIC_SIZE)
-            jl_error("pointerreplace: invalid pointer for atomic operation");
+            jl_error("atomic_pointerreplace: invalid pointer for atomic operation");
         return jl_atomic_cmpswap_bits((jl_datatype_t*)ety, pp, expected, x, nb);
     }
 }
 
-JL_DLLEXPORT jl_value_t *jl_atomic_fence(jl_value_t *order)
+JL_DLLEXPORT jl_value_t *jl_atomic_fence(jl_value_t *order_sym)
 {
-    JL_TYPECHK(fence, symbol, order);
-    (void)jl_get_atomic_order_checked((jl_sym_t*)order, 0, 0);
-    jl_fence();
+    JL_TYPECHK(fence, symbol, order_sym);
+    enum jl_memory_order order = jl_get_atomic_order_checked((jl_sym_t*)order_sym, 0, 0);
+    if (order > jl_memory_order_monotonic)
+        jl_fence();
     return jl_nothing;
 }
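Illustrative note (not from the patch): with the `jl_atomic_fence` change above, the ordering symbol is still validated, but a hardware fence is only issued for orderings stronger than monotonic. A rough sketch; the second line assumes `:not_atomic` remains an accepted ordering for a fence:

```julia
Core.Intrinsics.atomic_fence(:sequentially_consistent)  # emits a seq_cst fence
Core.Intrinsics.atomic_fence(:not_atomic)               # validated, but assumed to emit no fence
```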
diff --git a/test/intrinsics.jl b/test/intrinsics.jl
index c1d3019f8db35..7fa8ecb0ebe27 100644
--- a/test/intrinsics.jl
+++ b/test/intrinsics.jl
@@ -171,51 +171,106 @@ primitive type Int512 <: Signed 512 end
 Int512(i::Int) = Core.Intrinsics.sext_int(Int512, i)
 function add(i::T, j)::T where {T}; return i + j; end
 swap(i, j) = j
+
 for TT in (Int8, Int16, Int32, Int64, Int128, Int256, Int512, Complex{Int32}, Complex{Int512}, Any)
-    T(x) = convert(TT, x)
     r = Ref{TT}(10)
-    p = Base.unsafe_convert(Ptr{eltype(r)}, r)
     GC.@preserve r begin
-        S = UInt32
-        if TT !== Any
-            @test_throws TypeError Core.Intrinsics.atomic_pointerset(p, S(1), :sequentially_consistent)
-            @test_throws TypeError Core.Intrinsics.atomic_pointerswap(p, S(100), :sequentially_consistent)
-            @test_throws TypeError Core.Intrinsics.atomic_pointerreplace(p, T(100), S(2), :sequentially_consistent, :sequentially_consistent)
-        end
-        @test Core.Intrinsics.pointerref(p, 1, 1) === T(10) === r[]
-        if sizeof(r) > 8
-            @test_throws ErrorException("pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
-            @test_throws ErrorException("pointerset: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerset(p, T(1), :sequentially_consistent)
-            @test_throws ErrorException("pointerswap: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerswap(p, T(100), :sequentially_consistent)
-            @test_throws ErrorException("pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent)
-            @test_throws ErrorException("pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointermodify(p, swap, S(1), :sequentially_consistent)
-            @test_throws ErrorException("pointerreplace: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerreplace(p, T(100), T(2), :sequentially_consistent, :sequentially_consistent)
-            @test_throws ErrorException("pointerreplace: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent)
+        (function (::Type{TT}) where TT
+            p = Base.unsafe_convert(Ptr{TT}, r)
+            T(x) = convert(TT, x)
+            S = UInt32
+            if TT !== Any
+                @test_throws TypeError Core.Intrinsics.atomic_pointerset(p, S(1), :sequentially_consistent)
+                @test_throws TypeError Core.Intrinsics.atomic_pointerswap(p, S(100), :sequentially_consistent)
+                @test_throws TypeError Core.Intrinsics.atomic_pointerreplace(p, T(100), S(2), :sequentially_consistent, :sequentially_consistent)
+            end
             @test Core.Intrinsics.pointerref(p, 1, 1) === T(10) === r[]
-        else
-            TT !== Any && @test_throws TypeError Core.Intrinsics.atomic_pointermodify(p, swap, S(1), :sequentially_consistent)
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(10)
-            @test Core.Intrinsics.atomic_pointerset(p, T(1), :sequentially_consistent) === p
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(1)
-            @test Core.Intrinsics.atomic_pointerreplace(p, T(1), T(100), :sequentially_consistent, :sequentially_consistent) === (T(1), true)
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(100)
-            @test Core.Intrinsics.atomic_pointerreplace(p, T(1), T(1), :sequentially_consistent, :sequentially_consistent) === (T(100), false)
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(100)
-            @test Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent) === (T(100), T(101))
-            @test Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent) === (T(101), T(102))
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(102)
-            @test Core.Intrinsics.atomic_pointerswap(p, T(103), :sequentially_consistent) === T(102)
-            @test Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent) === (T(103), false)
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(103)
-        end
-        if TT === Any
-            @test Core.Intrinsics.atomic_pointermodify(p, swap, S(103), :sequentially_consistent) === (T(103), S(103))
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === S(103)
-            @test Core.Intrinsics.atomic_pointerset(p, S(1), :sequentially_consistent) === p
-            @test Core.Intrinsics.atomic_pointerswap(p, S(100), :sequentially_consistent) === S(1)
-            @test Core.Intrinsics.atomic_pointerreplace(p, T(100), S(2), :sequentially_consistent, :sequentially_consistent) === (S(100), false)
-            @test Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent) === (S(100), true)
-            @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(2)
-        end
+            if sizeof(r) > 8
+                @test_throws ErrorException("atomic_pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerset: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerset(p, T(1), :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerswap: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerswap(p, T(100), :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerref: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointermodify(p, swap, S(1), :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerreplace: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerreplace(p, T(100), T(2), :sequentially_consistent, :sequentially_consistent)
+                @test_throws ErrorException("atomic_pointerreplace: invalid pointer for atomic operation") Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent)
+                @test Core.Intrinsics.pointerref(p, 1, 1) === T(10) === r[]
+            else
+                TT !== Any && @test_throws TypeError Core.Intrinsics.atomic_pointermodify(p, swap, S(1), :sequentially_consistent)
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(10)
+                @test Core.Intrinsics.atomic_pointerset(p, T(1), :sequentially_consistent) === p
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(1)
+                @test Core.Intrinsics.atomic_pointerreplace(p, T(1), T(100), :sequentially_consistent, :sequentially_consistent) === (T(1), true)
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(100)
+                @test Core.Intrinsics.atomic_pointerreplace(p, T(1), T(1), :sequentially_consistent, :sequentially_consistent) === (T(100), false)
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(100)
+                @test Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent) === (T(100), T(101))
+                @test Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent) === (T(101), T(102))
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(102)
+                @test Core.Intrinsics.atomic_pointerswap(p, T(103), :sequentially_consistent) === T(102)
+                @test Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent) === (T(103), false)
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(103)
+            end
+            if TT === Any
+                @test Core.Intrinsics.atomic_pointermodify(p, swap, S(103), :sequentially_consistent) === (T(103), S(103))
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === S(103)
+                @test Core.Intrinsics.atomic_pointerset(p, S(1), :sequentially_consistent) === p
+                @test Core.Intrinsics.atomic_pointerswap(p, S(100), :sequentially_consistent) === S(1)
+                @test Core.Intrinsics.atomic_pointerreplace(p, T(100), S(2), :sequentially_consistent, :sequentially_consistent) === (S(100), false)
+                @test Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent) === (S(100), true)
+                @test Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent) === T(2)
+            end
+        end)(TT,)
     end
 end
+
+mutable struct IntWrap <: Signed
+    x::Int
+end
+Base.:(+)(a::IntWrap, b::Int) = IntWrap(a.x + b)
+Base.:(+)(a::IntWrap, b::IntWrap) = IntWrap(a.x + b.x)
+Base.show(io::IO, a::IntWrap) = print(io, "IntWrap(", a.x, ")")
+(function()
+    TT = IntWrap
+    T(x) = convert(TT, x)
+    r = Ref{TT}(10)
+    p = Base.unsafe_convert(Ptr{TT}, r)
+    GC.@preserve r begin
+        S = UInt32
+        @test_throws TypeError Core.Intrinsics.atomic_pointerset(p, S(1), :sequentially_consistent)
+        @test_throws TypeError Core.Intrinsics.atomic_pointerswap(p, S(100), :sequentially_consistent)
+        @test_throws TypeError Core.Intrinsics.atomic_pointerreplace(p, T(100), S(2), :sequentially_consistent, :sequentially_consistent)
+        r2 = Core.Intrinsics.pointerref(p, 1, 1)
+        @test r2 isa IntWrap && r2.x === 10 === r[].x && r2 !== r[]
+        @test_throws TypeError Core.Intrinsics.atomic_pointermodify(p, swap, S(1), :sequentially_consistent)
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 10 === r[].x && r2 !== r[]
+        @test Core.Intrinsics.atomic_pointerset(p, T(1), :sequentially_consistent) === p
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 1 === r[].x && r2 !== r[]
+        r2, succ = Core.Intrinsics.atomic_pointerreplace(p, T(1), T(100), :sequentially_consistent, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 1 && r[].x === 100 && r2 !== r[]
+        @test succ
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 100 === r[].x && r2 !== r[]
+        r2, succ = Core.Intrinsics.atomic_pointerreplace(p, T(1), T(1), :sequentially_consistent, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 100 === r[].x && r2 !== r[]
+        @test !succ
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 100 === r[].x && r2 !== r[]
+        r2, r3 = Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 100 !== r[].x && r2 !== r[]
+        @test r3 isa IntWrap && r3.x === 101 === r[].x && r3 !== r[]
+        r2, r3 = Core.Intrinsics.atomic_pointermodify(p, add, T(1), :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 101 !== r[].x && r2 !== r[]
+        @test r3 isa IntWrap && r3.x === 102 === r[].x && r3 !== r[]
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 102 === r[].x && r2 !== r[]
+        r2 = Core.Intrinsics.atomic_pointerswap(p, T(103), :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 102 !== r[].x && r[].x == 103 && r2 !== r[]
+        r2, succ = Core.Intrinsics.atomic_pointerreplace(p, S(100), T(2), :sequentially_consistent, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 103 === r[].x && r2 !== r[]
+        @test !succ
+        r2 = Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)
+        @test r2 isa IntWrap && r2.x === 103 === r[].x && r2 !== r[]
+    end
+end)()
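Usage sketch (not from the patch) of the intrinsics these tests exercise, on a plain `Int`; argument order and return shapes follow the runtime implementations and tests above:

```julia
r = Ref{Int}(10)
p = Base.unsafe_convert(Ptr{Int}, r)
GC.@preserve r begin
    Core.Intrinsics.atomic_pointerset(p, 1, :sequentially_consistent)       # r[] == 1, returns p
    Core.Intrinsics.atomic_pointerref(p, :sequentially_consistent)          # 1
    Core.Intrinsics.atomic_pointerswap(p, 2, :sequentially_consistent)      # returns the old value 1
    Core.Intrinsics.atomic_pointermodify(p, +, 3, :sequentially_consistent) # returns (2, 5)
    Core.Intrinsics.atomic_pointerreplace(p, 5, 7,
        :sequentially_consistent, :sequentially_consistent)                 # returns (5, true); r[] == 7
end
```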