From 5650c93f1aa45a17c10c9482e5d3e0042cf4cc6b Mon Sep 17 00:00:00 2001
From: Jameson Nash
Date: Thu, 24 Jun 2021 11:14:23 -0400
Subject: [PATCH] codegen: add optimizations for swapfield and replacefield
 (#41275)

---
 src/builtins.c                     |   6 +-
 src/cgutils.cpp                    | 423 ++++++++++++++++++++++++-----
 src/codegen.cpp                    | 155 +++++------
 src/intrinsics.cpp                 |   3 +-
 src/llvm-gc-invariant-verifier.cpp |  32 ++-
 src/llvm-late-gc-lowering.cpp      |  31 ++-
 src/llvm-propagate-addrspaces.cpp  |  34 ++-
 test/atomics.jl                    |  16 +-
 8 files changed, 510 insertions(+), 190 deletions(-)

diff --git a/src/builtins.c b/src/builtins.c
index b5b4bb4069773..f40d694d23529 100644
--- a/src/builtins.c
+++ b/src/builtins.c
@@ -967,7 +967,9 @@ JL_CALLABLE(jl_f_replacefield)
         JL_TYPECHK(replacefield!, symbol, args[5]);
         failure_order = jl_get_atomic_order_checked((jl_sym_t*)args[5], 1, 0);
     }
-    // TODO: filter more invalid ordering combinations
+    if (failure_order > success_order)
+        jl_atomic_error("invalid atomic ordering");
+    // TODO: filter more invalid ordering combinations?
     jl_value_t *v = args[0];
     jl_datatype_t *st = (jl_datatype_t*)jl_typeof(v);
     size_t idx = get_checked_fieldindex("replacefield!", st, v, args[1], 1);
@@ -978,8 +980,6 @@ JL_CALLABLE(jl_f_replacefield)
     if (isatomic == (failure_order == jl_memory_order_notatomic))
         jl_atomic_error(isatomic ? "replacefield!: atomic field cannot be accessed non-atomically"
                                  : "replacefield!: non-atomic field cannot be accessed atomically");
-    if (failure_order > success_order)
-        jl_atomic_error("invalid atomic ordering");
     v = replace_nth_field(st, v, idx, args[2], args[3], isatomic); // always seq_cst, if isatomic needed at all
     return v;
 }
diff --git a/src/cgutils.cpp b/src/cgutils.cpp
index c5e25466110d0..2303ddb286971 100644
--- a/src/cgutils.cpp
+++ b/src/cgutils.cpp
@@ -1073,6 +1073,7 @@ static Value *null_pointer_cmp(jl_codectx_t &ctx, Value *v)
     return ctx.builder.CreateICmpNE(v, Constant::getNullValue(v->getType()));
 }
 
+
 // If `nullcheck` is not NULL and a pointer NULL check is necessary
 // store the pointer to be checked in `*nullcheck` instead of checking it
 static void null_pointer_check(jl_codectx_t &ctx, Value *v, Value **nullcheck = nullptr)
@@ -1085,6 +1086,62 @@ static void null_pointer_check(jl_codectx_t &ctx, Value *v, Value **nullcheck =
                 literal_pointer_val(ctx, jl_undefref_exception));
 }
 
+template<typename Func>
+static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, Value *defval, Func &&func)
+{
+    if (auto Cond = dyn_cast<ConstantInt>(ifnot)) {
+        if (Cond->isZero())
+            return defval;
+        return func();
+    }
+    BasicBlock *currBB = ctx.builder.GetInsertBlock();
+    BasicBlock *passBB = BasicBlock::Create(jl_LLVMContext, "guard_pass", ctx.f);
+    BasicBlock *exitBB = BasicBlock::Create(jl_LLVMContext, "guard_exit", ctx.f);
+    ctx.builder.CreateCondBr(ifnot, passBB, exitBB);
+    ctx.builder.SetInsertPoint(passBB);
+    auto res = func();
+    passBB = ctx.builder.GetInsertBlock();
+    ctx.builder.CreateBr(exitBB);
+    ctx.builder.SetInsertPoint(exitBB);
+    if (defval == nullptr)
+        return nullptr;
+    PHINode *phi = ctx.builder.CreatePHI(defval->getType(), 2);
+    phi->addIncoming(defval, currBB);
+    phi->addIncoming(res, passBB);
+    return phi;
+}
+
+template<typename Func>
+static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, bool defval, Func &&func)
+{
+    return emit_guarded_test(ctx, ifnot, ConstantInt::get(T_int1, defval), func);
+}
+
+template<typename Func>
+static Value *emit_nullcheck_guard(jl_codectx_t &ctx, Value *nullcheck, Func &&func)
+{
+    if (!nullcheck)
+        return func();
+    return emit_guarded_test(ctx, null_pointer_cmp(ctx, 
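// NOTE: emit_nullcheck_guard wraps func() so it only runs when the checked
// pointer is non-NULL (yielding false otherwise); emit_guarded_test above is
// the underlying pattern: branch to a pass block, then merge with a PHI,
// folding away entirely when the condition is a constant.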
nullcheck), false, func);
+}
+
+template<typename Func>
+static Value *emit_nullcheck_guard2(jl_codectx_t &ctx, Value *nullcheck1,
+                                    Value *nullcheck2, Func &&func)
+{
+    if (!nullcheck1)
+        return emit_nullcheck_guard(ctx, nullcheck2, func);
+    if (!nullcheck2)
+        return emit_nullcheck_guard(ctx, nullcheck1, func);
+    nullcheck1 = null_pointer_cmp(ctx, nullcheck1);
+    nullcheck2 = null_pointer_cmp(ctx, nullcheck2);
+    // If both are NULL, return true.
+    return emit_guarded_test(ctx, ctx.builder.CreateOr(nullcheck1, nullcheck2), true, [&] {
+        return emit_guarded_test(ctx, ctx.builder.CreateAnd(nullcheck1, nullcheck2),
+                                 false, func);
+    });
+}
+
 static void emit_type_error(jl_codectx_t &ctx, const jl_cgval_t &x, Value *type, const std::string &msg)
 {
     Value *msg_val = stringConstPtr(ctx.emission_context, ctx.builder, msg);
@@ -1407,6 +1464,19 @@ Value *extract_first_ptr(jl_codectx_t &ctx, Value *V)
     return ctx.builder.CreateExtractValue(V, path);
 }
 
+
+static void emit_lockstate_value(jl_codectx_t &ctx, Value *strct, bool newstate)
+{
+    Value *v = mark_callee_rooted(ctx, strct);
+    ctx.builder.CreateCall(prepare_call(newstate ? jllockvalue_func : jlunlockvalue_func), v);
+}
+static void emit_lockstate_value(jl_codectx_t &ctx, const jl_cgval_t &strct, bool newstate)
+{
+    assert(strct.isboxed);
+    emit_lockstate_value(ctx, boxed(ctx, strct), newstate);
+}
+
+
 // If `nullcheck` is not NULL and a pointer NULL check is necessary
 // store the pointer to be checked in `*nullcheck` instead of checking it
 static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, jl_value_t *jltype,
@@ -1470,18 +1540,24 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j
     return mark_julia_type(ctx, load, isboxed, jltype);
 }
 
-static void typed_store(jl_codectx_t &ctx,
-        Value *ptr, Value *idx_0based, const jl_cgval_t &rhs,
+static jl_cgval_t typed_store(jl_codectx_t &ctx,
+        Value *ptr, Value *idx_0based, const jl_cgval_t &rhs, const jl_cgval_t &cmp,
         jl_value_t *jltype, MDNode *tbaa, MDNode *aliasscope,
         Value *parent,  // for the write barrier, NULL if no barrier needed
-        bool isboxed, AtomicOrdering Order, unsigned alignment = 0)
+        bool isboxed, AtomicOrdering Order, AtomicOrdering FailOrder, unsigned alignment,
+        bool needlock, bool issetfield, bool isreplacefield, bool maybe_null_if_boxed)
 {
+    assert(!needlock || parent != nullptr);
+    jl_cgval_t oldval = rhs;
     Type *elty = isboxed ? T_prjlvalue : julia_type_to_llvm(ctx, jltype);
     if (type_is_ghost(elty))
-        return;
+        return oldval;
+    Value *intcast = nullptr;
     if (!isboxed && Order != AtomicOrdering::NotAtomic && !elty->isIntOrPtrTy() && !elty->isFloatingPointTy()) {
         const DataLayout &DL = jl_data_layout;
         unsigned nb = DL.getTypeSizeInBits(elty);
+        if (!issetfield)
+            intcast = ctx.builder.CreateAlloca(elty);
         elty = Type::getIntNTy(jl_LLVMContext, nb);
     }
     Value *r;
@@ -1498,18 +1574,206 @@ static void typed_store(jl_codectx_t &ctx,
         alignment = sizeof(void*);
     else if (!alignment)
         alignment = julia_alignment(jltype);
-    StoreInst *store = ctx.builder.CreateAlignedStore(r, ptr, Align(alignment));
-    store->setOrdering(Order);
-    if (aliasscope)
-        store->setMetadata("noalias", aliasscope);
-    if (tbaa)
-        tbaa_decorate(tbaa, store);
+    Instruction *instr = nullptr;
+    Value *Compare = nullptr;
+    Value *Success = nullptr;
+    BasicBlock *DoneBB = issetfield || (!isreplacefield && !isboxed) ? 
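// NOTE: a merge block is only needed when the emitted code can branch; plain
// setfield! stores unconditionally, and a non-boxed swap needs no
// compare-and-branch (it is a load+store under a lock or one atomicrmw xchg).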
nullptr : BasicBlock::Create(jl_LLVMContext, "done_xchg", ctx.f); + if (needlock) + emit_lockstate_value(ctx, parent, true); + if (issetfield || Order == AtomicOrdering::NotAtomic) { + if (!issetfield) { + instr = ctx.builder.CreateAlignedLoad(elty, ptr, Align(alignment)); + if (aliasscope) + instr->setMetadata("noalias", aliasscope); + if (tbaa) + tbaa_decorate(tbaa, instr); + } + if (isreplacefield) { + oldval = mark_julia_type(ctx, instr, isboxed, jltype); + Value *first_ptr = nullptr; + if (maybe_null_if_boxed) + first_ptr = isboxed ? instr : extract_first_ptr(ctx, instr); + Success = emit_nullcheck_guard(ctx, first_ptr, [&] { + return emit_f_is(ctx, oldval, cmp); + }); + BasicBlock *BB = BasicBlock::Create(jl_LLVMContext, "xchg", ctx.f); + ctx.builder.CreateCondBr(Success, BB, DoneBB); + ctx.builder.SetInsertPoint(BB); + } + StoreInst *store = ctx.builder.CreateAlignedStore(r, ptr, Align(alignment)); + store->setOrdering(Order); + if (aliasscope) + store->setMetadata("noalias", aliasscope); + if (tbaa) + tbaa_decorate(tbaa, store); + if (DoneBB) + ctx.builder.CreateBr(DoneBB); + } + else if (isboxed || isreplacefield) { + // we have to handle isboxed here as a workaround for really bad LLVM design issue: plain Xchg only works with integers + bool needloop; + PHINode *Succ = nullptr, *Current = nullptr; + if (isreplacefield) { + if (!isboxed) { + needloop = ((jl_datatype_t*)jltype)->layout->haspadding; + Value *SameType = emit_isa(ctx, cmp, jltype, nullptr).first; + if (SameType != ConstantInt::getTrue(jl_LLVMContext)) { + BasicBlock *SkipBB = BasicBlock::Create(jl_LLVMContext, "skip_xchg", ctx.f); + BasicBlock *BB = BasicBlock::Create(jl_LLVMContext, "xchg", ctx.f); + ctx.builder.CreateCondBr(SameType, BB, SkipBB); + ctx.builder.SetInsertPoint(SkipBB); + LoadInst *load = ctx.builder.CreateAlignedLoad(elty, ptr, Align(alignment)); + load->setOrdering(FailOrder); + if (aliasscope) + load->setMetadata("noalias", aliasscope); + if (tbaa) + tbaa_decorate(tbaa, load); + instr = load; + ctx.builder.CreateBr(DoneBB); + ctx.builder.SetInsertPoint(DoneBB); + Succ = ctx.builder.CreatePHI(T_int1, 2); + Succ->addIncoming(ConstantInt::get(T_int1, 0), SkipBB); + Current = ctx.builder.CreatePHI(instr->getType(), 2); + Current->addIncoming(instr, SkipBB); + ctx.builder.SetInsertPoint(BB); + } + Compare = emit_unbox(ctx, elty, cmp, jltype); + } + else if (cmp.isboxed) { + Compare = boxed(ctx, cmp); + needloop = !jl_is_mutable_datatype(jltype); + } + else { + Compare = V_rnull; + needloop = true; + } + } + else { + LoadInst *Current = ctx.builder.CreateAlignedLoad(elty, ptr, Align(alignment)); + Current->setOrdering(AtomicOrdering::Monotonic); + if (aliasscope) + Current->setMetadata("noalias", aliasscope); + if (tbaa) + tbaa_decorate(tbaa, Current); + Compare = Current; + needloop = true; + } + BasicBlock *BB; + if (needloop) { + BasicBlock *From = ctx.builder.GetInsertBlock(); + BB = BasicBlock::Create(jl_LLVMContext, "xchg", ctx.f); + ctx.builder.CreateBr(BB); + ctx.builder.SetInsertPoint(BB); + PHINode *Cmp = ctx.builder.CreatePHI(r->getType(), 2); + Cmp->addIncoming(Compare, From); + Compare = Cmp; + } + if (Order == AtomicOrdering::Unordered) + Order = AtomicOrdering::Monotonic; + if (!isreplacefield) + FailOrder = AtomicOrdering::Monotonic; + else if (FailOrder == AtomicOrdering::Unordered) + FailOrder = AtomicOrdering::Monotonic; +#if JL_LLVM_VERSION >= 130000 + auto *store = ctx.builder.CreateAtomicCmpXchg(ptr, Compare, r, Align(alignment), Order, FailOrder); +#else + auto *store = 
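// NOTE: IRBuilder::CreateAtomicCmpXchg only gained an Align parameter in
// LLVM 13; on older LLVM the alignment is set on the instruction afterwards.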
ctx.builder.CreateAtomicCmpXchg(ptr, Compare, r, Order, FailOrder);
+        store->setAlignment(Align(alignment));
+#endif
+        if (aliasscope)
+            store->setMetadata("noalias", aliasscope);
+        if (tbaa)
+            tbaa_decorate(tbaa, store);
+        instr = ctx.builder.Insert(ExtractValueInst::Create(store, 0));
+        Success = ctx.builder.CreateExtractValue(store, 1);
+        Value *Done = Success;
+        if (needloop) {
+            if (isreplacefield) {
+                if (intcast) {
+                    ctx.builder.CreateStore(instr, ctx.builder.CreateBitCast(intcast, instr->getType()->getPointerTo()));
+                    oldval = mark_julia_slot(intcast, jltype, NULL, tbaa_stack);
+                }
+                else {
+                    oldval = mark_julia_type(ctx, instr, isboxed, jltype);
+                }
+                Done = emit_guarded_test(ctx, ctx.builder.CreateNot(Success), false, [&] {
+                    Value *first_ptr = nullptr;
+                    if (maybe_null_if_boxed)
+                        first_ptr = isboxed ? instr : extract_first_ptr(ctx, instr);
+                    return emit_nullcheck_guard(ctx, first_ptr, [&] {
+                        return emit_f_is(ctx, oldval, cmp);
+                    });
+                });
+                Done = ctx.builder.CreateNot(Done);
+            }
+            cast<PHINode>(Compare)->addIncoming(instr, ctx.builder.GetInsertBlock());
+        }
+        if (Succ != nullptr) {
+            Current->addIncoming(instr, ctx.builder.GetInsertBlock());
+            instr = Current;
+            Succ->addIncoming(Success, ctx.builder.GetInsertBlock());
+            Success = Succ;
+        }
+        if (needloop)
+            ctx.builder.CreateCondBr(Done, DoneBB, BB);
+        else
+            ctx.builder.CreateBr(DoneBB);
+    }
+    else {
+#if JL_LLVM_VERSION >= 130000
+        instr = ctx.builder.CreateAtomicRMW(AtomicRMWInst::Xchg, ptr, r, Align(alignment), Order);
+#else
+        auto *store = ctx.builder.CreateAtomicRMW(AtomicRMWInst::Xchg, ptr, r, Order);
+        store->setAlignment(Align(alignment));
+        instr = store;
+#endif
+        if (aliasscope)
+            instr->setMetadata("noalias", aliasscope);
+        if (tbaa)
+            tbaa_decorate(tbaa, instr);
+        assert(DoneBB == nullptr);
+    }
+    if (DoneBB)
+        ctx.builder.SetInsertPoint(DoneBB);
+    if (needlock)
+        emit_lockstate_value(ctx, parent, false);
     if (parent != NULL) {
+        BasicBlock *DoneBB;
+        if (isreplacefield) {
+            // TODO: avoid this branch if we aren't making a write barrier
+            BasicBlock *BB = BasicBlock::Create(jl_LLVMContext, "xchg_wb", ctx.f);
+            DoneBB = BasicBlock::Create(jl_LLVMContext, "done_xchg_wb", ctx.f);
+            ctx.builder.CreateCondBr(Success, BB, DoneBB);
+            ctx.builder.SetInsertPoint(BB);
+        }
         if (!isboxed)
             emit_write_multibarrier(ctx, parent, r, rhs.typ);
         else if (!type_is_permalloc(rhs.typ))
             emit_write_barrier(ctx, parent, r);
+        if (isreplacefield) {
+            ctx.builder.CreateBr(DoneBB);
+            ctx.builder.SetInsertPoint(DoneBB);
+        }
+    }
+    if (!issetfield) {
+        if (intcast) {
+            ctx.builder.CreateStore(instr, ctx.builder.CreateBitCast(intcast, instr->getType()->getPointerTo()));
+            instr = ctx.builder.CreateLoad(intcast);
+        }
+        if (maybe_null_if_boxed) {
+            Value *first_ptr = isboxed ? 
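// NOTE: for boxed fields the loaded pointer itself is the undef-field check;
// for inline immutables the first tracked pointer inside the value stands in.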
instr : extract_first_ptr(ctx, instr); + if (first_ptr) + null_pointer_check(ctx, first_ptr, nullptr); + } + oldval = mark_julia_type(ctx, instr, isboxed, jltype); + if (isreplacefield) { + // TODO: do better here + jl_cgval_t argv[2] = {oldval, mark_julia_type(ctx, Success, false, jl_bool_type)}; + instr = emit_jlcall(ctx, jltuple_func, V_rnull, argv, 2, JLCALL_F_CC); + oldval = mark_julia_type(ctx, instr, true, jl_any_type); + } } + return oldval; } // --- convert boolean value to julia --- @@ -1760,11 +2024,22 @@ static bool emit_getfield_unknownidx(jl_codectx_t &ctx, return false; } -static void emit_lockstate_value(jl_codectx_t &ctx, const jl_cgval_t &strct, bool newstate) +static jl_cgval_t emit_unionload(jl_codectx_t &ctx, Value *addr, Value *ptindex, jl_value_t *jfty, size_t fsz, size_t al, MDNode *tbaa, bool mutabl) { - assert(strct.isboxed); - Value *v = mark_callee_rooted(ctx, boxed(ctx, strct)); - ctx.builder.CreateCall(prepare_call(newstate ? jllockvalue_func : jlunlockvalue_func), v); + Instruction *tindex0 = tbaa_decorate(tbaa_unionselbyte, ctx.builder.CreateAlignedLoad(T_int8, ptindex, Align(1))); + //tindex0->setMetadata(LLVMContext::MD_range, MDNode::get(jl_LLVMContext, { + // ConstantAsMetadata::get(ConstantInt::get(T_int8, 0)), + // ConstantAsMetadata::get(ConstantInt::get(T_int8, union_max)) })); + Value *tindex = ctx.builder.CreateNUWAdd(ConstantInt::get(T_int8, 1), tindex0); + if (mutabl) { + // move value to an immutable stack slot (excluding tindex) + Type *ET = IntegerType::get(jl_LLVMContext, 8 * al); + AllocaInst *lv = emit_static_alloca(ctx, ET); + lv->setOperand(0, ConstantInt::get(T_int32, (fsz + al - 1) / al)); + emit_memcpy(ctx, lv, tbaa, addr, tbaa, fsz, al); + addr = lv; + } + return mark_julia_slot(addr, jfty, tindex, tbaa); } // If `nullcheck` is not NULL and a pointer NULL check is necessary @@ -1845,20 +2120,7 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st else { ptindex = emit_struct_gep(ctx, cast(lt), staddr, byte_offset + fsz); } - Instruction *tindex0 = tbaa_decorate(tbaa_unionselbyte, ctx.builder.CreateAlignedLoad(T_int8, ptindex, Align(1))); - //tindex0->setMetadata(LLVMContext::MD_range, MDNode::get(jl_LLVMContext, { - // ConstantAsMetadata::get(ConstantInt::get(T_int8, 0)), - // ConstantAsMetadata::get(ConstantInt::get(T_int8, union_max)) })); - Value *tindex = ctx.builder.CreateNUWAdd(ConstantInt::get(T_int8, 1), tindex0); - if (jt->name->mutabl) { - // move value to an immutable stack slot (excluding tindex) - Type *ET = IntegerType::get(jl_LLVMContext, 8 * al); - AllocaInst *lv = emit_static_alloca(ctx, ET); - lv->setOperand(0, ConstantInt::get(T_int32, (fsz + al - 1) / al)); - emit_memcpy(ctx, lv, tbaa, addr, tbaa, fsz, al); - addr = lv; - } - return mark_julia_slot(addr, jfty, tindex, tbaa); + return emit_unionload(ctx, addr, ptindex, jfty, fsz, al, tbaa, jt->name->mutabl); } assert(jl_is_concrete_type(jfty)); if (!jt->name->mutabl && !(maybe_null && (jfty == (jl_value_t*)jl_bool_type || @@ -2883,49 +3145,76 @@ static void emit_write_multibarrier(jl_codectx_t &ctx, Value *parent, Value *agg } -static void emit_setfield(jl_codectx_t &ctx, +static jl_cgval_t emit_setfield(jl_codectx_t &ctx, jl_datatype_t *sty, const jl_cgval_t &strct, size_t idx0, - const jl_cgval_t &rhs, bool checked, bool wb, AtomicOrdering Order) -{ - if (sty->name->mutabl || !checked) { - assert(strct.ispointer()); - size_t byte_offset = jl_field_offset(sty, idx0); - Value *addr = data_pointer(ctx, strct); - if (byte_offset > 0) { - 
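// NOTE: both the removed code here and the new emit_setfield below address
// fields with a raw byte-offset GEP from the object base rather than a struct
// GEP (hence the TODO about emit_struct_gep).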
addr = ctx.builder.CreateInBoundsGEP( - T_int8, - emit_bitcast(ctx, maybe_decay_tracked(ctx, addr), T_pint8), - ConstantInt::get(T_size, byte_offset)); // TODO: use emit_struct_gep - } - jl_value_t *jfty = jl_svecref(sty->types, idx0); - if (!jl_field_isptr(sty, idx0) && jl_is_uniontype(jfty)) { - int fsz = jl_field_size(sty, idx0) - 1; - // compute tindex from rhs - jl_cgval_t rhs_union = convert_julia_type(ctx, rhs, jfty); - if (rhs_union.typ == jl_bottom_type) - return; - Value *tindex = compute_tindex_unboxed(ctx, rhs_union, jfty); - tindex = ctx.builder.CreateNUWSub(tindex, ConstantInt::get(T_int8, 1)); - Value *ptindex = ctx.builder.CreateInBoundsGEP(T_int8, emit_bitcast(ctx, maybe_decay_tracked(ctx, addr), T_pint8), ConstantInt::get(T_size, fsz)); - tbaa_decorate(tbaa_unionselbyte, ctx.builder.CreateAlignedStore(tindex, ptindex, Align(1))); - // copy data - if (!rhs.isghost) { - emit_unionmove(ctx, addr, strct.tbaa, rhs, nullptr); - } - } - else { - unsigned align = jl_field_align(sty, idx0); - bool isboxed = jl_field_isptr(sty, idx0); - typed_store(ctx, addr, NULL, rhs, jfty, strct.tbaa, nullptr, - wb ? maybe_bitcast(ctx, data_pointer(ctx, strct), T_pjlvalue) : nullptr, - isboxed, Order, align); - } - } - else { + const jl_cgval_t &rhs, const jl_cgval_t &cmp, + bool checked, bool wb, AtomicOrdering Order, AtomicOrdering FailOrder, + bool needlock, bool issetfield, bool isreplacefield) +{ + if (!sty->name->mutabl && checked) { std::string msg = "setfield!: immutable struct of type " + std::string(jl_symbol_name(sty->name->name)) + " cannot be changed"; emit_error(ctx, msg); + return jl_cgval_t(); + } + assert(strct.ispointer()); + size_t byte_offset = jl_field_offset(sty, idx0); + Value *addr = data_pointer(ctx, strct); + if (byte_offset > 0) { + addr = ctx.builder.CreateInBoundsGEP( + T_int8, + emit_bitcast(ctx, maybe_decay_tracked(ctx, addr), T_pint8), + ConstantInt::get(T_size, byte_offset)); // TODO: use emit_struct_gep + } + jl_value_t *jfty = jl_svecref(sty->types, idx0); + if (!jl_field_isptr(sty, idx0) && jl_is_uniontype(jfty)) { + size_t fsz = 0, al = 0; + bool isptr = !jl_islayout_inline(jfty, &fsz, &al); + assert(!isptr && fsz == jl_field_size(sty, idx0) - 1); (void)isptr; + // compute tindex from rhs + jl_cgval_t rhs_union = convert_julia_type(ctx, rhs, jfty); + if (rhs_union.typ == jl_bottom_type) + return jl_cgval_t(); + Value *tindex = compute_tindex_unboxed(ctx, rhs_union, jfty); + tindex = ctx.builder.CreateNUWSub(tindex, ConstantInt::get(T_int8, 1)); + Value *ptindex = ctx.builder.CreateInBoundsGEP(T_int8, emit_bitcast(ctx, maybe_decay_tracked(ctx, addr), T_pint8), ConstantInt::get(T_size, fsz)); + if (needlock) + emit_lockstate_value(ctx, strct, true); + jl_cgval_t oldval = rhs; + if (!issetfield) + oldval = emit_unionload(ctx, addr, ptindex, jfty, fsz, al, strct.tbaa, true); + Value *Success; + BasicBlock *DoneBB; + if (isreplacefield) { + BasicBlock *BB = BasicBlock::Create(jl_LLVMContext, "xchg", ctx.f); + DoneBB = BasicBlock::Create(jl_LLVMContext, "done_xchg", ctx.f); + Success = emit_f_is(ctx, oldval, cmp); + ctx.builder.CreateCondBr(Success, BB, DoneBB); + ctx.builder.SetInsertPoint(BB); + } + tbaa_decorate(tbaa_unionselbyte, ctx.builder.CreateAlignedStore(tindex, ptindex, Align(1))); + // copy data + if (!rhs.isghost) { + emit_unionmove(ctx, addr, strct.tbaa, rhs, nullptr); + } + if (isreplacefield) { + ctx.builder.CreateBr(DoneBB); + ctx.builder.SetInsertPoint(DoneBB); + } + if (needlock) + emit_lockstate_value(ctx, strct, false); + return oldval; + } + 
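// NOTE: non-union fields (boxed or plain bits) defer to typed_store below,
// which emits the store/xchg/cmpxchg and returns the old value (and, for
// replacefield!, the (old, success) tuple).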
else { + unsigned align = jl_field_align(sty, idx0); + bool isboxed = jl_field_isptr(sty, idx0); + size_t nfields = jl_datatype_nfields(sty); + bool maybe_null = idx0 >= nfields - (unsigned)sty->name->n_uninitialized; + return typed_store(ctx, addr, NULL, rhs, cmp, jfty, strct.tbaa, nullptr, + wb ? maybe_bitcast(ctx, data_pointer(ctx, strct), T_pjlvalue) : nullptr, + isboxed, Order, FailOrder, align, + needlock, issetfield, isreplacefield, maybe_null); } } @@ -3104,7 +3393,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg else need_wb = false; emit_typecheck(ctx, rhs, jl_svecref(sty->types, i), "new"); - emit_setfield(ctx, sty, strctinfo, i, rhs, false, need_wb, AtomicOrdering::NotAtomic); + emit_setfield(ctx, sty, strctinfo, i, rhs, jl_cgval_t(), false, need_wb, AtomicOrdering::NotAtomic, AtomicOrdering::NotAtomic, false, true, false); } return strctinfo; } diff --git a/src/codegen.cpp b/src/codegen.cpp index 4eba89badbd8e..dc03d453d4578 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -879,6 +879,8 @@ static const std::map builtin_func_map = { { &jl_f_isdefined, new JuliaFunction{"jl_f_isdefined", get_func_sig, get_func_attrs} }, { &jl_f_getfield, new JuliaFunction{"jl_f_getfield", get_func_sig, get_func_attrs} }, { &jl_f_setfield, new JuliaFunction{"jl_f_setfield", get_func_sig, get_func_attrs} }, + { &jl_f_swapfield, new JuliaFunction{"jl_f_swapfield", get_func_sig, get_func_attrs} }, + { &jl_f_modifyfield, new JuliaFunction{"jl_f_modifyfield", get_func_sig, get_func_attrs} }, { &jl_f_fieldtype, new JuliaFunction{"jl_f_fieldtype", get_func_sig, get_func_attrs} }, { &jl_f_nfields, new JuliaFunction{"jl_f_nfields", get_func_sig, get_func_attrs} }, { &jl_f__expr, new JuliaFunction{"jl_f__expr", get_func_sig, get_func_attrs} }, @@ -1160,6 +1162,8 @@ static CallInst *emit_jlcall(jl_codectx_t &ctx, Function *theFptr, Value *theF, jl_cgval_t *args, size_t nargs, CallingConv::ID cc); static CallInst *emit_jlcall(jl_codectx_t &ctx, JuliaFunction *theFptr, Value *theF, jl_cgval_t *args, size_t nargs, CallingConv::ID cc); +static Value *emit_f_is(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgval_t &arg2, + Value *nullcheck1 = nullptr, Value *nullcheck2 = nullptr); static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p); static GlobalVariable *prepare_global_in(Module *M, GlobalVariable *G); @@ -1441,6 +1445,17 @@ static void alloc_def_flag(jl_codectx_t &ctx, jl_varinfo_t& vi) // --- utilities --- +static Constant *undef_value_for_type(Type *T) { + auto tracked = CountTrackedPointers(T); + Constant *undef; + if (tracked.count) + // make sure gc pointers (including ptr_phi of union-split) are initialized to NULL + undef = Constant::getNullValue(T); + else + undef = UndefValue::get(T); + return undef; +} + static void CreateTrap(IRBuilder<> &irbuilder) { Function *f = irbuilder.GetInsertBlock()->getParent(); @@ -1472,6 +1487,7 @@ static void CreateConditionalAbort(IRBuilder<> &irbuilder, Value *test) #endif #endif + #include "cgutils.cpp" static jl_cgval_t convert_julia_type_union(jl_codectx_t &ctx, const jl_cgval_t &v, jl_value_t *typ, Value **skip) @@ -2351,62 +2367,6 @@ static jl_cgval_t emit_globalref(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t * return emit_checked_var(ctx, bp, name, false, tbaa_binding); } -template -static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, Constant *defval, Func &&func) -{ - if (auto Cond = dyn_cast(ifnot)) { - if (Cond->isZero()) - return defval; - return func(); - } - BasicBlock *currBB = 
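// NOTE: this removed block and the helpers after it are the definitions that
// moved verbatim into cgutils.cpp above, where typed_store can reach them.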
ctx.builder.GetInsertBlock(); - BasicBlock *passBB = BasicBlock::Create(jl_LLVMContext, "guard_pass", ctx.f); - BasicBlock *exitBB = BasicBlock::Create(jl_LLVMContext, "guard_exit", ctx.f); - ctx.builder.CreateCondBr(ifnot, passBB, exitBB); - ctx.builder.SetInsertPoint(passBB); - auto res = func(); - passBB = ctx.builder.GetInsertBlock(); - ctx.builder.CreateBr(exitBB); - ctx.builder.SetInsertPoint(exitBB); - if (defval == nullptr) - return nullptr; - PHINode *phi = ctx.builder.CreatePHI(defval->getType(), 2); - phi->addIncoming(defval, currBB); - phi->addIncoming(res, passBB); - return phi; -} - -template -static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, bool defval, Func &&func) -{ - return emit_guarded_test(ctx, ifnot, ConstantInt::get(T_int1, defval), func); -} - -template -static Value *emit_nullcheck_guard(jl_codectx_t &ctx, Value *nullcheck, Func &&func) -{ - if (!nullcheck) - return func(); - return emit_guarded_test(ctx, null_pointer_cmp(ctx, nullcheck), false, func); -} - -template -static Value *emit_nullcheck_guard2(jl_codectx_t &ctx, Value *nullcheck1, - Value *nullcheck2, Func &&func) -{ - if (!nullcheck1) - return emit_nullcheck_guard(ctx, nullcheck2, func); - if (!nullcheck2) - return emit_nullcheck_guard(ctx, nullcheck1, func); - nullcheck1 = null_pointer_cmp(ctx, nullcheck1); - nullcheck2 = null_pointer_cmp(ctx, nullcheck2); - // If both are NULL, return true. - return emit_guarded_test(ctx, ctx.builder.CreateOr(nullcheck1, nullcheck2), true, [&] { - return emit_guarded_test(ctx, ctx.builder.CreateAnd(nullcheck1, nullcheck2), - false, func); - }); -} - static Value *emit_box_compare(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgval_t &arg2, Value *nullcheck1, Value *nullcheck2) { @@ -2443,8 +2403,6 @@ static Value *emit_box_compare(jl_codectx_t &ctx, const jl_cgval_t &arg1, const } static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t arg2); -static Value *emit_f_is(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgval_t &arg2, - Value *nullcheck1 = nullptr, Value *nullcheck2 = nullptr); static Value *emit_bitsunion_compare(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgval_t &arg2) { @@ -3011,13 +2969,18 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, else { typed_store(ctx, emit_arrayptr(ctx, ary, ary_ex, isboxed), - idx, val, ety, + idx, val, jl_cgval_t(), ety, isboxed ? tbaa_ptrarraybuf : tbaa_arraybuf, ctx.aliasscope, data_owner, isboxed, isboxed ? AtomicOrdering::Unordered : AtomicOrdering::NotAtomic, // TODO: we should do this for anything with CountTrackedPointers(elty).count > 0 - 0); + isboxed ? AtomicOrdering::Unordered : AtomicOrdering::NotAtomic, // TODO: we should do this for anything with CountTrackedPointers(elty).count > 0 + 0, + false, + true, + false, + false); } } *ret = ary; @@ -3158,19 +3121,34 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, return false; } - else if (f == jl_builtin_setfield && (nargs == 3 || nargs == 4)) { + else if ((f == jl_builtin_setfield && (nargs == 3 || nargs == 4)) || + (f == jl_builtin_swapfield && (nargs == 3 || nargs == 4)) || + (f == jl_builtin_replacefield && (nargs == 4 || nargs == 5 || nargs == 6))) { + bool issetfield = f == jl_builtin_setfield; + bool isreplacefield = f == jl_builtin_replacefield; + const jl_cgval_t undefval; const jl_cgval_t &obj = argv[1]; const jl_cgval_t &fld = argv[2]; - const jl_cgval_t &val = argv[3]; + const jl_cgval_t &val = argv[isreplacefield ? 
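// NOTE: replacefield! is (obj, fld, cmp, val[, success_order[, fail_order]]),
// while setfield!/swapfield! are (obj, fld, val[, order]), so val shifts by one.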
4 : 3]; + const jl_cgval_t &cmp = isreplacefield ? argv[3] : undefval; enum jl_memory_order order = jl_memory_order_notatomic; - if (nargs == 4) { - const jl_cgval_t &ord = argv[4]; - emit_typecheck(ctx, ord, (jl_value_t*)jl_symbol_type, "setfield!"); + if (nargs >= (isreplacefield ? 5 : 4)) { + const jl_cgval_t &ord = argv[isreplacefield ? 5 : 4]; + emit_typecheck(ctx, ord, (jl_value_t*)jl_symbol_type, + issetfield ? "setfield!" : isreplacefield ? "replacefield!" : "swapfield!"); if (!ord.constant) return false; - order = jl_get_atomic_order((jl_sym_t*)ord.constant, false, true); + order = jl_get_atomic_order((jl_sym_t*)ord.constant, !issetfield, true); } - if (order == jl_memory_order_invalid) { + enum jl_memory_order fail_order = order; + if (isreplacefield && nargs == 6) { + const jl_cgval_t &ord = argv[6]; + emit_typecheck(ctx, ord, (jl_value_t*)jl_symbol_type, "replacefield!"); + if (!ord.constant) + return false; + fail_order = jl_get_atomic_order((jl_sym_t*)ord.constant, true, false); + } + if (order == jl_memory_order_invalid || fail_order == jl_memory_order_invalid || fail_order > order) { emit_atomic_error(ctx, "invalid atomic ordering"); *ret = jl_cgval_t(); // unreachable return true; @@ -3189,27 +3167,39 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, } if (idx != -1) { jl_value_t *ft = jl_svecref(uty->types, idx); - if (jl_subtype(val.typ, ft)) { + if (!jl_has_free_typevars(ft) && jl_subtype(val.typ, ft)) { // TODO: attempt better codegen for approximate types bool isboxed = jl_field_isptr(uty, idx); bool isatomic = jl_field_isatomic(uty, idx); bool needlock = isatomic && !isboxed && jl_datatype_size(jl_field_type(uty, idx)) > MAX_ATOMIC_SIZE; if (isatomic == (order == jl_memory_order_notatomic)) { emit_atomic_error(ctx, - isatomic ? "setfield!: atomic field cannot be written non-atomically" - : "setfield!: non-atomic field cannot be written atomically"); + issetfield ? + (isatomic ? "setfield!: atomic field cannot be written non-atomically" + : "setfield!: non-atomic field cannot be written atomically") : + isreplacefield ? + (isatomic ? "replacefield!: atomic field cannot be written non-atomically" + : "replacefield!: non-atomic field cannot be written atomically") : + (isatomic ? "swapfield!: atomic field cannot be written non-atomically" + : "swapfield!: non-atomic field cannot be written atomically")); + *ret = jl_cgval_t(); + return true; + } + if (isatomic == (fail_order == jl_memory_order_notatomic)) { + emit_atomic_error(ctx, + (isatomic ? "replacefield!: atomic field cannot be accessed non-atomically" + : "replacefield!: non-atomic field cannot be accessed atomically")); *ret = jl_cgval_t(); return true; } - if (needlock) - emit_lockstate_value(ctx, obj, true); - emit_setfield(ctx, uty, obj, idx, val, true, true, + *ret = emit_setfield(ctx, uty, obj, idx, val, cmp, true, true, (needlock || order <= jl_memory_order_notatomic) ? (isboxed ? AtomicOrdering::Unordered : AtomicOrdering::NotAtomic) // TODO: we should do this for anything with CountTrackedPointers(elty).count > 0 - : get_llvm_atomic_order(order)); - if (needlock) - emit_lockstate_value(ctx, obj, false); - *ret = val; + : get_llvm_atomic_order(order), + (needlock || fail_order <= jl_memory_order_notatomic) + ? (isboxed ? 
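// NOTE: even under a lock or for non-atomic accesses, boxed slots keep
// Unordered stores so the GC never observes a torn pointer; inline data can
// use a plain store.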
AtomicOrdering::Unordered : AtomicOrdering::NotAtomic) // TODO: we should do this for anything with CountTrackedPointers(elty).count > 0
+                    : get_llvm_atomic_order(fail_order),
+                    needlock, issetfield, isreplacefield);
                 return true;
             }
         }
@@ -7233,17 +7223,6 @@ static std::pair<std::unique_ptr<Module>, jl_llvm_functions_t>
     ctx.builder.SetCurrentDebugLocation(noDbg);
     ctx.builder.ClearInsertionPoint();
 
-    auto undef_value_for_type = [&](Type *T) {
-        auto tracked = CountTrackedPointers(T);
-        Constant *undef;
-        if (tracked.count)
-            // make sure gc pointers (including ptr_phi of union-split) are initialized to NULL
-            undef = Constant::getNullValue(T);
-        else
-            undef = UndefValue::get(T);
-        return undef;
-    };
-
     // Codegen Phi nodes
     std::map<std::pair<BasicBlock*, BasicBlock*>, BasicBlock*> BB_rewrite_map;
     std::vector<llvm::PHINode*> ToDelete;
diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp
index 904623e4ec43c..7d0e94fd30783 100644
--- a/src/intrinsics.cpp
+++ b/src/intrinsics.cpp
@@ -690,7 +690,8 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, jl_cgval_t *argv)
         assert(!isboxed);
         if (!type_is_ghost(ptrty)) {
             thePtr = emit_unbox(ctx, ptrty->getPointerTo(), e, e.typ);
-            typed_store(ctx, thePtr, im1, x, ety, tbaa_data, nullptr, nullptr, isboxed, AtomicOrdering::NotAtomic, align_nb);
+            typed_store(ctx, thePtr, im1, x, jl_cgval_t(), ety, tbaa_data, nullptr, nullptr, isboxed,
+                        AtomicOrdering::NotAtomic, AtomicOrdering::NotAtomic, align_nb, false, true, false, false);
         }
     }
     return e;
diff --git a/src/llvm-gc-invariant-verifier.cpp b/src/llvm-gc-invariant-verifier.cpp
index 1c584f3852e9e..29b8c9ac4e60c 100644
--- a/src/llvm-gc-invariant-verifier.cpp
+++ b/src/llvm-gc-invariant-verifier.cpp
@@ -55,13 +55,17 @@ struct GCInvariantVerifier : public FunctionPass, public InstVisitor<GCInvariantVerifier> {
-void GCInvariantVerifier::visitStoreInst(StoreInst &SI) {
-    Type *VTy = SI.getValueOperand()->getType();
+void GCInvariantVerifier::checkStoreInst(Type *VTy, unsigned AS, Value &SI) {
     if (VTy->isPointerTy()) {
         /* We currently don't obey this for arguments. That's ok - they're
            externally rooted. 
*/ @@ -90,12 +93,23 @@ void GCInvariantVerifier::visitStoreInst(StoreInst &SI) { AS != AddressSpace::Derived, "Illegal store of decayed value", &SI); } - VTy = SI.getPointerOperand()->getType(); - if (VTy->isPointerTy()) { - unsigned AS = cast(VTy)->getAddressSpace(); - Check(AS != AddressSpace::CalleeRooted, - "Illegal store to callee rooted value", &SI); - } + Check(AS != AddressSpace::CalleeRooted, + "Illegal store to callee rooted value", &SI); +} + +void GCInvariantVerifier::visitStoreInst(StoreInst &SI) { + Type *VTy = SI.getValueOperand()->getType(); + checkStoreInst(VTy, SI.getPointerAddressSpace(), SI); +} + +void GCInvariantVerifier::visitAtomicRMWInst(AtomicRMWInst &SI) { + Type *VTy = SI.getValOperand()->getType(); + checkStoreInst(VTy, SI.getPointerAddressSpace(), SI); +} + +void GCInvariantVerifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &SI) { + Type *VTy = SI.getNewValOperand()->getType(); + checkStoreInst(VTy, SI.getPointerAddressSpace(), SI); } void GCInvariantVerifier::visitLoadInst(LoadInst &LI) { diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index 4712365e3065b..d8ad3d62d4cc1 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -508,6 +508,16 @@ static std::pair FindBaseValue(const State &S, Value *V, bool UseCac // In general a load terminates a walk break; } + else if (auto LI = dyn_cast(CurrentV)) { + // In general a load terminates a walk + (void)LI; + break; + } + else if (auto LI = dyn_cast(CurrentV)) { + // In general a load terminates a walk + (void)LI; + break; + } else if (auto II = dyn_cast(CurrentV)) { // Some intrinsics behave like LoadInst followed by a SelectInst // This should never happen in a derived addrspace (since those cannot be stored to memory) @@ -550,6 +560,7 @@ static std::pair FindBaseValue(const State &S, Value *V, bool UseCac } } assert(isa(CurrentV) || isa(CurrentV) || + isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || @@ -906,7 +917,8 @@ std::vector LateLowerGCFrame::NumberAllBase(State &S, Value *CurrentV) { Numbers = S.AllCompositeNumbering.at(CurrentV); } } else { - assert((isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV)) + assert((isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || isa(CurrentV) || + isa(CurrentV) || isa(CurrentV)) && "unexpected def expression"); // This is simple, we can just number them sequentially for (unsigned i = 0; i < tracked.count; ++i) { @@ -1065,7 +1077,8 @@ void RecursivelyVisit(callback f, Value *V) { f(VU); if (isa(TheUser) || isa(TheUser) || isa(TheUser) || isa(TheUser) || - isa(TheUser) || isa(TheUser)) + isa(TheUser) || isa(TheUser) || + isa(TheUser) || isa(TheUser)) continue; if (isa(TheUser) || isa(TheUser) || isa(TheUser)) { RecursivelyVisit(f, TheUser); @@ -1606,6 +1619,20 @@ State LateLowerGCFrame::LocalScan(Function &F) { MaybeNoteDef(S, BBS, LI, BBS.Safepoints, std::move(RefinedPtr)); } NoteOperandUses(S, BBS, I); + } else if (auto *LI = dyn_cast(&I)) { + Type *Ty = LI->getNewValOperand()->getType()->getScalarType(); + if (!Ty->isPointerTy() || Ty->getPointerAddressSpace() != AddressSpace::Loaded) { + MaybeNoteDef(S, BBS, LI, BBS.Safepoints); + } + NoteOperandUses(S, BBS, I); + // TODO: do we need MaybeTrackStore(S, LI); + } else if (auto *LI = dyn_cast(&I)) { + Type *Ty = LI->getType()->getScalarType(); + if (!Ty->isPointerTy() || Ty->getPointerAddressSpace() != AddressSpace::Loaded) { + MaybeNoteDef(S, BBS, LI, BBS.Safepoints); + 
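// NOTE: like a load, a cmpxchg or atomic swap materializes a tracked pointer
// for the GC's liveness analysis, so its result must be recorded as a def.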
}
+            NoteOperandUses(S, BBS, I);
+            // TODO: do we need MaybeTrackStore(S, LI);
         } else if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
             auto tracked = CountTrackedPointers(SI->getType());
             if (tracked.count && !tracked.derived) {
diff --git a/src/llvm-propagate-addrspaces.cpp b/src/llvm-propagate-addrspaces.cpp
index 6cfa66ddca6ff..a6afcda870911 100644
--- a/src/llvm-propagate-addrspaces.cpp
+++ b/src/llvm-propagate-addrspaces.cpp
@@ -51,8 +51,11 @@ struct PropagateJuliaAddrspaces : public FunctionPass, public InstVisitor<PropagateJuliaAddrspaces> {
-void PropagateJuliaAddrspaces::visitLoadInst(LoadInst &LI) {
-    unsigned AS = LI.getPointerAddressSpace();
+void PropagateJuliaAddrspaces::visitMemop(Instruction &I, Type *T, unsigned OpIndex) {
+    Value *Original = I.getOperand(OpIndex);
+    unsigned AS = Original->getType()->getPointerAddressSpace();
     if (!isSpecialAS(AS))
         return;
-    Value *Replacement = LiftPointer(LI.getPointerOperand(), LI.getType(), &LI);
+    Value *Replacement = LiftPointer(Original, T, &I);
     if (!Replacement)
         return;
-    LI.setOperand(LoadInst::getPointerOperandIndex(), Replacement);
+    I.setOperand(OpIndex, Replacement);
+}
+
+void PropagateJuliaAddrspaces::visitLoadInst(LoadInst &LI) {
+    visitMemop(LI, LI.getType(), LoadInst::getPointerOperandIndex());
 }
 
 void PropagateJuliaAddrspaces::visitStoreInst(StoreInst &SI) {
-    unsigned AS = SI.getPointerAddressSpace();
-    if (!isSpecialAS(AS))
-        return;
-    Value *Replacement = LiftPointer(SI.getPointerOperand(), SI.getValueOperand()->getType(), &SI);
-    if (!Replacement)
-        return;
-    SI.setOperand(StoreInst::getPointerOperandIndex(), Replacement);
+    visitMemop(SI, SI.getValueOperand()->getType(), StoreInst::getPointerOperandIndex());
+}
+
+void PropagateJuliaAddrspaces::visitAtomicCmpXchgInst(AtomicCmpXchgInst &SI) {
+    visitMemop(SI, SI.getNewValOperand()->getType(), AtomicCmpXchgInst::getPointerOperandIndex());
+}
+
+void PropagateJuliaAddrspaces::visitAtomicRMWInst(AtomicRMWInst &SI) {
+    visitMemop(SI, SI.getType(), AtomicRMWInst::getPointerOperandIndex());
 }
 
 void PropagateJuliaAddrspaces::visitMemSetInst(MemSetInst &MI) {
diff --git a/test/atomics.jl b/test/atomics.jl
index 59c45299db221..2a0cbd7357c6e 100644
--- a/test/atomics.jl
+++ b/test/atomics.jl
@@ -184,12 +184,12 @@ test_field_operators(ARefxy{Complex{Int128}}(12345_10, 12345_20))
     @test_throws ConcurrencyViolationError("replacefield!: non-atomic field cannot be written atomically") replacefield!(r, :y, y, y, :acquire_release, :not_atomic)
     @test_throws ConcurrencyViolationError("replacefield!: non-atomic field cannot be written atomically") replacefield!(r, :y, y, y, :sequentially_consistent, :not_atomic)
     @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :u)
-    @test_throws ConcurrencyViolationError("replacefield!: non-atomic field cannot be accessed atomically") replacefield!(r, :y, y, y, :not_atomic, :unordered)
-    @test_throws ConcurrencyViolationError("replacefield!: non-atomic field cannot be accessed atomically") replacefield!(r, :y, y, y, :not_atomic, :monotonic)
-    @test_throws ConcurrencyViolationError("replacefield!: non-atomic field cannot be accessed atomically") replacefield!(r, :y, y, y, :not_atomic, :acquire)
+    @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :unordered)
+    @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :monotonic)
+    @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :acquire)
     @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :release)
     @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :acquire_release)
-    @test_throws 
ConcurrencyViolationError("replacefield!: non-atomic field cannot be accessed atomically") replacefield!(r, :y, y, y, :not_atomic, :sequentially_consistent) + @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :y, y, y, :not_atomic, :sequentially_consistent) @test replacefield!(r, :y, x, y, :not_atomic, :not_atomic) === (x, true) @test replacefield!(r, :y, x, y, :not_atomic, :not_atomic) === (y, x === y) @test replacefield!(r, :y, y, y, :not_atomic) === (y, true) @@ -225,12 +225,12 @@ test_field_operators(ARefxy{Complex{Int128}}(12345_10, 12345_20)) @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be accessed non-atomically") replacefield!(r, :x, x, x, :acquire_release, :not_atomic) @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be accessed non-atomically") replacefield!(r, :x, x, x, :sequentially_consistent, :not_atomic) @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :u) - @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be written non-atomically") replacefield!(r, :x, x, x, :not_atomic, :unordered) - @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be written non-atomically") replacefield!(r, :x, x, x, :not_atomic, :monotonic) - @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be written non-atomically") replacefield!(r, :x, x, x, :not_atomic, :acquire) + @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :unordered) + @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :monotonic) + @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :acquire) @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :release) @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :acquire_release) - @test_throws ConcurrencyViolationError("replacefield!: atomic field cannot be written non-atomically") replacefield!(r, :x, x, x, :not_atomic, :sequentially_consistent) + @test_throws ConcurrencyViolationError("invalid atomic ordering") replacefield!(r, :x, x, x, :not_atomic, :sequentially_consistent) @test replacefield!(r, :x, x, y, :sequentially_consistent, :sequentially_consistent) === (x, true) @test replacefield!(r, :x, x, y, :sequentially_consistent, :sequentially_consistent) === (y, x === y) @test replacefield!(r, :x, y, x, :sequentially_consistent) === (y, true)
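
For orientation, a usage-level sketch of the semantics the hunks above implement and test. This is editorial, not part of the patch: the Counter struct is hypothetical, while the builtins, orderings, and the (old, success) return shape mirror test/atomics.jl at this commit.

# Editorial sketch (assumes a Julia build containing this patch).
mutable struct Counter
    @atomic x::Int    # atomic field: accesses must carry an atomic ordering
end

c = Counter(10)

# swapfield!: atomically store the new value and return the old one.
old = swapfield!(c, :x, 20, :sequentially_consistent)                # old == 10

# replacefield!: compare-and-swap; at this commit it returns (old, success::Bool).
@assert replacefield!(c, :x, 20, 30, :sequentially_consistent) === (20, true)
@assert replacefield!(c, :x, 20, 40, :sequentially_consistent) === (30, false)

# The failure ordering may not be stronger than the success ordering; per the
# builtins.c hunk at the top this now throws
# ConcurrencyViolationError("invalid atomic ordering") up front, instead of the
# misleading atomic/non-atomic access errors the old tests expected.
replacefield!(c, :x, 30, 50, :acquire_release, :acquire)             # allowed
# replacefield!(c, :x, 50, 60, :monotonic, :acquire)                 # throws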