Skip to content

Commit

Permalink
[JFR] Crash in Monitor::set_owner_implementation on slow debug build
Browse files Browse the repository at this point in the history
Summary: allow the mutex rank-order check to be skipped on a per-thread basis

Test Plan: jdk/test/jdk/jfr

Reviewed-by: kelthuzadx, zhengxiaolinX

Issue: dragonwell-project/dragonwell8#223
  • Loading branch information
D-D-H committed Mar 26, 2021
1 parent 7129915 commit 6db5be9
Show file tree
Hide file tree
Showing 3 changed files with 24 additions and 2 deletions.
16 changes: 16 additions & 0 deletions src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -109,6 +109,18 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) {
void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
JfrCheckpointThreadClosure tc(writer);
MutexLockerEx ml(Threads_lock);
#ifdef ASSERT
// OpenJDK8 doesn't support ThreadSMR, so it's necessary to acquire
// the Threads_lock here.
// And JfrCheckpointThreadClosure acquires JNIGlobalHandle_lock, whose
// rank is greater than Threads_lock's rank, this will cause the
// mutex rank checking failure.
// A workaround is to skip the checking logic.
// But we need to be aware that if there is a thread that acquires
// JNIGlobalHandle_lock first, and then acquires Threads_lock, it will
// cause a deadlock. At present, there is no such situation.
Thread::current()->set_skip_rank_order_check(true);
#endif
JfrJavaThreadIterator javathreads;
while (javathreads.has_next()) {
tc.do_thread(javathreads.next());
Expand All @@ -117,6 +129,10 @@ void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
while (nonjavathreads.has_next()) {
tc.do_thread(nonjavathreads.next());
}
#ifdef ASSERT
// enable checking again
Thread::current()->set_skip_rank_order_check(false);
#endif
}

void JfrThreadGroupConstant::serialize(JfrCheckpointWriter& writer) {
Expand Down
5 changes: 3 additions & 2 deletions src/share/vm/runtime/mutex.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1228,7 +1228,8 @@ Monitor * Monitor::get_least_ranked_lock(Monitor * locks) {
for (tmp = locks; tmp != NULL; tmp = tmp->next()) {
if (tmp->next() != NULL) {
assert(tmp->rank() == Mutex::native ||
tmp->rank() <= tmp->next()->rank(), "mutex rank anomaly?");
tmp->rank() <= tmp->next()->rank() ||
Thread::current()->skip_rank_order_check(), "mutex rank anomaly?");
}
}
}
Expand Down Expand Up @@ -1314,7 +1315,7 @@ void Monitor::set_owner_implementation(Thread *new_owner) {
!SafepointSynchronize::is_at_safepoint() &&
this != Interrupt_lock && this != ProfileVM_lock &&
!(this == Safepoint_lock && contains(locks, Terminator_lock) &&
SafepointSynchronize::is_synchronizing())) {
SafepointSynchronize::is_synchronizing()) && !Thread::current()->skip_rank_order_check()) {
new_owner->print_owned_locks();
fatal(err_msg("acquiring lock %s/%d out of order with lock %s/%d -- "
"possible deadlock", this->name(), this->rank(),
Expand Down
5 changes: 5 additions & 0 deletions src/share/vm/runtime/thread.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -312,10 +312,15 @@ class Thread: public ThreadShadow {
#ifdef ASSERT
private:
bool _visited_for_critical_count;
bool _skip_rank_order_check;

public:
void set_visited_for_critical_count(bool z) { _visited_for_critical_count = z; }
bool was_visited_for_critical_count() const { return _visited_for_critical_count; }

// introduced by Alibaba Dragonwell issue 233
void set_skip_rank_order_check(bool val) { _skip_rank_order_check = val; }
bool skip_rank_order_check() { return _skip_rank_order_check; }
#endif

public:
Expand Down

0 comments on commit 6db5be9

Please sign in to comment.