Fixing performance bug with a lock being held too long when processing profile objects from the queues.
khuck committed Jan 29, 2020
1 parent c25b735 commit 821b859
Showing 2 changed files with 42 additions and 18 deletions.
src/apex/profiler_listener.cpp (41 additions, 17 deletions)
@@ -1070,10 +1070,14 @@ node_color * get_node_color(double v,double vmin,double vmax)
 #ifdef APEX_HAVE_HPX
     bool schedule_another_task = false;
     {
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (auto a_queue : allqueues) {
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = allqueues.size();
+        }
+        for (size_t q = 0 ; q < num_queues ; q++) {
             int i = 0;
-            while(!_done && a_queue->try_dequeue(p)) {
+            while(!_done && allqueues[q]->try_dequeue(p)) {
                 process_profile(p, 0);
                 if (++i > 1000) {
                     schedule_another_task = true;
@@ -1083,10 +1087,14 @@ node_color * get_node_color(double v,double vmin,double vmax)
         }
     }
     if (apex_options::use_taskgraph_output()) {
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (auto a_queue : dependency_queues) {
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = dependency_queues.size();
+        }
+        for (size_t q = 0 ; q < num_queues ; q++) {
             int i = 0;
-            while(!_done && a_queue->try_dequeue(td)) {
+            while(!_done && dependency_queues[q]->try_dequeue(td)) {
                 process_dependency(td);
                 if (++i > 1000) {
                     schedule_another_task = true;
@@ -1104,17 +1112,25 @@ node_color * get_node_color(double v,double vmin,double vmax)
             "profiler_listener::process_profiles: main loop");
     }
     {
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (auto a_queue : allqueues) {
-            while(!_done && a_queue->try_dequeue(p)) {
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = allqueues.size();
+        }
+        for (size_t q = 0 ; q < num_queues ; q++) {
+            while(!_done && allqueues[q]->try_dequeue(p)) {
                 process_profile(p, 0);
             }
         }
     }
     if (apex_options::use_taskgraph_output()) {
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (auto a_queue : dependency_queues) {
-            while(!_done && a_queue->try_dequeue(td)) {
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = dependency_queues.size();
+        }
+        for (size_t q = 0 ; q < num_queues ; q++) {
+            while(!_done && dependency_queues[q]->try_dequeue(td)) {
                 process_dependency(td);
             }
         }
@@ -1285,9 +1301,13 @@ if (rc != 0) cout << "PAPI error! " << name << ": " << PAPI_strerror(rc) << endl
 {
     size_t ignored = 0;
     { // we need to lock in case another thread appears
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (auto a_queue : allqueues) {
-            ignored += a_queue->size_approx();
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = allqueues.size();
+        }
+        for (size_t q = 0 ; q < num_queues ; q++) {
+            ignored += allqueues[q]->size_approx();
         }
     }
     if (ignored > 100000) {
@@ -1299,8 +1319,12 @@ if (rc != 0) cout << "PAPI error! " << name << ": " << PAPI_strerror(rc) << endl
     * so just process the queue. Anyway, it shouldn't get backed up that
     * much without suggesting there is a bigger problem. */
    {
-        std::unique_lock<std::mutex> queue_lock(queue_mtx);
-        for (unsigned int i=0; i<allqueues.size(); ++i) {
+        size_t num_queues = 0;
+        {
+            std::unique_lock<std::mutex> queue_lock(queue_mtx);
+            num_queues = allqueues.size();
+        }
+        for (unsigned int i=0 ; i < num_queues ; ++i) {
            if (apex_options::use_tau()) {
                tau_listener::Tau_start_wrapper(
                    "profiler_listener::concurrent_cleanup");
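
Every profiler_listener.cpp hunk above applies the same pattern: take queue_mtx only long enough to record how many queues are registered, release it, then drain the queues by index with no lock held, capping each pass at roughly 1000 items so a follow-up task can be scheduled to finish the rest. Below is a minimal, self-contained sketch of that pattern under simplified assumptions: Item, SimpleQueue, process_profile, and drain_all are illustrative stand-ins, not the APEX types or signatures.

#include <atomic>
#include <cstddef>
#include <deque>
#include <memory>
#include <mutex>
#include <vector>

struct Item { int value; };

// Stand-in for a per-thread queue; the real code only needs try_dequeue().
struct SimpleQueue {
    std::mutex m;
    std::deque<Item> d;
    bool try_dequeue(Item& out) {
        std::lock_guard<std::mutex> l(m);
        if (d.empty()) return false;
        out = d.front();
        d.pop_front();
        return true;
    }
};

std::mutex queue_mtx;                                 // guards the queue registry
std::vector<std::unique_ptr<SimpleQueue>> allqueues;  // grows as worker threads appear
std::atomic<bool> _done{false};

void process_profile(const Item&) { /* consume one item */ }

bool drain_all() {
    bool schedule_another_pass = false;
    std::size_t num_queues = 0;
    {
        // Held only long enough to snapshot the registry size, so a new
        // thread registering its queue is not blocked behind a long drain.
        std::unique_lock<std::mutex> queue_lock(queue_mtx);
        num_queues = allqueues.size();
    }
    for (std::size_t q = 0; q < num_queues; q++) {
        int i = 0;
        Item p;
        // NOTE: indexing allqueues[q] without the lock mirrors the fix and
        // assumes entries [0, num_queues) are never removed or relocated
        // while the drain is in progress.
        while (!_done && allqueues[q]->try_dequeue(p)) {
            process_profile(p);
            if (++i > 1000) {             // cap this pass
                schedule_another_pass = true;
                break;
            }
        }
    }
    return schedule_another_pass;          // caller re-schedules the drain task
}

The cap of 1000 items per queue per pass matches the constant in the diff; it trades a little extra task-scheduling overhead for never letting a single drain pass run unbounded.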
src/unit_tests/C++/apex_pthread_flood.cpp (1 addition, 1 deletion)
@@ -15,7 +15,7 @@
 #ifdef APEX_HAVE_TAU
 #define FLOOD_LEVEL 15 // TAU has a limit of 128 threads.
 #else
-#define FLOOD_LEVEL 1000
+#define FLOOD_LEVEL 100
 #endif
 
 inline int foo (int i) {
