Skip to content

Commit

Permalink
Merge branch 'gpugraph_new' of https://github.com/xuewujiao/Paddle into fix_600_infer
Browse files Browse the repository at this point in the history
  • Loading branch information
DesmonDay committed May 16, 2023
2 parents 58671ba + f100887 commit fdaefb2
Show file tree
Hide file tree
Showing 7 changed files with 602 additions and 607 deletions.
3 changes: 3 additions & 0 deletions paddle/fluid/distributed/ps/service/ps_local_client.cc
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@ int32_t PsLocalClient::Initialize() {

::std::future<int32_t> PsLocalClient::Shrink(uint32_t table_id,
                                             const std::string threshold) {
  // Local client ignores the threshold argument and delegates shrinking
  // directly to the underlying table with an empty parameter string.
  GetTable(table_id)->Shrink("");
  return done();
}

Expand Down
10 changes: 9 additions & 1 deletion paddle/fluid/distributed/ps/table/memory_sparse_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1050,18 +1050,26 @@ int32_t MemorySparseTable::Flush() { return 0; }

int32_t MemorySparseTable::Shrink(const std::string &param) {
  VLOG(0) << "MemorySparseTable::Shrink";
  // Total number of feasign entries erased across all shards. Atomic because
  // the per-shard loop below runs under an OpenMP parallel-for.
  std::atomic<uint32_t> shrink_size_all{0};
  omp_set_num_threads(_real_local_shard_num);
#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < _real_local_shard_num; ++i) {
    auto &shard = _local_shards[i];
    int erased = 0;
    // Walk the shard, erasing every entry the accessor flags as shrinkable;
    // erase() returns the next valid iterator, otherwise advance manually.
    auto it = shard.begin();
    while (it != shard.end()) {
      if (_value_accesor->Shrink(it.value().data())) {
        it = shard.erase(it);
        ++erased;
      } else {
        ++it;
      }
    }
    shrink_size_all += erased;
  }
  VLOG(0) << "MemorySparseTable::Shrink success, shrink size:"
          << shrink_size_all;
  return 0;
}

Expand Down
Loading

0 comments on commit fdaefb2

Please sign in to comment.