H5_CHECK_OVERFLOW
seanm committed Jul 18, 2023
1 parent 64c1c49 commit f7163f7
Showing 2 changed files with 10 additions and 10 deletions.
10 changes: 5 additions & 5 deletions src/H5Dmpio.c
@@ -2903,7 +2903,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
} /* end while */

/* Gather all the information */
- H5_CHECK_OVERFLOW(total_chunks, size_t, int)
+ H5_CHECK_OVERFLOW(total_chunks, size_t, int);
if (MPI_SUCCESS != (mpi_code = MPI_Gather(io_mode_info, (int)total_chunks, MPI_BYTE, recv_io_mode_info,
(int)total_chunks, MPI_BYTE, root, comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
@@ -3865,7 +3865,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
* future, this may become a problem and derived datatypes
* will need to be used.
*/
- H5_CHECK_OVERFLOW(mod_data_size, size_t, int)
+ H5_CHECK_OVERFLOW(mod_data_size, size_t, int);

/* Send modification data to new owner */
if (MPI_SUCCESS !=
@@ -3905,8 +3905,8 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
"too many shared chunks in parallel filtered write operation")

- H5_CHECK_OVERFLOW(num_send_requests, size_t, int)
- H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int)
+ H5_CHECK_OVERFLOW(num_send_requests, size_t, int);
+ H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int);

/*
* Allocate receive buffer and MPI_Request arrays for non-blocking
@@ -3942,7 +3942,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&status, MPI_BYTE, &msg_size)))
HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements_x failed", mpi_code)

- H5_CHECK_OVERFLOW(msg_size, MPI_Count, int)
+ H5_CHECK_OVERFLOW(msg_size, MPI_Count, int);
#else
int msg_size = 0;

10 changes: 5 additions & 5 deletions src/H5Smpio.c
@@ -695,7 +695,7 @@ H5S__mpio_reg_hyper_type(H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
fprintf(H5DEBUG(S), "%s: Flattened selection\n", __func__);
#endif
for (u = 0; u < rank; ++u) {
- H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
+ H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t);
d[u].start = (hssize_t)diminfo[u].start + sel_iter->u.hyp.sel_off[u];
d[u].strid = diminfo[u].stride;
d[u].block = diminfo[u].block;
@@ -729,7 +729,7 @@ H5S__mpio_reg_hyper_type(H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
fprintf(H5DEBUG(S), "%s: Non-flattened selection\n", __func__);
#endif
for (u = 0; u < rank; ++u) {
- H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
+ H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t);
d[u].start = (hssize_t)diminfo[u].start + space->select.offset[u];
d[u].strid = diminfo[u].stride;
d[u].block = diminfo[u].block;
@@ -1178,7 +1178,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e

/* Store displacement & block length */
disp[outercount] = (MPI_Aint)elmt_size * (MPI_Aint)span->low;
- H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+ H5_CHECK_OVERFLOW(nelmts, hsize_t, int);
blocklen[outercount] = (int)nelmts;

if (bigio_count < (hsize_t)blocklen[outercount])
@@ -1287,7 +1287,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e
nelmts = (span->high - span->low) + 1;

/* Build the MPI datatype for this node */
- H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+ H5_CHECK_OVERFLOW(nelmts, hsize_t, int);
if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)nelmts, 1, stride, down_type,
&inner_type[outercount])))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
Expand All @@ -1297,7 +1297,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e
} /* end while */

/* Building the whole vector datatype */
- H5_CHECK_OVERFLOW(outercount, size_t, int)
+ H5_CHECK_OVERFLOW(outercount, size_t, int);
if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type,
&spans->op_info[op_info_i].u.down_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)

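For context: H5_CHECK_OVERFLOW is an internal HDF5 macro (defined in src/H5private.h) that, in debug builds, asserts that a value survives the narrowing cast performed on the following line; in release builds it compiles away. The commit appends a trailing semicolon to each call site. The sketch below is only an approximation of how such a macro can be written, not the exact HDF5 definition; the variable names and the small main() are illustrative. Assuming a block-style definition like this one, the added semicolon is simply an empty statement after the macro's block, so the call sites compile the same way with or without it.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Approximation of an overflow-check macro: in debug builds, cast the value
 * to the destination type and assert that casting it back reproduces the
 * original value; with NDEBUG defined it expands to nothing. */
#ifndef NDEBUG
#define H5_CHECK_OVERFLOW(var, vartype, casttype)                              \
    {                                                                          \
        casttype _tmp_overflow = (casttype)(var);                              \
        assert((var) == (vartype)_tmp_overflow);                               \
    }
#else
#define H5_CHECK_OVERFLOW(var, vartype, casttype)
#endif

int main(void)
{
    size_t total_chunks = 1024; /* must fit in an int before being passed to an MPI count argument */

    /* Call site in the style of the diff above; the trailing semicolon is an
     * empty statement following the macro's block. */
    H5_CHECK_OVERFLOW(total_chunks, size_t, int);
    printf("total_chunks as int: %d\n", (int)total_chunks);
    return 0;
}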