/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5. The full HDF5 copyright notice, including      *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://www.hdfgroup.org/licenses.               *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Programmer:  Mohamad Chaarawi
 *              February 2015
 *
 * Purpose:     This test creates a file and a bunch of objects in the
 *              file and then calls MPI_Finalize without closing anything.
 *              The attribute delete callback that the library attaches to
 *              MPI_COMM_SELF should fire and shut down the HDF5 library,
 *              closing all open objects. The t_prestart test will read the
 *              file back and make sure all created objects are there.
 */
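
/*
 * Background sketch (illustrative only, not part of this test): MPI lets a
 * library attach an attribute with a delete callback to MPI_COMM_SELF, and
 * MPI_Finalize behaves as if MPI_COMM_SELF were freed first, so the callback
 * runs before MPI itself shuts down. Roughly, with H5close closing all open
 * HDF5 objects and identifiers:
 *
 *     static int
 *     shutdown_cb(MPI_Comm comm, int keyval, void *attr_val, void *extra)
 *     {
 *         H5close();
 *         return MPI_SUCCESS;
 *     }
 *
 *     int key;
 *     MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, shutdown_cb, &key, NULL);
 *     MPI_Comm_set_attr(MPI_COMM_SELF, key, NULL);
 *
 * The callback HDF5 actually registers lives in the library internals; the
 * names above are hypothetical.
 */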

#include "testphdf5.h"

int         nerrors = 0; /* errors count */
const char *FILENAME[] = {"shutdown", NULL};

int
main(int argc, char **argv)
{
    hid_t     file_id, dset_id, grp_id;
    hid_t     fapl, sid, mem_dataspace;
    hsize_t   dims[RANK], i;
    herr_t    ret;
    char      filename[1024];
    int       mpi_size, mpi_rank;
    MPI_Comm  comm = MPI_COMM_WORLD;
    MPI_Info  info = MPI_INFO_NULL;
    hsize_t   start[RANK];
    hsize_t   count[RANK];
    hsize_t   stride[RANK];
    hsize_t   block[RANK];
    DATATYPE *data_array = NULL; /* data buffer */

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);

    if (MAINPROCESS)
        TESTING("proper shutdown of HDF5 library");

    /* Set up file access property list with parallel I/O access */
    fapl = H5Pcreate(H5P_FILE_ACCESS);
    VRFY((fapl >= 0), "H5Pcreate succeeded");
    ret = H5Pset_fapl_mpio(fapl, comm, info);
    VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
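
    /*
     * H5Pset_fapl_mpio selects the MPI-IO file driver, so the file create
     * below is collective: every rank in comm opens the same file in
     * parallel.
     */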

    h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
    file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    VRFY((file_id >= 0), "H5Fcreate succeeded");
    grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((grp_id >= 0), "H5Gcreate succeeded");

    dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
    dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
    sid     = H5Screate_simple(RANK, dims, NULL);
    VRFY((sid >= 0), "H5Screate_simple succeeded");

    dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    VRFY((dset_id >= 0), "H5Dcreate succeeded");

    /* allocate memory for data buffer */
    data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
    VRFY((data_array != NULL), "data_array HDmalloc succeeded");

    /* Each process takes a slab of rows. */
    block[0]  = dims[0] / (hsize_t)mpi_size;
    block[1]  = dims[1];
    stride[0] = block[0];
    stride[1] = block[1];
    count[0]  = 1;
    count[1]  = 1;
    start[0]  = (hsize_t)mpi_rank * block[0];
    start[1]  = 0;
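
    /*
     * For example, assuming ROW_FACTOR = 8 and COL_FACTOR = 16 (the values
     * defined in testphdf5.h), running with mpi_size = 2 gives a 16 x 32
     * dataset; each rank selects one 8 x 32 block, so rank 0 writes rows
     * 0-7 and rank 1 writes rows 8-15.
     */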

    /* put some trivial data in the data_array */
    for (i = 0; i < dims[0] * dims[1]; i++)
        data_array[i] = mpi_rank + 1;

    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");

    /* create a memory dataspace independently */
    mem_dataspace = H5Screate_simple(RANK, block, NULL);
    VRFY((mem_dataspace >= 0), "H5Screate_simple succeeded");

    /* write data independently */
    ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
    VRFY((ret >= 0), "H5Dwrite succeeded");
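
    /*
     * Passing H5P_DEFAULT as the transfer property list makes this write
     * independent; a collective write would instead pass a dataset-transfer
     * plist configured with H5Pset_dxpl_mpio(..., H5FD_MPIO_COLLECTIVE).
     */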

    /* release data buffers */
    if (data_array)
        HDfree(data_array);
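
    /*
     * Note: file_id, grp_id, dset_id, sid, mem_dataspace, and fapl are all
     * deliberately left open here; the point of the test is that the
     * shutdown hook on MPI_COMM_SELF closes them during MPI_Finalize.
     */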

    MPI_Finalize();

    nerrors += GetTestNumErrs();

    if (MAINPROCESS) {
        if (0 == nerrors)
            PASSED();
        else
            H5_FAILED();
    }

    return (nerrors != 0);
}