hdf5/testpar/t_pflush1.c

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by The HDF Group.                                               *
 * All rights reserved.                                                      *
 *                                                                           *
 * This file is part of HDF5.  The full HDF5 copyright notice, including     *
 * terms governing use, modification, and redistribution, is contained in    *
 * the COPYING file, which can be found at the root of the source code       *
 * distribution tree, or in https://www.hdfgroup.org/licenses.               *
 * If you do not have access to either file, you may request a copy from     *
 * help@hdfgroup.org.                                                        *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Programmer:  Leon Arber  <larber@uiuc.edu>
 *              Sept. 28, 2006.
 *
 * Purpose:     This is the first half of a two-part test that makes sure
 *              that a file can be read after a parallel application crashes
 *              as long as the file was flushed first.  We simulate a crash by
 *              calling _exit() since this doesn't flush HDF5 caches but
 *              still exits with success.
 */
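
/*
 * The actual verification happens in the companion test (part 2 of this
 * pair).  The sketch below is illustrative only -- the file names, FAIL
 * macro, and identifiers are assumptions rather than the real part-2
 * source -- but it shows the idea: the flushed file must open cleanly
 * after the simulated crash, while only the unflushed file may be damaged.
 *
 *     // Expected to succeed: this file was flushed before _exit().
 *     if ((fid = H5Fopen("flush.h5", H5F_ACC_RDONLY, fapl_id)) < 0)
 *         FAIL;
 *
 *     // May legitimately fail: this file was never flushed, so suppress
 *     // the error stack while trying to open it.
 *     H5E_BEGIN_TRY
 *     {
 *         fid = H5Fopen("noflush.h5", H5F_ACC_RDONLY, fapl_id);
 *     }
 *     H5E_END_TRY
 */
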
#include "h5test.h"

const char *FILENAME[] = {"flush", "noflush", NULL};

static int *data_g = NULL;

#define N_GROUPS 100

/*-------------------------------------------------------------------------
 * Function:    create_test_file
 *
 * Purpose:     Creates the file used in part 1 of the test
 *
 * Return:      Success:    A valid file ID
 *              Failure:    H5I_INVALID_HID
 *
 * Programmer:  Leon Arber
 *              Sept. 26, 2006
 *
 *-------------------------------------------------------------------------
 */
static hid_t
create_test_file(char *name, hid_t fapl_id)
{
    hid_t   fid           = H5I_INVALID_HID;
    hid_t   dcpl_id       = H5I_INVALID_HID;
    hid_t   sid           = H5I_INVALID_HID;
    hid_t   did           = H5I_INVALID_HID;
    hid_t   top_level_gid = H5I_INVALID_HID;
    hid_t   gid           = H5I_INVALID_HID;
    hid_t   dxpl_id       = H5I_INVALID_HID;
    hsize_t dims[2]       = {100, 100};
    hsize_t chunk_dims[2] = {5, 5};
    hsize_t i, j;

    if ((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
        goto error;

    /* Create a chunked dataset */
    if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
        goto error;
    if (H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
        goto error;
    if ((sid = H5Screate_simple(2, dims, NULL)) < 0)
        goto error;
    if ((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
        goto error;

    if ((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
        goto error;
    if (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
        goto error;

    /* Write some data */
    for (i = 0; i < dims[0]; i++)
        for (j = 0; j < dims[1]; j++)
            data_g[(i * 100) + j] = (int)(i + (i * j) + j);

    /* Collective write of the whole dataset; the same dataspace ID serves as
     * both the memory and file selection.
     */
    if (H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
        goto error;

    /* Create some groups */
    if ((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        goto error;
    for (i = 0; i < N_GROUPS; i++) {
        /* The caller's file-name buffer is reused to format the group names;
         * the file name itself is no longer needed at this point.
         */
        HDsprintf(name, "grp%02u", (unsigned)i);
        if ((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
            goto error;
        if (H5Gclose(gid) < 0)
            goto error;
    }

    return fid;

error:
    return H5I_INVALID_HID;
} /* end create_test_file() */

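/*
 * Illustrative only (not compiled as part of this test): a reader such as
 * the part-2 check could verify the deterministic pattern written by
 * create_test_file() roughly as below; `did' and `data' are hypothetical
 * handles/buffers on the reading side.
 *
 *     if (H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
 *         goto error;
 *     for (i = 0; i < 100; i++)
 *         for (j = 0; j < 100; j++)
 *             if (data[(i * 100) + j] != (int)(i + (i * j) + j))
 *                 goto error;
 */
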
/*-------------------------------------------------------------------------
 * Function:    main
 *
 * Purpose:     Part 1 of a two-part parallel H5Fflush() test.
 *
 * Return:      EXIT_FAILURE (always)
 *
 * Programmer:  Robb Matzke
 *              Friday, October 23, 1998
 *
 *-------------------------------------------------------------------------
 */
int
main(int argc, char *argv[])
{
    hid_t       fid1    = H5I_INVALID_HID;
    hid_t       fid2    = H5I_INVALID_HID;
    hid_t       fapl_id = H5I_INVALID_HID;
    MPI_File   *mpifh_p = NULL;
    char        name[1024];
    const char *envval = NULL;
    int         mpi_size;
    int         mpi_rank;
    MPI_Comm    comm = MPI_COMM_WORLD;
    MPI_Info    info = MPI_INFO_NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(comm, &mpi_size);
    MPI_Comm_rank(comm, &mpi_rank);

    if (mpi_rank == 0)
        TESTING("H5Fflush (part1)");

    /* Don't run using the split VFD */
    envval = HDgetenv("HDF5_DRIVER");
    if (envval == NULL)
        envval = "nomatch";
    if (!HDstrcmp(envval, "split")) {
        if (mpi_rank == 0) {
            SKIPPED();
            HDputs("    Test not compatible with current Virtual File Driver");
        }
        MPI_Finalize();
        HDexit(EXIT_FAILURE);
    }

    if (NULL == (data_g = HDmalloc(100 * 100 * sizeof(*data_g))))
        goto error;

    if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        goto error;
    if (H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
        goto error;

    /* Create the file */
    h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
    if ((fid1 = create_test_file(name, fapl_id)) < 0)
        goto error;

    /* Flush and exit without closing the library */
    if (H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
        goto error;

    /* Create the other file which will not be flushed */
    h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
    if ((fid2 = create_test_file(name, fapl_id)) < 0)
        goto error;

    if (mpi_rank == 0)
        PASSED();

    HDfflush(stdout);
    HDfflush(stderr);

    /* Some systems (e.g., AIX) do not like files being left open when
     * MPI_Finalize() is called, so we retrieve the underlying MPI file
     * handles and close them by hand before exiting.  The _exit() call is
     * still needed afterwards to keep atexit() processing from running on
     * some systems.  Note that the MPIO VFD returns the address of the file
     * handle stored in the VFD struct because MPI_File_close() wants to
     * modify the file-handle variable.
     */

    /* Close file 1 */
    if (H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
        goto error;
    if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
        goto error;

    /* Close file 2 */
    if (H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
        goto error;
    if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
        goto error;

    HDfflush(stdout);
    HDfflush(stderr);

    if (data_g) {
        HDfree(data_g);
        data_g = NULL;
    }

    /* Always exit with a failure code!
     *
     * In accordance with the standard, not having all processes call
     * MPI_Finalize() can be considered an error, so mpiexec et al. may
     * indicate failure on return.  It's much easier to always ignore the
     * failure condition than to handle some platforms returning success
     * and others failure.
     */
    HD_exit(EXIT_FAILURE);

error:
    HDfflush(stdout);
    HDfflush(stderr);
    HDprintf("*** ERROR ***\n");
    HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
    HDfflush(stdout);

    if (data_g)
        HDfree(data_g);

    HD_exit(EXIT_FAILURE);
} /* end main() */