Actual source code: ex76.c
1: #include <petscksp.h>
2: #include <petsc/private/petscimpl.h>
4: static char help[] = "Solves a linear system using PCHPDDM.\n\n";
6: int main(int argc, char **args)
7: {
8: Vec b; /* computed solution and RHS */
9: Mat A, aux, X, B; /* linear system matrix, local auxiliary (Neumann) matrix, and work matrices */
10: KSP ksp; /* linear solver context */
11: PC pc;
12: IS is, sizes;
13: const PetscInt *idx;
14: PetscMPIInt rank, size;
15: PetscInt m, N = 1;
16: PetscLayout map;
17: PetscViewer viewer;
18: char dir[PETSC_MAX_PATH_LEN], name[PETSC_MAX_PATH_LEN], type[256];
19: PetscBool3 share = PETSC_BOOL3_UNKNOWN;
20: PetscBool flg, set, transpose = PETSC_FALSE;
22: PetscFunctionBeginUser;
23: PetscCall(PetscInitialize(&argc, &args, NULL, help));
24: PetscCall(PetscLogDefaultBegin());
25: PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD, &size));
26: PetscCheck(size == 4, PETSC_COMM_WORLD, PETSC_ERR_WRONG_MPI_SIZE, "This example requires 4 processes");
27: PetscCall(PetscOptionsGetInt(NULL, NULL, "-rhs", &N, NULL));
28: PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));
29: PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
30: PetscCall(PetscStrncpy(dir, ".", sizeof(dir)));
31: PetscCall(PetscOptionsGetString(NULL, NULL, "-load_dir", dir, sizeof(dir), NULL));
32: /* loading matrices */
33: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/sizes_%d.dat", dir, size));
34: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
35: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
36: PetscCall(ISLoad(sizes, viewer));
37: PetscCall(ISGetIndices(sizes, &idx));
38: PetscCall(MatSetSizes(A, idx[0], idx[1], idx[2], idx[3]));
39: PetscCall(MatCreate(PETSC_COMM_WORLD, &X));
40: PetscCall(MatSetSizes(X, idx[4], idx[4], PETSC_DETERMINE, PETSC_DETERMINE));
41: PetscCall(MatSetUp(X));
42: PetscCall(ISRestoreIndices(sizes, &idx));
43: PetscCall(ISDestroy(&sizes));
44: PetscCall(PetscViewerDestroy(&viewer));
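/* load the global system matrix A, whose parallel layout was set above from sizes_<size>.dat */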
45: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/A.dat", dir));
46: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
47: PetscCall(MatLoad(A, viewer));
48: PetscCall(PetscViewerDestroy(&viewer));
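/* load, for each process, the indices of its overlapping subdomain, expressed in the global numbering of A */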
49: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/is_%d.dat", dir, size));
50: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
51: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
52: PetscCall(MatGetLayouts(X, &map, NULL));
53: PetscCall(ISSetLayout(sizes, map));
54: PetscCall(ISLoad(sizes, viewer));
55: PetscCall(ISGetLocalSize(sizes, &m));
56: PetscCall(ISGetIndices(sizes, &idx));
57: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_COPY_VALUES, &is));
58: PetscCall(ISRestoreIndices(sizes, &idx));
59: PetscCall(ISDestroy(&sizes));
60: PetscCall(MatGetBlockSize(A, &m));
61: PetscCall(ISSetBlockSize(is, m));
62: PetscCall(PetscViewerDestroy(&viewer));
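/* load the unassembled Neumann matrices and keep the local diagonal block as the auxiliary Mat for PCHPDDM */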
63: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/Neumann_%d.dat", dir, size));
64: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
65: PetscCall(MatLoad(X, viewer));
66: PetscCall(PetscViewerDestroy(&viewer));
67: PetscCall(MatGetDiagonalBlock(X, &B));
68: PetscCall(MatDuplicate(B, MAT_COPY_VALUES, &aux));
69: PetscCall(MatDestroy(&X));
70: flg = PETSC_FALSE;
71: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_levels_1_st_share_sub_ksp", &flg, &set));
72: if (flg) { /* PETSc LU/Cholesky is struggling numerically for bs > 1 */
73: /* only set the proper bs for the geneo_share_* tests, 1 otherwise */
74: PetscCall(MatSetBlockSizesFromMats(aux, A, A));
75: share = PETSC_BOOL3_TRUE;
76: } else if (set) share = PETSC_BOOL3_FALSE;
77: PetscCall(MatSetOption(A, MAT_SYMMETRIC, PETSC_TRUE));
78: PetscCall(MatSetOption(aux, MAT_SYMMETRIC, PETSC_TRUE));
79: /* ready for testing */
80: PetscOptionsBegin(PETSC_COMM_WORLD, "", "", "");
81: PetscCall(PetscStrncpy(type, MATAIJ, sizeof(type)));
82: PetscCall(PetscOptionsFList("-mat_type", "Matrix type", "MatSetType", MatList, type, type, 256, &flg));
83: PetscOptionsEnd();
84: PetscCall(MatConvert(A, type, MAT_INPLACE_MATRIX, &A));
85: PetscCall(MatConvert(aux, type, MAT_INPLACE_MATRIX, &aux));
86: PetscCall(KSPCreate(PETSC_COMM_WORLD, &ksp));
87: PetscCall(KSPSetOperators(ksp, A, A));
88: PetscCall(KSPGetPC(ksp, &pc));
89: PetscCall(PCSetType(pc, PCHPDDM));
90: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
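/* with -reset, perform a first PCSetUp() with -pc_hpddm_block_splitting before switching to the auxiliary Mat formulation set just below */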
91: flg = PETSC_FALSE;
92: PetscCall(PetscOptionsGetBool(NULL, NULL, "-reset", &flg, NULL));
93: if (flg) {
94: PetscCall(PetscOptionsSetValue(NULL, "-pc_hpddm_block_splitting", "true"));
95: PetscCall(PCSetFromOptions(pc));
96: PetscCall(PCSetUp(pc));
97: PetscCall(PetscOptionsClearValue(NULL, "-pc_hpddm_block_splitting"));
98: }
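/* attach the overlapping subdomain numbering and the local auxiliary (Neumann) Mat to PCHPDDM */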
99: PetscCall(PCHPDDMSetAuxiliaryMat(pc, is, aux, NULL, NULL));
100: PetscCall(PCHPDDMHasNeumannMat(pc, PETSC_FALSE)); /* PETSC_TRUE is fine as well, just testing */
101: if (share != PETSC_BOOL3_UNKNOWN) PetscCall(PCHPDDMSetSTShareSubKSP(pc, PetscBool3ToBool(share)));
102: flg = PETSC_FALSE;
103: PetscCall(PetscOptionsGetBool(NULL, NULL, "-set_rhs", &flg, NULL));
104: if (flg) { /* user-provided RHS for concurrent generalized eigenvalue problems */
105: Mat a, c, P; /* usually assembled automatically in PCHPDDM, this is solely for testing PCHPDDMSetRHSMat() */
106: PetscInt rstart, rend, location;
108: PetscCall(MatDuplicate(aux, MAT_DO_NOT_COPY_VALUES, &B)); /* duplicate so that MatStructure is SAME_NONZERO_PATTERN */
109: PetscCall(MatGetDiagonalBlock(A, &a));
110: PetscCall(MatGetOwnershipRange(A, &rstart, &rend));
111: PetscCall(ISGetLocalSize(is, &m));
112: PetscCall(MatCreateSeqAIJ(PETSC_COMM_SELF, rend - rstart, m, 1, NULL, &P));
113: for (m = rstart; m < rend; ++m) {
114: PetscCall(ISLocate(is, m, &location));
115: PetscCheck(location >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "IS of the auxiliary Mat does not include all local rows of A");
116: PetscCall(MatSetValue(P, m - rstart, location, 1.0, INSERT_VALUES));
117: }
118: PetscCall(MatAssemblyBegin(P, MAT_FINAL_ASSEMBLY));
119: PetscCall(MatAssemblyEnd(P, MAT_FINAL_ASSEMBLY));
120: PetscCall(PetscObjectTypeCompare((PetscObject)a, MATSEQAIJ, &flg));
121: if (flg) PetscCall(MatPtAP(a, P, MAT_INITIAL_MATRIX, 1.0, &X)); // MatPtAP() is used to extend diagonal blocks with zeros on the overlap
122: else { // workaround for MatPtAP() limitations with some types
123: PetscCall(MatConvert(a, MATSEQAIJ, MAT_INITIAL_MATRIX, &c));
124: PetscCall(MatPtAP(c, P, MAT_INITIAL_MATRIX, 1.0, &X));
125: PetscCall(MatDestroy(&c));
126: }
127: PetscCall(MatDestroy(&P));
128: PetscCall(MatAXPY(B, 1.0, X, SUBSET_NONZERO_PATTERN));
129: PetscCall(MatDestroy(&X));
130: PetscCall(MatSetOption(B, MAT_SYMMETRIC, PETSC_TRUE));
131: PetscCall(PCHPDDMSetRHSMat(pc, B));
132: PetscCall(MatDestroy(&B));
133: }
134: #else
135: (void)share;
136: #endif
137: PetscCall(MatDestroy(&aux));
138: PetscCall(KSPSetFromOptions(ksp));
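/* if the PC was switched to PCASM on the command line and -pc_hpddm_define_subdomains is set, hand PCASM the same overlapping subdomain */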
139: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCASM, &flg));
140: if (flg) {
141: flg = PETSC_FALSE;
142: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_define_subdomains", &flg, NULL));
143: if (flg) {
144: IS rows;
146: PetscCall(MatGetOwnershipIS(A, &rows, NULL));
147: PetscCall(PCASMSetLocalSubdomains(pc, 1, &is, &rows));
148: PetscCall(ISDestroy(&rows));
149: }
150: }
151: PetscCall(ISDestroy(&is));
152: PetscCall(MatCreateVecs(A, NULL, &b));
153: PetscCall(VecSet(b, 1.0));
154: PetscCall(PetscOptionsGetBool(NULL, NULL, "-transpose", &transpose, NULL));
155: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
156: else {
157: PetscCall(KSPSolveTranspose(ksp, b, b));
158: set = PETSC_FALSE;
159: PetscCall(PetscOptionsGetBool(NULL, NULL, "-ksp_use_explicittranspose", &set, NULL));
160: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* -ksp_use_explicittranspose does not cache the initial Mat and will transpose the explicit transpose again if not set back to the original Mat */
161: }
162: PetscCall(VecGetLocalSize(b, &m));
163: PetscCall(VecDestroy(&b));
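/* with -rhs N (N > 1), test block solves: KSPMatSolve() on N random right-hand sides */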
164: if (N > 1) {
165: KSPType type;
166: VecType vt;
168: PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
169: PetscCall(KSPSetFromOptions(ksp));
170: PetscCall(MatGetVecType(A, &vt));
171: PetscCall(MatCreateDenseFromVecType(PETSC_COMM_WORLD, vt, m, PETSC_DECIDE, PETSC_DECIDE, N, PETSC_DECIDE, NULL, &B));
172: PetscCall(MatCreateDenseFromVecType(PETSC_COMM_WORLD, vt, m, PETSC_DECIDE, PETSC_DECIDE, N, PETSC_DECIDE, NULL, &X));
173: PetscCall(MatSetRandom(B, NULL));
174: /* this is algorithmically optimal in the sense that blocks of vectors are coarsened or interpolated using matrix--matrix operations */
175: /* PCHPDDM, however, heavily relies on the MPI[S]BAIJ format, for which there is no efficient MatProduct implementation */
176: if (!transpose) PetscCall(KSPMatSolve(ksp, B, X));
177: else {
178: PetscCall(KSPMatSolveTranspose(ksp, B, X));
179: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* same as in the prior KSPSolveTranspose() */
180: }
181: PetscCall(KSPGetType(ksp, &type));
182: PetscCall(PetscStrcmp(type, KSPHPDDM, &flg));
183: #if defined(PETSC_HAVE_HPDDM)
184: if (flg) {
185: PetscReal norm;
186: KSPHPDDMType type;
188: PetscCall(KSPHPDDMGetType(ksp, &type));
189: if (type == KSP_HPDDM_TYPE_PREONLY || type == KSP_HPDDM_TYPE_CG || type == KSP_HPDDM_TYPE_GMRES || type == KSP_HPDDM_TYPE_GCRODR) {
190: Mat C;
192: PetscCall(MatDuplicate(X, MAT_DO_NOT_COPY_VALUES, &C));
193: PetscCall(KSPSetMatSolveBatchSize(ksp, 1));
194: if (!transpose) PetscCall(KSPMatSolve(ksp, B, C));
195: else {
196: PetscCall(KSPMatSolveTranspose(ksp, B, C));
197: if (set) PetscCall(KSPSetOperators(ksp, A, A)); /* same as in the prior KSPMatSolveTranspose() */
198: }
199: PetscCall(MatAYPX(C, -1.0, X, SAME_NONZERO_PATTERN));
200: PetscCall(MatNorm(C, NORM_INFINITY, &norm));
201: PetscCall(MatDestroy(&C));
202: PetscCheck(norm <= 100 * PETSC_MACHINE_EPSILON, PetscObjectComm((PetscObject)pc), PETSC_ERR_PLIB, "KSPMatSolve%s() and KSPSolve%s() difference has nonzero norm %g with pseudo-block KSPHPDDMType %s", (transpose ? "Transpose" : ""), (transpose ? "Transpose" : ""), (double)norm, KSPHPDDMTypes[type]);
203: }
204: }
205: #endif
206: PetscCall(MatDestroy(&X));
207: PetscCall(MatDestroy(&B));
208: }
209: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCHPDDM, &flg));
210: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
211: if (flg) PetscCall(PCHPDDMGetSTShareSubKSP(pc, &flg));
212: #endif
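/* when the sub KSP is shared, the symbolic factorization is reused, so the numerical factorization event count must exceed the symbolic one */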
213: if (flg && PetscDefined(USE_LOG)) {
214: PetscCall(PetscOptionsHasName(NULL, NULL, "-pc_hpddm_harmonic_overlap", &flg));
215: if (!flg) {
216: PetscLogEvent event;
217: PetscEventPerfInfo info1, info2;
219: PetscCall(PetscLogEventRegister("MatLUFactorSym", PC_CLASSID, &event));
220: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info1));
221: PetscCall(PetscLogEventRegister("MatLUFactorNum", PC_CLASSID, &event));
222: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info2));
223: if (!info1.count && !info2.count) {
224: PetscCall(PetscLogEventRegister("MatCholFctrSym", PC_CLASSID, &event));
225: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info1));
226: PetscCall(PetscLogEventRegister("MatCholFctrNum", PC_CLASSID, &event));
227: PetscCall(PetscLogEventGetPerfInfo(PETSC_DETERMINE, event, &info2));
228: PetscCheck(info2.count > info1.count, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Cholesky numerical factorization (%d) not called more times than Cholesky symbolic factorization (%d), broken -pc_hpddm_levels_1_st_share_sub_ksp", info2.count, info1.count);
229: } else PetscCheck(info2.count > info1.count, PETSC_COMM_SELF, PETSC_ERR_PLIB, "LU numerical factorization (%d) not called more times than LU symbolic factorization (%d), broken -pc_hpddm_levels_1_st_share_sub_ksp", info2.count, info1.count);
230: }
231: }
232: #if defined(PETSC_HAVE_HPDDM) && defined(PETSC_HAVE_DYNAMIC_LIBRARIES) && defined(PETSC_USE_SHARED_LIBRARIES)
233: if (N == 1) {
234: flg = PETSC_FALSE;
235: PetscCall(PetscOptionsGetBool(NULL, NULL, "-successive_solves", &flg, NULL));
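/* with -successive_solves, reload the auxiliary data (unless -pc_hpddm_block_splitting is used), solve twice more, and check that convergence matches the first solve */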
236: if (flg) {
237: KSPConvergedReason reason[2];
238: PetscInt iterations[3];
240: PetscCall(KSPGetConvergedReason(ksp, reason));
241: PetscCall(KSPGetTotalIterations(ksp, iterations));
242: PetscCall(PetscOptionsClearValue(NULL, "-ksp_converged_reason"));
243: PetscCall(KSPSetFromOptions(ksp));
244: flg = PETSC_FALSE;
245: PetscCall(PetscOptionsGetBool(NULL, NULL, "-pc_hpddm_block_splitting", &flg, NULL));
246: if (!flg) {
247: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/sizes_%d.dat", dir, size));
248: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
249: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
250: PetscCall(ISLoad(sizes, viewer));
251: PetscCall(ISGetIndices(sizes, &idx));
252: PetscCall(MatCreate(PETSC_COMM_WORLD, &X));
253: PetscCall(MatSetSizes(X, idx[4], idx[4], PETSC_DETERMINE, PETSC_DETERMINE));
254: PetscCall(MatSetUp(X));
255: PetscCall(ISRestoreIndices(sizes, &idx));
256: PetscCall(ISDestroy(&sizes));
257: PetscCall(PetscViewerDestroy(&viewer));
258: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/is_%d.dat", dir, size));
259: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
260: PetscCall(ISCreate(PETSC_COMM_WORLD, &sizes));
261: PetscCall(MatGetLayouts(X, &map, NULL));
262: PetscCall(ISSetLayout(sizes, map));
263: PetscCall(ISLoad(sizes, viewer));
264: PetscCall(ISGetLocalSize(sizes, &m));
265: PetscCall(ISGetIndices(sizes, &idx));
266: PetscCall(ISCreateGeneral(PETSC_COMM_SELF, m, idx, PETSC_COPY_VALUES, &is));
267: PetscCall(ISRestoreIndices(sizes, &idx));
268: PetscCall(ISDestroy(&sizes));
269: PetscCall(MatGetBlockSize(A, &m));
270: PetscCall(ISSetBlockSize(is, m));
271: PetscCall(PetscViewerDestroy(&viewer));
272: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/Neumann_%d.dat", dir, size));
273: PetscCall(PetscViewerBinaryOpen(PETSC_COMM_WORLD, name, FILE_MODE_READ, &viewer));
274: PetscCall(MatLoad(X, viewer));
275: PetscCall(PetscViewerDestroy(&viewer));
276: PetscCall(MatGetDiagonalBlock(X, &B));
277: PetscCall(MatDuplicate(B, MAT_COPY_VALUES, &aux));
278: PetscCall(MatDestroy(&X));
279: PetscCall(MatSetBlockSizesFromMats(aux, A, A));
280: PetscCall(MatSetOption(aux, MAT_SYMMETRIC, PETSC_TRUE));
281: PetscCall(MatConvert(aux, type, MAT_INPLACE_MATRIX, &aux));
282: }
283: PetscCall(MatCreateVecs(A, NULL, &b));
284: PetscCall(PetscObjectStateIncrease((PetscObject)A));
285: if (!flg) PetscCall(PCHPDDMSetAuxiliaryMat(pc, NULL, aux, NULL, NULL));
286: PetscCall(VecSet(b, 1.0));
287: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
288: else PetscCall(KSPSolveTranspose(ksp, b, b));
289: PetscCall(KSPGetConvergedReason(ksp, reason + 1));
290: PetscCall(KSPGetTotalIterations(ksp, iterations + 1));
291: iterations[1] -= iterations[0];
292: PetscCheck(reason[0] == reason[1] && PetscAbs(iterations[0] - iterations[1]) <= 3, PetscObjectComm((PetscObject)ksp), PETSC_ERR_PLIB, "Successive calls to KSPSolve%s() did not converge for the same reason (%s v. %s) or with the same number of iterations (+/- 3, %" PetscInt_FMT " v. %" PetscInt_FMT ")", (transpose ? "Transpose" : ""), KSPConvergedReasons[reason[0]], KSPConvergedReasons[reason[1]], iterations[0], iterations[1]);
293: PetscCall(PetscObjectStateIncrease((PetscObject)A));
294: if (!flg) PetscCall(PCHPDDMSetAuxiliaryMat(pc, is, aux, NULL, NULL));
295: PetscCall(PCSetFromOptions(pc));
296: PetscCall(VecSet(b, 1.0));
297: if (!transpose) PetscCall(KSPSolve(ksp, b, b));
298: else PetscCall(KSPSolveTranspose(ksp, b, b));
299: PetscCall(KSPGetConvergedReason(ksp, reason + 1));
300: PetscCall(KSPGetTotalIterations(ksp, iterations + 2));
301: iterations[2] -= iterations[0] + iterations[1];
302: PetscCheck(reason[0] == reason[1] && PetscAbs(iterations[0] - iterations[2]) <= 3, PetscObjectComm((PetscObject)ksp), PETSC_ERR_PLIB, "Successive calls to KSPSolve%s() did not converge for the same reason (%s v. %s) or with the same number of iterations (+/- 3, %" PetscInt_FMT " v. %" PetscInt_FMT ")", (transpose ? "Transpose" : ""), KSPConvergedReasons[reason[0]], KSPConvergedReasons[reason[1]], iterations[0], iterations[2]);
303: PetscCall(VecDestroy(&b));
304: PetscCall(ISDestroy(&is));
305: PetscCall(MatDestroy(&aux));
306: }
307: }
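/* with -viewer, dump PCView() output to ASCII files in a temporary directory */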
308: PetscCall(PetscOptionsGetBool(NULL, NULL, "-viewer", &flg, NULL));
309: if (flg) {
310: PetscCall(PetscObjectTypeCompare((PetscObject)pc, PCHPDDM, &flg));
311: if (flg) {
312: PetscCall(PetscStrncpy(dir, "XXXXXX", sizeof(dir)));
313: if (rank == 0) PetscCall(PetscMkdtemp(dir));
314: PetscCallMPI(MPI_Bcast(dir, 6, MPI_CHAR, 0, PETSC_COMM_WORLD));
315: for (PetscInt i = 0; i < 2; ++i) {
316: PetscCall(PetscSNPrintf(name, sizeof(name), "%s/%s", dir, i == 0 ? "A" : "A.dat"));
317: PetscCall(PetscViewerASCIIOpen(PETSC_COMM_WORLD, name, &viewer));
318: PetscCall(PetscViewerPushFormat(viewer, PETSC_VIEWER_ASCII_INFO_DETAIL));
319: PetscCall(PCView(pc, viewer));
320: PetscCall(PetscViewerPopFormat(viewer));
321: PetscCall(PetscViewerDestroy(&viewer));
322: }
323: PetscCallMPI(MPI_Barrier(PETSC_COMM_WORLD));
324: if (rank == 0) PetscCall(PetscRMTree(dir));
325: }
326: }
327: #endif
328: PetscCall(KSPDestroy(&ksp));
329: PetscCall(MatDestroy(&A));
330: PetscCall(PetscFinalize());
331: return 0;
332: }
334: /*TEST
336: test:
337: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
338: nsize: 4
339: args: -ksp_rtol 1e-3 -ksp_converged_reason -pc_type {{bjacobi hpddm}shared output} -pc_hpddm_coarse_sub_pc_type lu -sub_pc_type lu -options_left no -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
341: testset:
342: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
343: suffix: define_subdomains
344: nsize: 4
345: args: -ksp_rtol 1e-3 -ksp_converged_reason -pc_hpddm_define_subdomains -options_left no -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
346: test:
347: args: -pc_type {{asm hpddm}shared output} -pc_hpddm_coarse_sub_pc_type lu -sub_pc_type lu -viewer
348: test:
349: args: -pc_type hpddm -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_coarse_sub_pc_type lu -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_coarse_correction none
351: testset:
352: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
353: nsize: 4
354: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_coarse_pc_type redundant -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
355: test:
356: suffix: geneo
357: args: -pc_hpddm_coarse_p {{1 2}shared output} -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev {{5 15}separate output} -mat_type {{aij baij sbaij}shared output}
358: test:
359: suffix: geneo_block_splitting
360: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-15.out
361: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[6-9]/Linear solve converged due to CONVERGED_RTOL iterations 11/g"
362: args: -pc_hpddm_coarse_p 2 -pc_hpddm_levels_1_eps_nev 15 -pc_hpddm_block_splitting -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_gen_non_hermitian -mat_type {{aij baij}shared output} -successive_solves
363: test:
364: suffix: geneo_share
365: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
366: args: -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_levels_1_st_share_sub_ksp -reset {{false true}shared output}
367: test:
368: suffix: harmonic_overlap_1_define_false
369: output_file: output/ex76_geneo_share.out
370: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
371: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -pc_hpddm_define_subdomains false -pc_hpddm_levels_1_pc_type asm -pc_hpddm_levels_1_pc_asm_overlap 2 -mat_type baij
372: test:
373: suffix: harmonic_overlap_1
374: output_file: output/ex76_geneo_share.out
375: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
376: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -mat_type baij
377: test:
378: requires: cuda
379: suffix: harmonic_overlap_1_cuda
380: output_file: output/ex76_geneo_share.out
381: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
382: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_eps_pc_type lu -mat_type aijcusparse
383: test:
384: suffix: harmonic_overlap_1_share_petsc
385: output_file: output/ex76_geneo_share.out
386: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
387: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type petsc -pc_hpddm_levels_1_eps_pc_type lu -mat_type baij
388: test:
389: requires: mumps
390: suffix: harmonic_overlap_1_share_mumps
391: output_file: output/ex76_geneo_share.out
392: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
393: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_pc_type cholesky -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_15 1
394: test:
395: requires: mumps
396: suffix: harmonic_overlap_1_share_mumps_not_set_explicitly
397: output_file: output/ex76_geneo_share.out
398: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
399: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type baij
400: test:
401: requires: mkl_pardiso
402: suffix: harmonic_overlap_1_share_mkl_pardiso
403: output_file: output/ex76_geneo_share.out
404: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations [12][0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
405: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type shell -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mkl_pardiso
406: test:
407: requires: mkl_pardiso !mumps
408: suffix: harmonic_overlap_1_share_mkl_pardiso_no_set_explicitly
409: output_file: output/ex76_geneo_share.out
410: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations [12][0-3]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
411: args: -pc_hpddm_harmonic_overlap 1 -pc_hpddm_levels_1_eps_nev 30 -pc_hpddm_levels_1_eps_threshold_relative 1e+1 -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_eps_mat_type shell
412: test:
413: suffix: harmonic_overlap_2_threshold_relative
414: output_file: output/ex76_geneo_share.out
415: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
416: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 15 -pc_hpddm_levels_1_svd_threshold_relative 1e-1 -pc_hpddm_levels_1_st_share_sub_ksp -mat_type sbaij
417: test:
418: suffix: harmonic_overlap_2
419: output_file: output/ex76_geneo_share.out
420: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
421: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 12 -pc_hpddm_levels_1_svd_type {{trlanczos randomized}shared output} -pc_hpddm_levels_1_st_share_sub_ksp -mat_type sbaij
422: test:
423: requires: cuda
424: suffix: harmonic_overlap_2_cuda
425: output_file: output/ex76_geneo_share.out
426: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 9/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
427: args: -pc_hpddm_harmonic_overlap 2 -pc_hpddm_levels_1_svd_nsv 12 -pc_hpddm_levels_1_svd_type trlanczos -pc_hpddm_levels_1_st_share_sub_ksp -mat_type aijcusparse
429: testset:
430: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
431: nsize: 4
432: args: -ksp_converged_reason -ksp_max_it 150 -pc_type hpddm -pc_hpddm_levels_1_eps_nev 5 -pc_hpddm_coarse_p 1 -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_define_subdomains
433: test:
434: suffix: geneo_share_cholesky
435: output_file: output/ex76_geneo_share.out
436: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
437: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -mat_type {{aij sbaij}shared output} -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp {{false true}shared output} -successive_solves
438: test:
439: suffix: geneo_share_cholesky_matstructure
440: output_file: output/ex76_geneo_share.out
441: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
442: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 14/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
443: args: -pc_hpddm_levels_1_sub_pc_type cholesky -mat_type {{baij sbaij}shared output} -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_st_matstructure same -set_rhs {{false true} shared output}
444: test:
445: suffix: geneo_transpose
446: output_file: output/ex76_geneo_share.out
447: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[234]/Linear solve converged due to CONVERGED_RTOL iterations 15/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations 26/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
448: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -successive_solves -transpose -pc_hpddm_coarse_correction {{additive deflated balanced}shared output}
449: test:
450: TODO: broken # slightly different convergence rate, which may be a sign of something wrong somewhere
451: requires: cuda
452: suffix: geneo_transpose_cuda
453: output_file: output/ex76_geneo_share.out
454: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[2-4]/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
455: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -transpose -pc_hpddm_coarse_correction balanced -mat_type aijcusparse
456: test:
457: suffix: geneo_explicittranspose
458: output_file: output/ex76_geneo_share.out
459: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1[234]/Linear solve converged due to CONVERGED_RTOL iterations 15/g" -e "s/Linear solve converged due to CONVERGED_RTOL iterations 26/Linear solve converged due to CONVERGED_RTOL iterations 15/g"
460: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp -transpose -ksp_use_explicittranspose -rhs 2
461: test:
462: requires: mumps
463: suffix: geneo_share_lu
464: output_file: output/ex76_geneo_share.out
465: # extra -pc_factor_mat_solver_type mumps needed to avoid failures with PETSc LU
466: args: -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_levels_1_st_pc_type lu -mat_type baij -pc_hpddm_levels_1_st_pc_factor_mat_solver_type mumps -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp {{false true}shared output} -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_15 1
467: test:
468: requires: mumps
469: suffix: geneo_share_lu_matstructure
470: output_file: output/ex76_geneo_share.out
471: # extra -pc_factor_mat_solver_type mumps needed to avoid failures with PETSc LU
472: args: -pc_hpddm_levels_1_sub_pc_type lu -mat_type aij -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type mumps -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_levels_1_st_matstructure {{same different}shared output} -pc_hpddm_levels_1_st_pc_type lu -pc_hpddm_levels_1_st_pc_factor_mat_solver_type mumps -successive_solves -pc_hpddm_levels_1_eps_target 1e-5
473: test:
474: suffix: geneo_share_not_asm
475: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
476: # extra -pc_hpddm_levels_1_eps_gen_non_hermitian needed to avoid failures with PETSc Cholesky
477: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp true -pc_hpddm_levels_1_pc_type gasm -successive_solves
478: test:
479: TODO: broken # PCGASM does not handle MATAIJCUSPARSE, see GitLab issue #1873
480: requires: cuda
481: suffix: geneo_share_not_asm_cuda
482: output_file: output/ex76_geneo_pc_hpddm_levels_1_eps_nev-5.out
483: args: -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_levels_1_eps_gen_non_hermitian -pc_hpddm_has_neumann -pc_hpddm_levels_1_st_share_sub_ksp true -pc_hpddm_levels_1_pc_type gasm -successive_solves -mat_type aijcusparse
485: test:
486: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
487: suffix: fgmres_geneo_20_p_2
488: nsize: 4
489: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type lu -pc_hpddm_levels_1_eps_nev 20 -pc_hpddm_coarse_p 2 -pc_hpddm_coarse_pc_type redundant -ksp_type fgmres -pc_hpddm_coarse_mat_type {{baij sbaij}shared output} -pc_hpddm_log_separate {{false true}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
491: testset:
492: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
493: output_file: output/ex76_fgmres_geneo_20_p_2.out
494: nsize: 4
495: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -pc_hpddm_levels_2_p 2 -pc_hpddm_levels_2_mat_type {{baij sbaij}shared output} -pc_hpddm_levels_2_eps_nev {{5 20}shared output} -pc_hpddm_levels_2_sub_pc_type cholesky -pc_hpddm_levels_2_ksp_type gmres -ksp_type fgmres -pc_hpddm_coarse_mat_type {{baij sbaij}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO
496: test:
497: suffix: fgmres_geneo_20_p_2_geneo
498: args: -mat_type {{aij sbaij}shared output}
499: test:
500: suffix: fgmres_geneo_20_p_2_geneo_algebraic
501: args: -pc_hpddm_levels_2_st_pc_type mat
502: # PCHPDDM + KSPHPDDM test to exercise multilevel + multiple RHS in one go
503: testset:
504: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
505: output_file: output/ex76_fgmres_geneo_20_p_2.out
506: # for -pc_hpddm_coarse_correction additive
507: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 37/Linear solve converged due to CONVERGED_RTOL iterations 25/g"
508: nsize: 4
509: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -ksp_type hpddm -ksp_hpddm_variant flexible -pc_hpddm_coarse_mat_type baij -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -rhs 4
510: test:
511: suffix: fgmres_geneo_20_p_2_geneo_rhs
512: args: -pc_hpddm_levels_2_p 2 -pc_hpddm_levels_2_mat_type baij -pc_hpddm_levels_2_eps_nev 5 -pc_hpddm_levels_2_sub_pc_type cholesky -pc_hpddm_levels_2_ksp_max_it 10 -pc_hpddm_levels_2_ksp_type hpddm -pc_hpddm_levels_2_ksp_hpddm_type gmres -mat_type aij -pc_hpddm_coarse_correction {{additive deflated balanced}shared output}
513: test:
514: requires: cuda
515: suffix: fgmres_geneo_20_rhs_cuda
516: args: -mat_type aijcusparse -pc_hpddm_coarse_correction {{deflated balanced}shared output}
518: testset:
519: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES) mumps defined(PETSC_HAVE_OPENMP_SUPPORT)
520: filter: grep -E -e "Linear solve" -e " executing" | sed -e "s/MPI = 1/MPI = 2/g" -e "s/OMP = 1/OMP = 2/g"
521: nsize: 4
522: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 15 -pc_hpddm_levels_1_st_pc_type cholesky -pc_hpddm_coarse_p {{1 2}shared output} -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_coarse_pc_factor_mat_solver_type mumps -pc_hpddm_coarse_mat_mumps_icntl_4 2 -pc_hpddm_coarse_mat_mumps_use_omp_threads {{1 2}shared output}
523: test:
524: suffix: geneo_mumps_use_omp_threads_1
525: output_file: output/ex76_geneo_mumps_use_omp_threads.out
526: args: -pc_hpddm_coarse_mat_type {{baij sbaij}shared output}
527: test:
528: suffix: geneo_mumps_use_omp_threads_2
529: output_file: output/ex76_geneo_mumps_use_omp_threads.out
530: args: -pc_hpddm_coarse_mat_type aij -pc_hpddm_levels_1_eps_threshold_absolute 0.4 -pc_hpddm_coarse_pc_type cholesky -pc_hpddm_coarse_mat_filter 1e-12
532: testset: # converges really poorly because of a tiny -pc_hpddm_levels_1_eps_threshold_absolute, but needed for proper code coverage where some subdomains don't call EPSSolve()
533: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
534: nsize: 4
535: args: -ksp_converged_reason -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_threshold_absolute 0.005 -pc_hpddm_levels_1_eps_use_inertia -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_levels_1_st_share_sub_ksp -pc_hpddm_define_subdomains -pc_hpddm_has_neumann -ksp_rtol 0.9
536: filter: sed -e "s/Linear solve converged due to CONVERGED_RTOL iterations 1/Linear solve converged due to CONVERGED_RTOL iterations 141/g"
537: test:
538: suffix: inertia_petsc
539: output_file: output/ex76_1.out
540: args: -pc_hpddm_levels_1_sub_pc_factor_mat_solver_type petsc
541: test:
542: suffix: inertia_mumps
543: output_file: output/ex76_1.out
544: requires: mumps
546: testset:
547: requires: hpddm slepc datafilespath double !complex !defined(PETSC_USE_64BIT_INDICES) defined(PETSC_HAVE_DYNAMIC_LIBRARIES) defined(PETSC_USE_SHARED_LIBRARIES)
548: output_file: output/empty.out
549: nsize: 4
550: args: -pc_type hpddm -pc_hpddm_levels_1_sub_pc_type cholesky -pc_hpddm_levels_1_eps_nev 20 -rhs 4 -ksp_max_it 20 -ksp_type hpddm -load_dir ${DATAFILESPATH}/matrices/hpddm/GENEO -pc_hpddm_define_subdomains -ksp_error_if_not_converged
551: test:
552: suffix: reuse_symbolic
553: args: -pc_hpddm_coarse_correction {{additive deflated balanced}shared output} -ksp_pc_side {{left right}shared output} -transpose {{true false} shared output}
554: test:
555: requires: cuda
556: suffix: reuse_symbolic_cuda
557: args: -pc_hpddm_coarse_correction deflated -ksp_pc_side right -transpose -mat_type aijcusparse
559: TEST*/