Actual source code: petscpctypes.h
#pragma once

/* SUBMANSEC = PC */

/*S
   PC - Abstract PETSc object that manages all preconditioners including direct solvers such as `PCLU`

   Level: beginner

.seealso: [](doc_linsolve), [](sec_pc), `PCCreate()`, `PCSetType()`, `PCType`
S*/
typedef struct _p_PC *PC;

/*J
   PCType - String with the name of a PETSc preconditioner

   Level: beginner

   Note:
   `PCRegister()` is used to register preconditioners that are then accessible via `PCSetType()`

.seealso: [](doc_linsolve), [](sec_pc), `PCSetType()`, `PC`, `PCCreate()`, `PCRegister()`, `PCSetFromOptions()`, `PCLU`, `PCJACOBI`, `PCBJACOBI`
J*/
typedef const char *PCType;
#define PCNONE "none"
#define PCJACOBI "jacobi"
#define PCSOR "sor"
#define PCLU "lu"
#define PCQR "qr"
#define PCSHELL "shell"
#define PCAMGX "amgx"
#define PCBJACOBI "bjacobi"
#define PCMG "mg"
#define PCEISENSTAT "eisenstat"
#define PCILU "ilu"
#define PCICC "icc"
#define PCASM "asm"
#define PCGASM "gasm"
#define PCKSP "ksp"
#define PCBJKOKKOS "bjkokkos"
#define PCCOMPOSITE "composite"
#define PCREDUNDANT "redundant"
#define PCSPAI "spai"
#define PCNN "nn"
#define PCCHOLESKY "cholesky"
#define PCPBJACOBI "pbjacobi"
#define PCVPBJACOBI "vpbjacobi"
#define PCMAT "mat"
#define PCHYPRE "hypre"
#define PCPARMS "parms"
#define PCFIELDSPLIT "fieldsplit"
#define PCTFS "tfs"
#define PCML "ml"
#define PCGALERKIN "galerkin"
#define PCEXOTIC "exotic"
#define PCCP "cp"
#define PCBFBT "bfbt"
#define PCLSC "lsc"
#define PCPYTHON "python"
#define PCPFMG "pfmg"
#define PCSMG "smg"
#define PCSYSPFMG "syspfmg"
#define PCREDISTRIBUTE "redistribute"
#define PCSVD "svd"
#define PCGAMG "gamg"
#define PCCHOWILUVIENNACL "chowiluviennacl"
#define PCROWSCALINGVIENNACL "rowscalingviennacl"
#define PCSAVIENNACL "saviennacl"
#define PCBDDC "bddc"
#define PCKACZMARZ "kaczmarz"
#define PCTELESCOPE "telescope"
#define PCPATCH "patch"
#define PCLMVM "lmvm"
#define PCHMG "hmg"
#define PCDEFLATION "deflation"
#define PCHPDDM "hpddm"
#define PCH2OPUS "h2opus"
#define PCMPI "mpi"
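
/*
   Example usage (illustrative sketch): selecting one of the `PCType` names above on a
   standalone `PC`. Assumes <petscpc.h> (or <petscksp.h>) has been included and PETSc has
   been initialized; `pc` is a local placeholder variable.

     PC pc;
     PetscCall(PCCreate(PETSC_COMM_WORLD, &pc));
     PetscCall(PCSetType(pc, PCJACOBI)); // or any other PCType string, e.g. "ilu"
     PetscCall(PCSetFromOptions(pc));    // allow -pc_type <name> to override at run time
     PetscCall(PCDestroy(&pc));
*/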

/*E
   PCSide - Determines whether the preconditioner is applied to the left, the right, or symmetrically around the operator.

   Values:
+ `PC_LEFT`      - applied after the operator is applied
. `PC_RIGHT`     - applied before the operator is applied
- `PC_SYMMETRIC` - a portion of the preconditioner is applied before the operator and the transpose of this portion is applied after the operator is applied

   Level: beginner

   Note:
   Certain `KSPType` values support only a subset of `PCSide` values

.seealso: [](sec_pc), `PC`, `KSPSetPCSide()`
E*/
typedef enum {
  PC_SIDE_DEFAULT = -1,
  PC_LEFT,
  PC_RIGHT,
  PC_SYMMETRIC
} PCSide;
#define PC_SIDE_MAX (PC_SYMMETRIC + 1)
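
/*
   Example usage (illustrative sketch): `PCSide` is normally selected through the `KSP` that
   owns the preconditioner; `ksp` below is assumed to be an already-created KSP.

     PetscCall(KSPSetPCSide(ksp, PC_RIGHT)); // use right preconditioning if the KSPType supports it
*/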

/*E
   PCRichardsonConvergedReason - reason a `PCRICHARDSON` `PCApplyRichardson()` method terminated

   Level: advanced

.seealso: [](sec_pc), `PCRICHARDSON`, `PC`, `PCApplyRichardson()`
E*/
typedef enum {
  PCRICHARDSON_CONVERGED_RTOL = 2,
  PCRICHARDSON_CONVERGED_ATOL = 3,
  PCRICHARDSON_CONVERGED_ITS  = 4,
  PCRICHARDSON_DIVERGED_DTOL  = -4
} PCRichardsonConvergedReason;

/*E
   PCJacobiType - What elements of the matrix are used to form the Jacobi preconditioner

   Values:
+ `PC_JACOBI_DIAGONAL` - use the diagonal entry; if it is zero, use one instead
. `PC_JACOBI_ROWMAX`   - use the maximum absolute value in the row
- `PC_JACOBI_ROWSUM`   - use the sum of the values in the row (not the absolute values)

   Level: intermediate

.seealso: [](sec_pc), `PCJACOBI`, `PC`
E*/
typedef enum {
  PC_JACOBI_DIAGONAL,
  PC_JACOBI_ROWMAX,
  PC_JACOBI_ROWSUM
} PCJacobiType;
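
/*
   Example usage (illustrative sketch): choosing which matrix entries the Jacobi
   preconditioner uses; `pc` is assumed to be a PC already set to `PCJACOBI`.

     PetscCall(PCJacobiSetType(pc, PC_JACOBI_ROWMAX)); // use the largest absolute value in each row
*/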

/*E
   PCASMType - Type of additive Schwarz method to use

   Values:
+ `PC_ASM_BASIC`       - Symmetric version where residuals from the ghost points are used
                         and computed values in ghost regions are added together.
                         Classical standard additive Schwarz.
. `PC_ASM_RESTRICT`    - Residuals from ghost points are used but computed values in ghost
                         region are discarded.
                         Default.
. `PC_ASM_INTERPOLATE` - Residuals from ghost points are not used, computed values in ghost
                         region are added back in.
- `PC_ASM_NONE`        - Residuals from ghost points are not used, computed ghost values are
                         discarded.
                         Not very good.

   Level: beginner

.seealso: [](sec_pc), `PC`, `PCASM`, `PCASMSetType()`, `PCGASMType`
E*/
typedef enum {
  PC_ASM_BASIC       = 3,
  PC_ASM_RESTRICT    = 1,
  PC_ASM_INTERPOLATE = 2,
  PC_ASM_NONE        = 0
} PCASMType;
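
/*
   Example usage (illustrative sketch): selecting the additive Schwarz variant; `pc` is
   assumed to be a PC of type `PCASM`.

     PetscCall(PCASMSetType(pc, PC_ASM_BASIC)); // PC_ASM_RESTRICT is the default
*/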

/*E
   PCGASMType - Type of generalized additive Schwarz method to use (differs from `PCASM` in allowing multiple MPI processes per subdomain).

   Values:
+ `PC_GASM_BASIC`       - Symmetric version where the full residual from the outer subdomain is used, and the resulting correction is applied
                          over the outer subdomains. As a result, points in the overlap will receive the sum of the corrections
                          from neighboring subdomains.
                          Classical standard additive Schwarz.
. `PC_GASM_RESTRICT`    - Residual from the outer subdomain is used but the correction is restricted to the inner subdomain only
                          (i.e., zeroed out over the overlap portion of the outer subdomain before being applied). As a result,
                          each point will receive a correction only from the unique inner subdomain containing it (nonoverlapping covering
                          assumption).
                          Default.
. `PC_GASM_INTERPOLATE` - Residual is zeroed out over the overlap portion of the outer subdomain, but the resulting correction is
                          applied over the outer subdomain. As a result, points in the overlap will receive the sum of the corrections
                          from neighboring subdomains.
- `PC_GASM_NONE`        - Residuals and corrections are zeroed out outside the local subdomains.
                          Not very good.

   Level: beginner

   Note:
   Each subdomain has nested inner and outer parts. The inner subdomains are assumed to form a non-overlapping covering of the computational
   domain, while the outer subdomains contain the inner subdomains and overlap with each other. This preconditioner will compute
   a subdomain correction over each *outer* subdomain from a residual computed there, but its different variants differ in
   (a) how the outer subdomain residual is computed, and (b) how the outer subdomain correction is computed.

.seealso: [](sec_pc), `PCGASM`, `PCASM`, `PC`, `PCGASMSetType()`, `PCASMType`
E*/
typedef enum {
  PC_GASM_BASIC       = 3,
  PC_GASM_RESTRICT    = 1,
  PC_GASM_INTERPOLATE = 2,
  PC_GASM_NONE        = 0
} PCGASMType;
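
/*
   Example usage (illustrative sketch): the analogous selection for the generalized additive
   Schwarz method; `pc` is assumed to be a PC of type `PCGASM`.

     PetscCall(PCGASMSetType(pc, PC_GASM_RESTRICT)); // the default variant
*/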

/*E
   PCCompositeType - Determines how two or more preconditioners are composed with the `PCType` of `PCCOMPOSITE`

   Values:
+ `PC_COMPOSITE_ADDITIVE`                 - results from application of all preconditioners are added together
. `PC_COMPOSITE_MULTIPLICATIVE`           - preconditioners are applied sequentially to the residual freshly
                                            computed after the previous preconditioner application
. `PC_COMPOSITE_SYMMETRIC_MULTIPLICATIVE` - preconditioners are applied sequentially to the freshly computed residual,
                                            from the first preconditioner to the last and then back again (use only for symmetric matrices and preconditioners)
. `PC_COMPOSITE_SPECIAL`                  - a special composition for a matrix of the form alpha I + R + S,
                                            where the first preconditioner is built from alpha I + S and the second from
                                            alpha I + R
. `PC_COMPOSITE_SCHUR`                    - composes the Schur complement of the matrix from two blocks, see `PCFIELDSPLIT`
- `PC_COMPOSITE_GKB`                      - the generalized Golub-Kahan bidiagonalization preconditioner, see `PCFIELDSPLIT`

   Level: beginner

.seealso: [](sec_pc), `PCCOMPOSITE`, `PCFIELDSPLIT`, `PC`, `PCCompositeSetType()`, `SNESCompositeType`
E*/
typedef enum {
  PC_COMPOSITE_ADDITIVE,
  PC_COMPOSITE_MULTIPLICATIVE,
  PC_COMPOSITE_SYMMETRIC_MULTIPLICATIVE,
  PC_COMPOSITE_SPECIAL,
  PC_COMPOSITE_SCHUR,
  PC_COMPOSITE_GKB
} PCCompositeType;
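
/*
   Example usage (illustrative sketch): choosing how `PCCOMPOSITE` combines its inner
   preconditioners; `pc` is assumed to be a PC of type `PCCOMPOSITE`, with the inner PCs
   typically selected on the command line (for example -pc_composite_pcs jacobi,ilu).

     PetscCall(PCCompositeSetType(pc, PC_COMPOSITE_MULTIPLICATIVE));
*/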

/*E
   PCFieldSplitSchurPreType - Determines how to precondition a Schur complement

   Values:
+ `PC_FIELDSPLIT_SCHUR_PRE_SELF`  - the preconditioner for the Schur complement is generated from the symbolic representation of the Schur complement matrix.
                                    The only preconditioners that currently work with this symbolic representation matrix object are `PCLSC` and `PCHPDDM`
. `PC_FIELDSPLIT_SCHUR_PRE_SELFP` - the preconditioner for the Schur complement is generated from an explicitly assembled approximation Sp = A11 - A10 inv(diag(A00)) A01.
                                    This is only a good preconditioner when diag(A00) is a good preconditioner for A00. Optionally, A00 can be
                                    lumped before extracting the diagonal using the additional option `-fieldsplit_1_mat_schur_complement_ainv_type lump`
. `PC_FIELDSPLIT_SCHUR_PRE_A11`   - the preconditioner for the Schur complement is generated from the block diagonal part of the matrix used to define the preconditioner,
                                    associated with the Schur complement (i.e. A11), not the Schur complement matrix
. `PC_FIELDSPLIT_SCHUR_PRE_USER`  - the preconditioner for the Schur complement is generated from the user-provided matrix (the pre argument
                                    to `PCFieldSplitSetSchurPre()`)
- `PC_FIELDSPLIT_SCHUR_PRE_FULL`  - the preconditioner for the Schur complement is generated from the exact Schur complement matrix representation
                                    computed internally by `PCFIELDSPLIT` (this is expensive); useful mostly as a test that the Schur complement approach can work for your problem

   Level: intermediate

.seealso: [](sec_pc), `PCFIELDSPLIT`, `PCFieldSplitSetSchurPre()`, `PC`
E*/
typedef enum {
  PC_FIELDSPLIT_SCHUR_PRE_SELF,
  PC_FIELDSPLIT_SCHUR_PRE_SELFP,
  PC_FIELDSPLIT_SCHUR_PRE_A11,
  PC_FIELDSPLIT_SCHUR_PRE_USER,
  PC_FIELDSPLIT_SCHUR_PRE_FULL
} PCFieldSplitSchurPreType;
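
/*
   Example usage (illustrative sketch): choosing how the Schur complement is preconditioned;
   `pc` is assumed to be a PC of type `PCFIELDSPLIT` already set to a Schur factorization.

     PetscCall(PCFieldSplitSetSchurPre(pc, PC_FIELDSPLIT_SCHUR_PRE_SELFP, NULL)); // the Mat argument is only used with PC_FIELDSPLIT_SCHUR_PRE_USER
*/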

/*E
   PCFieldSplitSchurFactType - determines which off-diagonal parts of the approximate block factorization to use

   Values:
+ `PC_FIELDSPLIT_SCHUR_FACT_DIAG`  - the preconditioner is solving `D`
. `PC_FIELDSPLIT_SCHUR_FACT_LOWER` - the preconditioner is solving `L D`
. `PC_FIELDSPLIT_SCHUR_FACT_UPPER` - the preconditioner is solving `D U`
- `PC_FIELDSPLIT_SCHUR_FACT_FULL`  - the preconditioner is solving `L(D U)`

   where the matrix is factorized as
.vb
   (A  B)  =  (1       0) (A  0) (1  Ainv*B)  =  L D U
   (C  E)     (C*Ainv  1) (0  S) (0  1     )
.ve

   Level: intermediate

.seealso: [](sec_pc), `PCFIELDSPLIT`, `PCFieldSplitSetSchurFactType()`, `PC`
E*/
typedef enum {
  PC_FIELDSPLIT_SCHUR_FACT_DIAG,
  PC_FIELDSPLIT_SCHUR_FACT_LOWER,
  PC_FIELDSPLIT_SCHUR_FACT_UPPER,
  PC_FIELDSPLIT_SCHUR_FACT_FULL
} PCFieldSplitSchurFactType;
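
/*
   Example usage (illustrative sketch): selecting which factors of the block factorization
   above are applied; `pc` is assumed to be a PC of type `PCFIELDSPLIT` using a Schur
   complement.

     PetscCall(PCFieldSplitSetSchurFactType(pc, PC_FIELDSPLIT_SCHUR_FACT_FULL));
*/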

/*E
   PCPARMSGlobalType - Determines the global preconditioner method in `PCPARMS`

   Level: intermediate

.seealso: [](sec_pc), `PCPARMS`, `PCPARMSSetGlobal()`, `PC`
E*/
typedef enum {
  PC_PARMS_GLOBAL_RAS,
  PC_PARMS_GLOBAL_SCHUR,
  PC_PARMS_GLOBAL_BJ
} PCPARMSGlobalType;

/*E
   PCPARMSLocalType - Determines the local preconditioner method in `PCPARMS`

   Level: intermediate

.seealso: [](sec_pc), `PCPARMS`, `PCPARMSSetLocal()`, `PC`
E*/
typedef enum {
  PC_PARMS_LOCAL_ILU0,
  PC_PARMS_LOCAL_ILUK,
  PC_PARMS_LOCAL_ILUT,
  PC_PARMS_LOCAL_ARMS
} PCPARMSLocalType;

/*J
   PCGAMGType - type of generalized algebraic multigrid `PCGAMG` method

   Values:
+ `PCGAMGAGG`       - (the default) smoothed aggregation algorithm, robust, very well tested
. `PCGAMGGEO`       - geometric coarsening, uses mesh generator to produce coarser meshes, limited to triangles, not supported, reference implementation (2D)
- `PCGAMGCLASSICAL` - classical algebraic multigrid preconditioner, incomplete, not supported, reference implementation

   Level: intermediate

.seealso: [](sec_pc), `PCGAMG`, `PCMG`, `PC`, `PCSetType()`, `PCGAMGSetThreshold()`, `PCGAMGSetReuseInterpolation()`
J*/
typedef const char *PCGAMGType;
#define PCGAMGAGG "agg"
#define PCGAMGGEO "geo"
#define PCGAMGCLASSICAL "classical"
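
/*
   Example usage (illustrative sketch): picking the `PCGAMG` variant; `pc` is assumed to be a
   PC of type `PCGAMG`.

     PetscCall(PCGAMGSetType(pc, PCGAMGAGG)); // smoothed aggregation, the default
*/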

typedef const char *PCGAMGClassicalType;
#define PCGAMGCLASSICALDIRECT "direct"
#define PCGAMGCLASSICALSTANDARD "standard"

/*E
   PCMGType - Determines the type of multigrid method that is run.

   Values:
+ `PC_MG_MULTIPLICATIVE` (default) - traditional V or W cycle as determined by `PCMGSetCycleType()`
. `PC_MG_ADDITIVE`                 - the additive multigrid preconditioner where all levels are
                                     smoothed before updating the residual. This only uses the
                                     down smoother; the up smoother is ignored in the preconditioner
. `PC_MG_FULL`                     - same as multiplicative except one also performs grid sequencing,
                                     that is, starts on the coarsest grid, performs a cycle, interpolates
                                     to the next, performs a cycle, etc. This is much like the F-cycle presented in "Multigrid" by Trottenberg, Oosterlee, and Schuller (page 49),
                                     but that algorithm smooths before the restriction on each level during the initial restriction to the coarsest grid. In addition, that
                                     algorithm calls the V-cycle only on the coarser level and has a post-smoother instead.
- `PC_MG_KASKADE`                  - like full multigrid except one never goes back to a coarser level from a finer one

   Level: beginner

.seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetType()`, `PCMGSetCycleType()`, `PCMGSetCycleTypeOnLevel()`
E*/
typedef enum {
  PC_MG_MULTIPLICATIVE,
  PC_MG_ADDITIVE,
  PC_MG_FULL,
  PC_MG_KASKADE
} PCMGType;
#define PC_MG_CASCADE PC_MG_KASKADE

/*E
   PCMGCycleType - Use V-cycle or W-cycle

   Values:
+ `PC_MG_CYCLE_V` - use the V-cycle
- `PC_MG_CYCLE_W` - use the W-cycle

   Level: beginner

.seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetCycleType()`
E*/
typedef enum {
  PC_MG_CYCLE_V = 1,
  PC_MG_CYCLE_W = 2
} PCMGCycleType;
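
/*
   Example usage (illustrative sketch): selecting the multigrid scheme and cycle for `PCMG`;
   `pc` is assumed to be a PC of type `PCMG` whose levels have already been configured.

     PetscCall(PCMGSetType(pc, PC_MG_FULL));
     PetscCall(PCMGSetCycleType(pc, PC_MG_CYCLE_W));
*/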

/*E
   PCMGGalerkinType - Determines if the coarse grid operators are computed via the Galerkin process

   Values:
+ `PC_MG_GALERKIN_PMAT` - computes the pmat (matrix from which the preconditioner is built) via the Galerkin process from the finest grid
. `PC_MG_GALERKIN_MAT`  - computes the mat (matrix used to apply the operator) via the Galerkin process from the finest grid
. `PC_MG_GALERKIN_BOTH` - computes both the mat and pmat via the Galerkin process (if pmat == mat the construction is only done once)
- `PC_MG_GALERKIN_NONE` - neither operator is computed via the Galerkin process, the user must provide the operator

   Level: beginner

   Note:
   Users should never set `PC_MG_GALERKIN_EXTERNAL`; it is used by `PCHYPRE` and `PCML`

.seealso: [](sec_pc), `PCMG`, `PC`, `PCMGSetGalerkin()`, `PCMGSetCycleType()`
E*/
typedef enum {
  PC_MG_GALERKIN_BOTH,
  PC_MG_GALERKIN_PMAT,
  PC_MG_GALERKIN_MAT,
  PC_MG_GALERKIN_NONE,
  PC_MG_GALERKIN_EXTERNAL
} PCMGGalerkinType;
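
/*
   Example usage (illustrative sketch): requesting that the coarse grid operators be formed by
   the Galerkin process; `pc` is assumed to be a PC of type `PCMG`.

     PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_PMAT));
*/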

/*E
   PCExoticType - Face-based or wirebasket-based coarse grid space

   Level: beginner

.seealso: [](sec_pc), `PCExoticSetType()`, `PCEXOTIC`
E*/
typedef enum {
  PC_EXOTIC_FACE,
  PC_EXOTIC_WIREBASKET
} PCExoticType;
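
/*
   Example usage (illustrative sketch): choosing the coarse space flavor of `PCEXOTIC`; `pc`
   is assumed to be a PC of type `PCEXOTIC`.

     PetscCall(PCExoticSetType(pc, PC_EXOTIC_WIREBASKET));
*/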

/*E
   PCBDDCInterfaceExtType - Defines how interface balancing is extended into the interior of subdomains.

   Values:
+ `PC_BDDC_INTERFACE_EXT_DIRICHLET` - solves the Dirichlet interior problem; this is the standard BDDC algorithm
- `PC_BDDC_INTERFACE_EXT_LUMP`      - skips the interior solve; sometimes called M_1 and associated with "lumped FETI-DP"

   Level: intermediate

.seealso: [](sec_pc), `PCBDDC`, `PC`
E*/
typedef enum {
  PC_BDDC_INTERFACE_EXT_DIRICHLET,
  PC_BDDC_INTERFACE_EXT_LUMP
} PCBDDCInterfaceExtType;

/*E
   PCMGCoarseSpaceType - The type of function space used to construct the coarse space for adaptive interpolation

   Level: beginner

.seealso: [](sec_pc), `PCMGSetAdaptCoarseSpaceType()`, `PCMG`, `PC`
E*/
typedef enum {
  PCMG_ADAPT_NONE,
  PCMG_ADAPT_POLYNOMIAL,
  PCMG_ADAPT_HARMONIC,
  PCMG_ADAPT_EIGENVECTOR,
  PCMG_ADAPT_GENERALIZED_EIGENVECTOR,
  PCMG_ADAPT_GDSW
} PCMGCoarseSpaceType;

/*E
   PCPatchConstructType - The algorithm used to construct patches for the `PCPATCH` preconditioner

   Level: beginner

.seealso: [](sec_pc), `PCPatchSetConstructType()`, `PCPATCH`, `PC`
E*/
typedef enum {
  PC_PATCH_STAR,
  PC_PATCH_VANKA,
  PC_PATCH_PARDECOMP,
  PC_PATCH_USER,
  PC_PATCH_PYTHON
} PCPatchConstructType;

/*E
   PCDeflationSpaceType - Type of deflation space

   Values:
+ `PC_DEFLATION_SPACE_HAAR`        - directly assembled based on the Haar (db2) wavelet with the overflowing filter cut off
. `PC_DEFLATION_SPACE_DB2`         - `MATCOMPOSITE` of one-level matrices based on db2 (2-coefficient Daubechies / Haar wavelet)
. `PC_DEFLATION_SPACE_DB4`         - same as above, but with db4 (4-coefficient Daubechies)
. `PC_DEFLATION_SPACE_DB8`         - same as above, but with db8 (8-coefficient Daubechies)
. `PC_DEFLATION_SPACE_DB16`        - same as above, but with db16 (16-coefficient Daubechies)
. `PC_DEFLATION_SPACE_BIORTH22`    - same as above, but with biorthogonal 2.2 (6 coefficients)
. `PC_DEFLATION_SPACE_MEYER`       - same as above, but with Meyer/FIR (62 coefficients)
. `PC_DEFLATION_SPACE_AGGREGATION` - aggregates local indices (given by the operator matrix distribution) into a subdomain
- `PC_DEFLATION_SPACE_USER`        - indicates a space set by the user

   Level: intermediate

   Note:
   Wavelet-based spaces (except Haar) can be used in multilevel deflation.

.seealso: [](sec_pc), `PCDeflationSetSpaceToCompute()`, `PCDEFLATION`, `PC`
E*/
typedef enum {
  PC_DEFLATION_SPACE_HAAR,
  PC_DEFLATION_SPACE_DB2,
  PC_DEFLATION_SPACE_DB4,
  PC_DEFLATION_SPACE_DB8,
  PC_DEFLATION_SPACE_DB16,
  PC_DEFLATION_SPACE_BIORTH22,
  PC_DEFLATION_SPACE_MEYER,
  PC_DEFLATION_SPACE_AGGREGATION,
  PC_DEFLATION_SPACE_USER
} PCDeflationSpaceType;
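
/*
   Example usage (illustrative sketch): requesting a wavelet-based deflation space; `pc` is
   assumed to be a PC of type `PCDEFLATION`, and `PCDeflationSetSpaceToCompute()` is assumed
   to take the space type plus a size argument (0 below standing for the default size).

     PetscCall(PCDeflationSetSpaceToCompute(pc, PC_DEFLATION_SPACE_DB4, 0));
*/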

/*E
   PCHPDDMCoarseCorrectionType - Type of coarse correction used by `PCHPDDM`

   Values:
+ `PC_HPDDM_COARSE_CORRECTION_DEFLATED` (default) - eq. (1) in `PCHPDDMShellApply()`
. `PC_HPDDM_COARSE_CORRECTION_ADDITIVE`           - eq. (2)
- `PC_HPDDM_COARSE_CORRECTION_BALANCED`           - eq. (3)

   Level: intermediate

.seealso: [](sec_pc), `PCHPDDM`, `PC`, `PCSetType()`, `PCHPDDMShellApply()`
E*/
typedef enum {
  PC_HPDDM_COARSE_CORRECTION_DEFLATED,
  PC_HPDDM_COARSE_CORRECTION_ADDITIVE,
  PC_HPDDM_COARSE_CORRECTION_BALANCED
} PCHPDDMCoarseCorrectionType;
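
/*
   Example usage (illustrative sketch): switching the `PCHPDDM` coarse correction; `pc` is
   assumed to be a PC of type `PCHPDDM`.

     PetscCall(PCHPDDMSetCoarseCorrectionType(pc, PC_HPDDM_COARSE_CORRECTION_BALANCED));
*/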

/*E
   PCHPDDMSchurPreType - Type of `PCHPDDM` preconditioner for a `MATSCHURCOMPLEMENT` generated by `PCFIELDSPLIT` with `PCFieldSplitSchurPreType` set to `PC_FIELDSPLIT_SCHUR_PRE_SELF`

   Values:
+ `PC_HPDDM_SCHUR_PRE_LEAST_SQUARES` (default) - only with a near-zero A11 block and A10 = A01^T; a preconditioner for solving A01^T A00^-1 A01 x = b is built by approximating the Schur complement with (inv(sqrt(diag(A00))) A01)^T (inv(sqrt(diag(A00))) A01) and by considering the associated linear least squares problem
- `PC_HPDDM_SCHUR_PRE_GENEO`                   - only with A10 = A01^T, `PCHPDDMSetAuxiliaryMat()` called on the `PC` of the A00 block, and if A11 is nonzero, then `PCHPDDMSetAuxiliaryMat()` must be called on the associated `PC` as well (it is built automatically for the user otherwise); the Schur complement `PC` is set internally to `PCKSP`, with the prefix `-fieldsplit_1_pc_hpddm_`; the operator associated to the `PC` is spectrally equivalent to the original Schur complement

   Level: advanced

.seealso: [](sec_pc), `PCHPDDM`, `PC`, `PCFIELDSPLIT`, `PC_FIELDSPLIT_SCHUR_PRE_SELF`, `PCFieldSplitSetSchurPre()`, `PCHPDDMSetAuxiliaryMat()`
E*/
typedef enum {
  PC_HPDDM_SCHUR_PRE_LEAST_SQUARES,
  PC_HPDDM_SCHUR_PRE_GENEO,
} PCHPDDMSchurPreType;

/*E
   PCFailedReason - indicates type of `PC` failure

   Level: beginner

.seealso: [](sec_pc), `PC`
E*/
typedef enum {
  PC_SETUP_ERROR = -1,
  PC_NOERROR,
  PC_FACTOR_STRUCT_ZEROPIVOT,
  PC_FACTOR_NUMERIC_ZEROPIVOT,
  PC_FACTOR_OUTMEMORY,
  PC_FACTOR_OTHER,
  PC_INCONSISTENT_RHS,
  PC_SUBPC_ERROR
} PCFailedReason;
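
/*
   Example usage (illustrative sketch): querying why a preconditioner failed; `pc` is assumed
   to be a PC whose setup or application may have broken down.

     PCFailedReason reason;
     PetscCall(PCGetFailedReason(pc, &reason));
     if (reason != PC_NOERROR) {
       // for example, retry with a shifted or more robust factorization
     }
*/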

/*E
   PCGAMGLayoutType - Layout for reduced grids

   Level: intermediate

.seealso: [](sec_pc), `PCGAMG`, `PC`, `PCGAMGSetCoarseGridLayoutType()`
E*/
typedef enum {
  PCGAMG_LAYOUT_COMPACT,
  PCGAMG_LAYOUT_SPREAD
} PCGAMGLayoutType;