Actual source code: ex46.cxx
static char help[] = "Demonstrates calling Trilinos and then PETSc in the same program.\n\n";

/*T
   Concepts: introduction to PETSc^Trilinos
   Processors: n

   Example obtained from: http://trilinos.org/docs/dev/packages/tpetra/doc/html/Tpetra_Lesson01.html
T*/

#include <petscsys.h>
#include <Tpetra_DefaultPlatform.hpp>
#include <Tpetra_Version.hpp>
#include <Teuchos_GlobalMPISession.hpp> // used if Trilinos is the one that starts up MPI

// Do something with the given communicator. In this case, we just
// print Tpetra's version to stdout on Process 0 in the given
// communicator.
void
exampleRoutine (const Teuchos::RCP<const Teuchos::Comm<int> >& comm)
{
  if (comm->getRank () == 0) {
    // On (MPI) Process 0, print out the Tpetra software version.
    std::cout << Tpetra::version () << std::endl << std::endl;
  }
}

int main(int argc,char **argv)
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name. This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;
  // Start up MPI, if using MPI. Trilinos doesn't have to be built
  // with MPI; it's called a "serial" build if you build without MPI.
  // GlobalMPISession hides this implementation detail.
  //
  // Note the third argument. If you pass GlobalMPISession the
  // address of an std::ostream, it will print a one-line status
  // message with the rank on each MPI process. This may be
  // undesirable if running with a large number of MPI processes.
  // You can avoid printing anything here by passing in either
  // NULL or the address of a Teuchos::oblackholestream.
  Teuchos::GlobalMPISession mpiSession (&argc, &argv, NULL);
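  // For instance (an illustrative variant, not part of the original
  // example), passing the address of an output stream instead of NULL
  // would print the one-line per-rank status message described above:
  //   Teuchos::GlobalMPISession mpiSession (&argc, &argv, &std::cout);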
  // Get a pointer to the communicator object representing
  // MPI_COMM_WORLD. getDefaultPlatform().getComm() doesn't create a
  // new object every time you call it; it just returns the same
  // communicator each time. Thus, you can call it anywhere and get
  // the same communicator. (This is handy if you don't want to pass
  // a communicator around everywhere, though it's always better to
  // parameterize your algorithms on the communicator.)
  //
  // "Tpetra::DefaultPlatform" knows whether or not we built with MPI
  // support. If we didn't build with MPI, we'll get a "communicator"
  // with size 1, whose only process has rank 0.
  Teuchos::RCP<const Teuchos::Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
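  // The "same communicator each time" claim above can be checked
  // directly (a sketch, not part of the original example; needs
  // <cassert>): a second call yields an RCP to the same object.
  //   Teuchos::RCP<const Teuchos::Comm<int> > comm2 =
  //     Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
  //   assert (comm.getRawPtr () == comm2.getRawPtr ());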

  PetscErrorCode ierr = PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;
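  // At this point both Trilinos and PETSc are live. As a quick
  // illustration (not in the original example), one could print
  // through PETSc here:
  //   ierr = PetscPrintf(PETSC_COMM_WORLD,"PETSc and Trilinos are both initialized\n");CHKERRQ(ierr);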

  // Get my process' rank, and the total number of processes.
  // Equivalent to MPI_Comm_rank and MPI_Comm_size, respectively.
  const int myRank = comm->getRank ();
  const int size = comm->getSize ();
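  // The equivalent raw MPI calls would be (a sketch, assuming an MPI
  // build; not part of the original example):
  //   int myRank, size;
  //   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  //   MPI_Comm_size(MPI_COMM_WORLD, &size);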
  if (myRank == 0) {
    cout << "Total number of processes: " << size << endl;
  }
  // Do something with the new communicator.
  exampleRoutine (comm);
  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }
  // GlobalMPISession calls MPI_Finalize() in its destructor, if
  // appropriate. You don't have to do anything here! Just return
  // from main(). Isn't that helpful?
  ierr = PetscFinalize();
  return ierr;
}
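
// A possible way to run this example with the process count the test
// below uses (build details depend on the local PETSc/Trilinos
// installation; the executable name here is an assumption):
//   mpiexec -n 3 ./ex46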

/*TEST

   build:
     requires: trilinos

   test:
     nsize: 3
     filter: grep -v "Tpetra in Trilinos"

TEST*/