Parallel Computation
Fortran and MPI
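The excerpt below is taken from the middle of a larger program that solves the steady one-dimensional heat equation in parallel. Each rank owns npoints interior points plus two ghost points, T(1) and T(npoints+2), which hold copies of the neighbouring ranks' boundary values. The declarations and initialization are not part of the listing; the following is a minimal sketch of the setup it assumes, where n_total and all numeric values are illustrative placeholders, not from the original:

! minimal setup sketch: the declarations the excerpt below assumes
! (n_total and every numeric value here is illustrative, not from the original)
program heat1d
  use mpi
  implicit none
  integer :: myrank, nranks, ierror, istatus(MPI_STATUS_SIZE)
  integer :: i, k, l, npoints, n_total, iter_max
  real :: Temp, R, omega, Tl, Tr
  real, allocatable :: T(:), T_full(:)

  call MPI_INIT(ierror)
  call MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierror)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nranks, ierror)

  n_total  = 1000                 ! total number of interior points
  iter_max = 10000                ! number of SOR sweeps
  omega    = 1.8                  ! over-relaxation factor, 1 < omega < 2
  Tl       = 0.0                  ! fixed temperature at the left boundary
  Tr       = 100.0                ! fixed temperature at the right boundary

  npoints = n_total / nranks      ! assumes nranks divides n_total evenly
  allocate(T(npoints+2))          ! npoints interior points + 2 ghost points
  T = 0.0
  if (myrank == 0) allocate(T_full(n_total))

  ! ... the iterative solution and MPI_FINALIZE shown in the listing follow here ...
end program heat1d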
! iterative solution of the system
do k = 1, iter_max

  ! transfer data at processor boundaries
  do i = 0, nranks-2
    if (myrank == i) then          ! send last interior point to the next rank's first ghost point
      Temp = T(npoints+1)
      call MPI_SEND(Temp, 1, MPI_REAL, myrank+1, i, MPI_COMM_WORLD, ierror)
    elseif (myrank == i+1) then    ! receive it from the previous rank into the first ghost point
      call MPI_RECV(Temp, 1, MPI_REAL, myrank-1, i, MPI_COMM_WORLD, istatus, ierror)
      T(1) = Temp
    end if
  end do

  ! send the first interior point T(2) to the previous rank's last ghost point T(npoints+2)
  do l = 1, nranks-1
    if (myrank == l) then          ! send the first interior point to the previous rank
      Temp = T(2)
      call MPI_SEND(Temp, 1, MPI_REAL, myrank-1, l, MPI_COMM_WORLD, ierror)
    elseif (myrank == l-1) then    ! receive it from the next rank into the last ghost point
      call MPI_RECV(Temp, 1, MPI_REAL, myrank+1, l, MPI_COMM_WORLD, istatus, ierror)
      T(npoints+2) = Temp
    end if
  end do
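Note that these two loops serialize the exchange: in each loop iteration only one send/receive pair is active, so the boundary values trickle along the chain of ranks one pair at a time. A common alternative, not part of the original listing, is MPI_SENDRECV, which lets every rank exchange both ghost points at once. A sketch, reusing the excerpt's variable names plus two new integers left and right:

  ! halo exchange with MPI_SENDRECV (sketch): every rank exchanges both ghost
  ! points concurrently; MPI_PROC_NULL turns the sends/receives at the domain
  ! ends into no-ops, so no rank needs special-casing
  left  = myrank - 1
  right = myrank + 1
  if (myrank == 0)        left  = MPI_PROC_NULL
  if (myrank == nranks-1) right = MPI_PROC_NULL

  ! send last interior point to the right, receive left ghost from the left
  call MPI_SENDRECV(T(npoints+1), 1, MPI_REAL, right, 0, &
                    T(1),         1, MPI_REAL, left,  0, &
                    MPI_COMM_WORLD, istatus, ierror)
  ! send first interior point to the left, receive right ghost from the right
  call MPI_SENDRECV(T(2),         1, MPI_REAL, left,  1, &
                    T(npoints+2), 1, MPI_REAL, right, 1, &
                    MPI_COMM_WORLD, istatus, ierror)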

  ! update the interior points: Gauss-Seidel with Successive Over-Relaxation (SOR)
  do i = 2, npoints+1
    R = 0.5*omega*(T(i+1) - 2*T(i) + T(i-1))
    T(i) = T(i) + R
  end do
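For the steady 1D heat equation the Gauss-Seidel update is T(i) = (T(i-1) + T(i+1))/2; SOR over-relaxes it, T(i) <- T(i) + (omega/2)*(T(i-1) - 2*T(i) + T(i+1)), which is exactly the correction R computed above. With omega = 1 the scheme reduces to plain Gauss-Seidel, and 1 < omega < 2 typically converges faster. The sweep is Gauss-Seidel rather than Jacobi because T(i-1) already holds its updated value when T(i) is computed.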

  ! re-impose the fixed boundary temperatures on the outermost ranks
  if (myrank == 0) then
    T(1) = Tl                      ! left domain boundary
  end if
  if (myrank == nranks-1) then
    T(npoints+2) = Tr              ! right domain boundary
  end if
end do
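As written, the loop always runs all iter_max sweeps. A common refinement, not in the original listing, is to stop once the largest correction falls below a tolerance; since each rank only sees its own points, this needs a global reduction. A sketch, with hypothetical variables Rmax_local, Rmax_global and tol, that would replace the plain SOR sweep inside the k loop:

  ! SOR sweep with convergence test (sketch): track the largest local
  ! correction, then take the maximum over all ranks with MPI_ALLREDUCE
  Rmax_local = 0.0
  do i = 2, npoints+1
    R = 0.5*omega*(T(i+1) - 2*T(i) + T(i-1))
    T(i) = T(i) + R
    Rmax_local = max(Rmax_local, abs(R))
  end do

  call MPI_ALLREDUCE(Rmax_local, Rmax_global, 1, MPI_REAL, MPI_MAX, &
                     MPI_COMM_WORLD, ierror)
  if (Rmax_global < tol) exit      ! every rank leaves the k loop together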

! gather the interior points from all ranks into T_full on the root (rank 0)
call MPI_GATHER(T(2:npoints+1), npoints, MPI_REAL, T_full, npoints, &
                MPI_REAL, 0, MPI_COMM_WORLD, ierror)
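MPI_GATHER assumes every rank contributes exactly npoints values, and they arrive in T_full in rank order, which matches the left-to-right domain decomposition. If the points did not divide evenly among the ranks, MPI_GATHERV, which takes per-rank counts and displacements, would be needed instead.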

call MPI_FINALIZE(ierror)