ci: Restore MPI notebook testing #2478

Merged (1 commit, Nov 5, 2024)
3 changes: 1 addition & 2 deletions .github/workflows/examples-mpi.yml
@@ -65,12 +65,11 @@ jobs:
      python3 scripts/clear_devito_cache.py
    - name: Test mpi notebooks
-     continue-on-error: true
      run : |
        ./scripts/create_ipyparallel_mpi_profile.sh
        ipcluster start --profile=mpi --engines=mpi -n 4 --daemonize
        # A few seconds to ensure workers are ready
-       sleep 20
+       sleep 10
        py.test --nbval examples/mpi
        ipcluster stop --profile=mpi
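
The step boots a four-engine ipyparallel MPI cluster, sleeps so the engines can register, then runs the notebooks under py.test's nbval plugin. A fixed sleep is inherently racy, which is why the interval is being tuned here; below is a minimal Python sketch of an explicit readiness check instead (assuming the `mpi` profile created by create_ipyparallel_mpi_profile.sh and the four engines started above):

    # Sketch: poll for engine registration instead of a fixed sleep.
    # Assumes the `mpi` ipyparallel profile and the 4 MPI engines started by
    # `ipcluster start --profile=mpi --engines=mpi -n 4 --daemonize`.
    import time
    import ipyparallel as ipp

    rc = ipp.Client(profile="mpi")
    deadline = time.time() + 60
    while len(rc.ids) < 4 and time.time() < deadline:
        time.sleep(1)
    assert len(rc.ids) == 4, f"only {len(rc.ids)}/4 engines registered"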
95 changes: 38 additions & 57 deletions examples/mpi/overview.ipynb
@@ -245,17 +245,7 @@
"output_type": "stream",
"text": [
"[stderr:0] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:1] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:2] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:3] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n"
"Operator `Kernel` ran in 0.01 s\n"
]
}
],
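
The expected output above comes from a %%px cell: ipyparallel captures each MPI engine's stderr separately and labels it [stderr:N], and the old notebook additionally stored a duplicate INFO:Devito line per rank, which this change drops. A hedged sketch of how those per-engine blocks arise (the helper `report` is illustrative, not from the notebook):

    # Each engine's stderr is rendered as its own "[stderr:N]" block, the
    # format nbval compares against the notebook's stored output.
    import ipyparallel as ipp

    rc = ipp.Client(profile="mpi")

    def report():
        import sys
        from mpi4py import MPI
        print(f"rank {MPI.COMM_WORLD.rank} ready", file=sys.stderr)

    ar = rc[:].apply_async(report)
    ar.wait()
    ar.display_outputs()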
@@ -447,10 +437,10 @@
" double section0;\n",
"} ;\n",
"\n",
"static void gather0(float *restrict buf_vec, int bx_size, int by_size, struct dataobj *restrict u_vec, const int otime, const int ox, const int oy);\n",
"static void scatter0(float *restrict buf_vec, int bx_size, int by_size, struct dataobj *restrict u_vec, const int otime, const int ox, const int oy);\n",
"static void sendrecv0(struct dataobj *restrict u_vec, const int x_size, const int y_size, int ogtime, int ogx, int ogy, int ostime, int osx, int osy, int fromrank, int torank, MPI_Comm comm);\n",
"static void haloupdate0(struct dataobj *restrict u_vec, MPI_Comm comm, struct neighborhood * nb, int otime);\n",
"static void gather0(float *restrict buf_vec, int bx_size, int by_size, struct dataobj *restrict u_vec, const int otime, const int ox, const int oy);\n",
"static void scatter0(float *restrict buf_vec, int bx_size, int by_size, struct dataobj *restrict u_vec, const int otime, const int ox, const int oy);\n",
"\n",
"int Kernel(struct dataobj *restrict u_vec, const float h_x, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, MPI_Comm comm, struct neighborhood * nb, struct profiler * timers)\n",
"{\n",
@@ -471,7 +461,7 @@
" #pragma omp simd aligned(u:64)\n",
" for (int y = y_m; y <= y_M; y += 1)\n",
" {\n",
" u[t1][x + 2][y + 2] = r0*(-u[t0][x + 2][y + 2]) + r0*u[t0][x + 3][y + 2] + 1;\n",
" u[t1][x + 2][y + 2] = -r0*u[t0][x + 2][y + 2] + r0*u[t0][x + 3][y + 2] + 1;\n",
" }\n",
" }\n",
" STOP(section0,timers)\n",
@@ -480,6 +470,38 @@
" return 0;\n",
"}\n",
"\n",
"static void sendrecv0(struct dataobj *restrict u_vec, const int x_size, const int y_size, int ogtime, int ogx, int ogy, int ostime, int osx, int osy, int fromrank, int torank, MPI_Comm comm)\n",
"{\n",
" MPI_Request rrecv;\n",
" MPI_Request rsend;\n",
"\n",
" float *restrict bufg_vec __attribute__ ((aligned (64)));\n",
" posix_memalign((void**)(&bufg_vec),64,x_size*y_size*sizeof(float));\n",
" float *restrict bufs_vec __attribute__ ((aligned (64)));\n",
" posix_memalign((void**)(&bufs_vec),64,x_size*y_size*sizeof(float));\n",
"\n",
" MPI_Irecv(bufs_vec,x_size*y_size,MPI_FLOAT,fromrank,13,comm,&(rrecv));\n",
" if (torank != MPI_PROC_NULL)\n",
" {\n",
" gather0(bufg_vec,x_size,y_size,u_vec,ogtime,ogx,ogy);\n",
" }\n",
" MPI_Isend(bufg_vec,x_size*y_size,MPI_FLOAT,torank,13,comm,&(rsend));\n",
" MPI_Wait(&(rsend),MPI_STATUS_IGNORE);\n",
" MPI_Wait(&(rrecv),MPI_STATUS_IGNORE);\n",
" if (fromrank != MPI_PROC_NULL)\n",
" {\n",
" scatter0(bufs_vec,x_size,y_size,u_vec,ostime,osx,osy);\n",
" }\n",
"\n",
" free(bufg_vec);\n",
" free(bufs_vec);\n",
"}\n",
"\n",
"static void haloupdate0(struct dataobj *restrict u_vec, MPI_Comm comm, struct neighborhood * nb, int otime)\n",
"{\n",
" sendrecv0(u_vec,u_vec->hsize[3],u_vec->npsize[2],otime,u_vec->oofs[2],u_vec->hofs[4],otime,u_vec->hofs[3],u_vec->hofs[4],nb->rc,nb->lc,comm);\n",
"}\n",
"\n",
"static void gather0(float *restrict buf_vec, int bx_size, int by_size, struct dataobj *restrict u_vec, const int otime, const int ox, const int oy)\n",
"{\n",
" float (*restrict buf)[bx_size][by_size] __attribute__ ((aligned (64))) = (float (*)[bx_size][by_size]) buf_vec;\n",
@@ -489,6 +511,7 @@
" const int y_m = 0;\n",
" const int x_M = bx_size - 1;\n",
" const int y_M = by_size - 1;\n",
"\n",
" for (int x = x_m; x <= x_M; x += 1)\n",
" {\n",
" #pragma omp simd aligned(u:64)\n",
@@ -518,38 +541,6 @@
" }\n",
" }\n",
"}\n",
"\n",
"static void sendrecv0(struct dataobj *restrict u_vec, const int x_size, const int y_size, int ogtime, int ogx, int ogy, int ostime, int osx, int osy, int fromrank, int torank, MPI_Comm comm)\n",
"{\n",
" float *restrict bufg_vec __attribute__ ((aligned (64)));\n",
" posix_memalign((void**)(&bufg_vec),64,x_size*y_size*sizeof(float));\n",
" float *restrict bufs_vec __attribute__ ((aligned (64)));\n",
" posix_memalign((void**)(&bufs_vec),64,x_size*y_size*sizeof(float));\n",
"\n",
" MPI_Request rrecv;\n",
" MPI_Request rsend;\n",
"\n",
" MPI_Irecv(bufs_vec,x_size*y_size,MPI_FLOAT,fromrank,13,comm,&(rrecv));\n",
" if (torank != MPI_PROC_NULL)\n",
" {\n",
" gather0(bufg_vec,x_size,y_size,u_vec,ogtime,ogx,ogy);\n",
" }\n",
" MPI_Isend(bufg_vec,x_size*y_size,MPI_FLOAT,torank,13,comm,&(rsend));\n",
" MPI_Wait(&(rsend),MPI_STATUS_IGNORE);\n",
" MPI_Wait(&(rrecv),MPI_STATUS_IGNORE);\n",
" if (fromrank != MPI_PROC_NULL)\n",
" {\n",
" scatter0(bufs_vec,x_size,y_size,u_vec,ostime,osx,osy);\n",
" }\n",
"\n",
" free(bufg_vec);\n",
" free(bufs_vec);\n",
"}\n",
"\n",
"static void haloupdate0(struct dataobj *restrict u_vec, MPI_Comm comm, struct neighborhood * nb, int otime)\n",
"{\n",
" sendrecv0(u_vec,u_vec->hsize[3],u_vec->npsize[2],otime,u_vec->oofs[2],u_vec->hofs[4],otime,u_vec->hofs[3],u_vec->hofs[4],nb->rc,nb->lc,comm);\n",
"}\n",
"\n"
]
},
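
In the generated C above, sendrecv0 posts a nonblocking receive, gathers the local boundary into a contiguous send buffer, posts the send, waits on both requests, and scatters the received halo back into u; haloupdate0 issues one such exchange per neighbor. The hunks simply move those definitions ahead of gather0/scatter0 and reorder the prototypes to match. A hedged sketch of how the notebook obtains code like this from Devito (grid shape and equation are illustrative assumptions):

    # Run on the ipyparallel engines (e.g. inside a %%px cell) with
    # DEVITO_MPI=1 under 4 MPI ranks; printing the Operator shows the
    # generated C, including the halo-exchange helpers diffed above.
    from devito import Grid, TimeFunction, Eq, Operator

    grid = Grid(shape=(4, 4))                     # illustrative shape
    u = TimeFunction(name="u", grid=grid, space_order=2)
    op = Operator(Eq(u.forward, u.dx + 1))        # illustrative equation
    print(op)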
@@ -789,17 +780,7 @@
"output_type": "stream",
"text": [
"[stderr:0] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:1] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:2] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n",
"[stderr:3] \n",
"Operator `Kernel` ran in 0.01 s\n",
"INFO:Devito:Operator `Kernel` ran in 0.01 s\n"
"Operator `Kernel` ran in 0.01 s\n"
]
}
],