\item{allocates a memory segment of \verb+size+ bytes}
\item{\verb+info+ can be used to provide directives that control the desired location of the allocated memory}
\item{\verb+*baseptr+ is the pointer to the beginning of the memory segment}
\end{itemize}
\begin{lstlisting}[language=C,frame=lines]
int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
\end{lstlisting}
\end{frame}
\begin{frame}[containsverbatim]
\frametitle{Memory \texttt{window} creation}
\begin{itemize}
\item{An \verb+MPI_Win+ is an opaque object which can be reused to perform one-sided communication}
\item{A \verb+window+ is a specified region in memory that can be accessed by another process}
\end{itemize}
\begin{lstlisting}[language=C,frame=lines]
int MPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win)
\end{lstlisting}
where \verb+base+ is the initial address of the region, \verb+size+ its length in bytes, and \verb+disp_unit+ the local unit size (in bytes) used for displacements.
\end{frame}
\begin{frame}[containsverbatim]
\frametitle{\texttt{Put}/\texttt{Get} within the \texttt{window}}
\begin{itemize}
\item{close to an \verb+MPI_Send+ call with
\begin{itemize}
\item{\textit{what to send} : \verb+origin_addr+ start of the buffer of size \verb+origin_count+ of type \verb+origin_datatype+}
\item{\textit{to which process} : \verb+target_rank+ at the place \verb+target_count+ of type \verb+target_datatype+}
\item{\textit{in which context} : within the window \verb+win+}
\end{itemize}
}
% \item {}
\end{itemize}
\begin{lstlisting}[language=C,frame=lines]
int MPI_Put(const void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win)
\end{lstlisting}
\begin{lstlisting}[language=C,frame=lines]
int MPI_Get(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win)
\end{lstlisting}
\end{frame}
\begin{frame}[containsverbatim]
\frametitle{Collective I/O routines}
\begin{itemize}
\item{Same structure as the independent routines but with \verb+_all+ at the end}
\item{for instance :}
\end{itemize}
\begin{lstlisting}[language=C,frame=lines]
int MPI_File_write_all(MPI_File fh, ROMIO_CONST void *buf, int count, MPI_Datatype datatype, MPI_Status *status)
\end{lstlisting}
\end{frame}
%\begin{frame}[containsverbatim]
%\frametitle{Using subarrays}
%\begin{itemize}
% \item {subarray from a structured data grid}
% \item {definition of subarrays leads to a collective call}
% \item {definition of a pattern}
% \item {hallos (or ghostcells) are allowed}
% \item {like defining a new datatype}
% \item {subarrays \textbf{must} have the same size on each process}
%\end{itemize}
%\begin{lstlisting}[language=C,frame=lines]
%int MPI_Type_create_subarray(int ndims, const int array_of_sizes[], const int array_of_subsizes[], const int array_of_starts[], int order, MPI_Datatype oldtype, MPI_Datatype *newtype)
%\end{lstlisting}
%\end{frame}
%\subsection{Virtual topology}
%\begin{frame}[containsverbatim]
%\frametitle{Parenthesis : Virtual Topology}
%\frametitle{Virtual Topology}
%\begin{itemize}
% \item {A subarray must be mapped onto a topology}
% \item {It is done through the creation of a new communicator}
% \item {Cartesian topology fits with subarrays from a regular cartesian grid}
%\end{itemize}
%\begin{lstlisting}[language=C,frame=lines]
%int MPI_Cart_create(MPI_Comm comm_old, int ndims, const int dims[], const int periods[], int reorder, MPI_Comm *comm_cart)
%\end{lstlisting}
%\end{frame}
\begin{frame}[containsverbatim]
\frametitle{Non-blocking collectives (NBC) for what ?}
\begin{itemize}
\item{Same situations as for non-blocking point-to-point communications :
\begin{itemize}
\item{small message sizes}
\item{enough computation to perform between start and end of the communication}
\item{an algorithm that allows computation to proceed with only partial knowledge of the data}
\end{itemize}
}
% \item{}
\end{itemize}
\end{frame}
\begin{frame}[containsverbatim]
\frametitle{Non-blocking collectives (NBC)}
\begin{itemize}
\item{\verb+int MPI_Ibarrier(MPI_Comm comm,+\\\verb+MPI_Request *request)+ : NB version of \verb+MPI_Barrier()+}
\item{\verb+int MPI_Ibcast(void* buffer, int count,+\\\verb+ MPI_Datatype datatype, int root, MPI_Comm comm, MPI_Request *request)+ : NB version of \verb+MPI_Bcast()+}
\item{Operations on the new \verb+comm+ communicator :}
\end{itemize}
\begin{lstlisting}[language=C,frame=lines]
MPI_Comm_rank(comm, &rank);
MPI_Cart_coords(comm, rank, 2, coords);
printf("Process %d has position (%d, %d) \n", rank, coords[0], coords[1]);
\end{lstlisting}
\end{frame}
\begin{frame}[containsverbatim]
\frametitle{Generic virtual topologies}
\begin{itemize}
\item{although MPI-1 provided functions to create a general graph topology (\verb+int MPI_Graph_create()+), it was not scalable (all processes needed to know the complete graph)}
\item{MPI-2.2 introduced the \textit{distributed graph topology} : each process does not need to know the complete graph.}
\item{\verb+int MPI_Dist_graph_create_adjacent()+ creates a new (local) communicator to which topology information has been attached. Only adjacent processes. Example : stencil-based algorithms}
\item{\verb+int MPI_Dist_graph_create()+ : create a new (local) communicator to which a topology has been attached (more general). }