intmain(int argc,char* argv[]){ // std::cout<<"argc:"<<argc<<std::endl; // for(int i=0;i<=argc;i++){ // std::cout<<"argv"<<i<<": "<<*(argv+i)<<std::endl; // } MPI::Init(argc,argv); int numP=MPI::COMM_WORLD.Get_size(); int myID=MPI::COMM_WORLD.Get_rank(); std::cout<<"my id :"<<myID<<" total:"<<numP<<std::endl; MPI::Finalize(); }
使用mpirun运行程序与直接运行程序时,argc和argv保持不变;进程数参数(-np 4)由mpirun自身消化,不会出现在程序的argv里。
1 2
// MPI::Init starts the MPI environment and must be called before any other
// MPI call; MPI::Finalize tears it down and must be the last MPI call.
MPI::Init(argc,argv); MPI::Finalize();
分别表示MPI运行环境的初始化函数和终止(清理)函数;Finalize并非C++意义上的析构函数,调用之后不能再使用任何MPI函数。
1 2
// Query the default communicator: numP = number of processes in
// MPI::COMM_WORLD, myID = this process's rank, in the range [0, numP).
int numP=MPI::COMM_WORLD.Get_size(); int myID=MPI::COMM_WORLD.Get_rank();
Get_size()获取通信子中的进程总数
Get_rank()获取当前进程的id(rank)。每个进程的id各不相同,用于区分和管理当前进程
id从0开始
点到点通信
阻塞通信
send发送和recv接收函数
1 2
voidSend(constvoid* buf, int count, const Datatype& datatype, int dest,int tag) voidRecv(void* buf, int count, const Datatype& datatype, int source, int tag)
$ mpirun -np 4 ./hello my id :1 pingCount:1 my id :2 pingCount:2 my id :3 pingCount:3 my id :0 pingCount:4 my id :1 pingCount:5 my id :2 pingCount:6 my id :3 pingCount:7 my id :0 pingCount:8 my id :1 pingCount:9 my id :2 pingCount:10 my id :3 pingCount:11 my id :0 pingCount:12 my id :1 pingCount:13
循环打印到第13次:第9次消息发出后该进程仍停留在循环中等待,消息沿进程环传递一圈回到它时,计数已经累加为13。
如果不加sleep结果如下
1 2 3 4 5 6 7 8 9 10 11 12 13 14
$ mpirun -np 4 ./hello my id :1 pingCount:1 my id :2 pingCount:2 my id :3 pingCount:3 my id :0 pingCount:4 my id :0 pingCount:8 my id :0 pingCount:12 my id :1 pingCount:5 my id :1 pingCount:9 my id :1 pingCount:13 my id :2 pingCount:6 my id :2 pingCount:10 my id :3 pingCount:7 my id :3 pingCount:11
打印顺序是乱序:各进程的标准输出由mpirun分别收集后合并,合并时不保证全局顺序,因此不同进程的输出行可能交错出现。
非阻塞通信
1 2
// Non-blocking variants: both return immediately with an MPI::Request handle
// that must later be completed via Wait()/Test() before the buffer is reused.
// Fixed: original had the mashed token `constvoid*` from a bad paste.
MPI::Request Isend(const void* buf, int count, const Datatype& datatype, int dest, int tag)
MPI::Request Irecv(void* buf, int count, const Datatype& datatype, int source, int tag)
MPI::Init(argc,argv); int numP=MPI::COMM_WORLD.Get_size(); int myID=MPI::COMM_WORLD.Get_rank(); std::cout<<"my id :"<<myID<<" mes:"<<mes<<std::endl; if(myID == 0)mes=54321; int root =0; MPI::COMM_WORLD.Bcast(&mes,1,MPI::INT,root); std::cout<<"my id :"<<myID<<" mes:"<<mes<<std::endl; //sleep(2); MPI::Finalize(); }
1 2 3 4 5 6 7 8 9
$ mpirun -np 4 ./hello my id :0 mes:12345 my id :0 mes:54321 my id :1 mes:12345 my id :2 mes:12345 my id :2 mes:54321 my id :3 mes:12345 my id :1 mes:54321 my id :3 mes:54321
数据分配与收集
数据分配
1
Scatter(constvoid* sendbuf, int sendcount, const MPI::Datatype& sendtype, void* recvbuf, int recvcount, const MPI::Datatype& recvtype, int root)
// Scatter demo: the root owns mes[10]; each of the numP ranks receives one
// int (rank i gets mes[i]) into its local `mydata`.
int mes[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
int mydata = -1;
MPI::Init(argc, argv);
int numP = MPI::COMM_WORLD.Get_size();
int myID = MPI::COMM_WORLD.Get_rank();
int root = 0;
std::cout << "my id :" << myID << " mydata:" << mydata << std::endl;
MPI::COMM_WORLD.Scatter(mes, 1, MPI::INT, &mydata, 1, MPI::INT, root);
std::cout << "my id :" << myID << " mydata:" << mydata << std::endl;
// sleep(2);
MPI::Finalize();
}
1 2 3 4 5 6 7 8 9
$ mpirun -np 4 ./hello my id :0 mydata:-1 my id :0 mydata:0 my id :1 mydata:-1 my id :1 mydata:1 my id :2 mydata:-1 my id :2 mydata:2 my id :3 mydata:-1 my id :3 mydata:3
int mes[10]={-1,-1,-1,-1,-1,-1,-1,-1,-1,-1}; int mydata=-1; MPI::Init(argc,argv); int numP=MPI::COMM_WORLD.Get_size(); int myID=MPI::COMM_WORLD.Get_rank(); if(myID==0){ for (int i=0;i<numP;i++)std::cout<<mes[i]<<" "; std::cout<<std::endl; } mydata=myID+10; int root =0; std::cout<<"my id :"<<myID<<" mydata:"<<mydata<<std::endl; MPI::COMM_WORLD.Gather(&mydata,1,MPI::INT,mes,1,MPI::INT,root); std::cout<<"my id :"<<myID<<" mydata:"<<mydata<<std::endl; if(myID==0){ for (int i=0;i<numP;i++)std::cout<<mes[i]<<" "; std::cout<<std::endl; } //sleep(2);
MPI::Finalize(); }
1 2 3 4 5 6 7 8 9 10 11
$ mpirun -np 4 ./hello my id :1 mydata:11 my id :2 mydata:12 -1 -1 -1 -1 my id :1 mydata:11 my id :3 mydata:13 my id :3 mydata:13 my id :0 mydata:10 my id :0 mydata:10 10 11 12 13 my id :2 mydata:12