

C++ MPI_Unpack Function Code Examples

This article collects typical usage examples of the C/C++ MPI_Unpack function. If you are wondering what exactly MPI_Unpack does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen MPI_Unpack code examples are shown below, sorted by popularity by default.
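
For reference, the prototype of MPI_Unpack (as declared in recent versions of the MPI standard; older versions declare inbuf without const) is:

int MPI_Unpack(const void *inbuf, int insize, int *position,
               void *outbuf, int outcount, MPI_Datatype datatype,
               MPI_Comm comm);

Each call reads outcount elements of type datatype from the packed buffer inbuf (of insize bytes), starting at byte offset *position, stores them into outbuf, and advances *position past the bytes it consumed. Successive calls that share the same position variable therefore walk through a buffer filled by matching MPI_Pack calls, which is the pattern every example below follows.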

Example 1: COM_Recv

/* blocking receive */
void COM_Recv (void *pattern)
{
  COMPATTERN *cp = pattern;
  int (*recv_sizes) [3] = cp->recv_sizes,
        recv_count = cp->recv_count,
        send_count = cp->send_count,
        i, j;
  char **recv_data = cp->recv_data;
  MPI_Request *send_req = cp->send_req,
	      *recv_req = cp->recv_req;
  MPI_Status *send_sta = cp->send_sta,
             *recv_sta = cp->recv_sta;
  MPI_Comm comm = cp->comm;
  COMDATA *cd;

  /* wait until send is done */
  MPI_Waitall (send_count, send_req, send_sta);

  /* wait until receive is done */
  MPI_Waitall (recv_count, recv_req, recv_sta);

  /* unpack data */
  for (i = j = 0, cd = cp->recv; i < recv_count; i ++, cd ++, j = 0)
  {
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, cd->i, cd->ints, MPI_INT, comm);
    MPI_Unpack (recv_data [i], recv_sizes [i][2], &j, cd->d, cd->doubles, MPI_DOUBLE, comm);
  }
}
Author: KonstantinosKr | Project: solfec | Lines: 29 | Source: com.c

Example 2: send_struct

// distribute the number of atoms, atom types, and backbone atoms from rank 0 to all other ranks
void send_struct(int *nback, int iproc, int nprocs, int *nat, int *ntypes, MPI_Status astatus)
{
	int buffer_size, position,i;
	char buffer[buffer_max];
	buffer_size = 3*sizeof(int);
	if(buffer_size>buffer_max)
	{
		fprintf(stderr,"Buffer too small\n");
		MPI_Finalize();
		exit(1);
	}
	
	if(iproc==0) 
	{
		position = 0;	
		MPI_Pack(nat,1,MPI_INT,buffer,buffer_size,&position,MPI_COMM_WORLD);
		MPI_Pack(ntypes,1,MPI_INT,buffer,buffer_size,&position,MPI_COMM_WORLD);
		MPI_Pack(nback,1,MPI_INT,buffer,buffer_size,&position,MPI_COMM_WORLD);
		
		for(i=1;i<nprocs;i++) MPI_Send(buffer,position,MPI_PACKED,i, 200+i, MPI_COMM_WORLD);
	}
	if(iproc!=0)
	{
		MPI_Recv(buffer,buffer_size,MPI_PACKED,0, 200+iproc, MPI_COMM_WORLD, &astatus);
		position=0;
		MPI_Unpack(buffer,buffer_size,&position,nat,1,MPI_INT,MPI_COMM_WORLD);
		MPI_Unpack(buffer,buffer_size,&position,ntypes,1,MPI_INT,MPI_COMM_WORLD);
		MPI_Unpack(buffer,buffer_size,&position,nback,1,MPI_INT,MPI_COMM_WORLD);
	}

	return;
}
Author: zhanyinx | Project: Montegrappa-1.2 | Lines: 33 | Source: MPIfunc.c

Example 3: WorkerMaster::getBestSolutionFromProcessors

void WorkerMaster::getBestSolutionFromProcessors() {
    cout << myRank << " waiting for solution=========================================================" << endl;
    cout << "My solution is: " << result->IsExistResult() << ", distance:" << result->GetHammingDistance() << ", ham weight:" << result->GetHammingWeight() << endl;

    for (int i = 1; i < processors; i++) {
        char buffer[sizeOfHelpArray];
        int position = 0;
        bool array[sizeOfHelpArray];
        bool existSolution;

        MPI_Recv(buffer, workBufferLenght, MPI_PACKED, i, TAG_SOLUTION, MPI_COMM_WORLD, &status);
        MPI_Unpack(buffer, workBufferLenght, &position, &existSolution, 1, MPI_C_BOOL, MPI_COMM_WORLD);
        MPI_Unpack(buffer, workBufferLenght, &position, array, sizeOfHelpArray, MPI_C_BOOL, MPI_COMM_WORLD);

        cout << "0: recieved from " << i << " solution: ";
        if (existSolution) {
            cout << "exist ";
            BinaryVector::printArray(array, sizeOfHelpArray);
        } else {
            cout << "doesn´t exist ";
        }
        cout << endl;

        if (existSolution) {
            solveBestSolution(array);
        }
    }
}
Author: cervebar | Project: MI-PAR-parallel-solution-Game-of-Life | Lines: 28 | Source: WorkerMaster.cpp

Example 4: unpack_solution

solution_vector unpack_solution(void* buff, int buff_size, MPI_Comm comm, int* pos,
								float* score, void* problem_data) {
	color_assignment* cassign = (color_assignment*) get_root_partial_solution(problem_data);

	MPI_Unpack(buff, buff_size, pos, score, 1, MPI_FLOAT, comm);
	MPI_Unpack(buff, buff_size, pos, &cassign->curr_length, 1, MPI_INT, comm);
	MPI_Unpack(buff, buff_size, pos, cassign->vertex_colors, cassign->max_length, MPI_INT, comm);
	return (solution_vector) cassign;
}
Author: rajaths589 | Project: Distributed-Branch-and-Bound | Lines: 9 | Source: graph_color.c

Example 5: DCellUnpack

//Unpack [x,y,size,coorX,coorY] into DCell
void DCellUnpack(DCell cell, void *buffer, int bufSize, int *pos, MPI_Comm comm ) {
	MPI_Unpack(buffer,bufSize,pos,&cell->x,1,MPI_DOUBLE,comm);
	MPI_Unpack(buffer,bufSize,pos,&cell->y,1,MPI_DOUBLE,comm);
	MPI_Unpack(buffer,bufSize,pos,&cell->size,1,MPI_INT,comm);
	cell->coorX = (double *) malloc(cell->size*sizeof(double));
	cell->coorY = (double *) malloc(cell->size*sizeof(double));
	MPI_Unpack(buffer,bufSize,pos,cell->coorX,cell->size,MPI_DOUBLE,comm);
	MPI_Unpack(buffer,bufSize,pos,cell->coorY,cell->size,MPI_DOUBLE,comm);
}
Author: adrielb | Project: DCell | Lines: 10 | Source: DCell.c
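
For context, the pack step matching this layout would look roughly like the sketch below. DCellPack is a hypothetical name used for illustration; it is not taken from the DCell project.

/* Hypothetical counterpart to DCellUnpack: pack [x,y,size,coorX,coorY].
   The function name and the absence of error handling are assumptions. */
void DCellPack(DCell cell, void *buffer, int bufSize, int *pos, MPI_Comm comm) {
	MPI_Pack(&cell->x,    1,          MPI_DOUBLE, buffer, bufSize, pos, comm);
	MPI_Pack(&cell->y,    1,          MPI_DOUBLE, buffer, bufSize, pos, comm);
	MPI_Pack(&cell->size, 1,          MPI_INT,    buffer, bufSize, pos, comm);
	MPI_Pack(cell->coorX, cell->size, MPI_DOUBLE, buffer, bufSize, pos, comm);
	MPI_Pack(cell->coorY, cell->size, MPI_DOUBLE, buffer, bufSize, pos, comm);
}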

Example 6: main

int main(int argc, char** argv) {

	int rank, numprocs;
	int intA;
	double doubleB;
	unsigned long ulongC;

	int packsize, position;
	char packbuf[100];

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

	do {
		if(rank == 0) {
			fprintf(stdout, "Please input an int, a double, and a ull:\n");
			scanf_s("%d%lf%lu", &intA, &doubleB, &ulongC);
			packsize = 0;

			MPI_Pack(&intA, 1, MPI_INT, packbuf, 100, &packsize, MPI_COMM_WORLD);
			printf("packsize = %d\t", packsize);
			printf("size(int) = %d\n", sizeof(int));
			MPI_Pack(&doubleB, 1, MPI_DOUBLE, packbuf, 100, &packsize, MPI_COMM_WORLD);
			printf("packsize = %d\t", packsize);
			printf("size(double) = %d\n", sizeof(double));
			MPI_Pack(&ulongC, 1, MPI_UNSIGNED_LONG, packbuf, 100, &packsize, MPI_COMM_WORLD);
			printf("packsize = %d\t", packsize); 
			printf("size(unsigned long) = %d\n", sizeof(unsigned long));
		}

		MPI_Bcast(&packsize, 1, MPI_INT, 0, MPI_COMM_WORLD);
		MPI_Bcast(packbuf, packsize, MPI_PACKED, 0, MPI_COMM_WORLD);

		if(rank != 0) {
			position = 0;
			MPI_Unpack(packbuf, packsize, &position, &intA, 1, MPI_INT, MPI_COMM_WORLD);
			printf("position = %d\t", position);
			MPI_Unpack(packbuf, packsize, &position, &doubleB, 1, MPI_DOUBLE, MPI_COMM_WORLD);
			printf("position = %d\t", position);
			MPI_Unpack(packbuf, packsize, &position, &ulongC, 1, MPI_UNSIGNED_LONG, MPI_COMM_WORLD);
			printf("position = %d\n", position);
			printf("rank %d got int %d, double %lf, and unsigned long %lu\n", rank, intA, doubleB, ulongC);
		}

		MPI_Barrier(MPI_COMM_WORLD);

	} while (intA >= 0);

	printf("myid = %d, %d, %lf\n", rank, intA, doubleB);

	MPI_Finalize();
	return 0;
}
Author: wenhao87 | Project: MPI_Programming | Lines: 54 | Source: pack.cpp

Example 7: COMALL_Repeat

/* communicate integers and doubles according
 * to the pattern computed by COMALL_Pattern */
int COMALL_Repeat (void *pattern)
{
  COMALLPATTERN *pp = pattern;
  COMDATA *cd;
  int i;

  for (i = 0; i < pp->ncpu; i ++) pp->send_position [i] = pp->recv_position [i] = 0;

  /* pack ints */
  for (i = 0, cd = pp->send; i < pp->nsend; i ++, cd ++)
  {
    if (cd->ints)
    {
      MPI_Pack (cd->i, cd->ints, MPI_INT, &pp->send_data [pp->send_disps [cd->rank]], pp->send_counts [cd->rank], &pp->send_position [cd->rank], pp->comm);
    }
  }

  /* pack doubles */
  for (i = 0, cd = pp->send; i < pp->nsend; i ++, cd ++)
  {
    if (cd->doubles)
    {
      MPI_Pack (cd->d, cd->doubles, MPI_DOUBLE, &pp->send_data [pp->send_disps [cd->rank]], pp->send_counts [cd->rank], &pp->send_position [cd->rank], pp->comm); 
    }
  }

#if DEBUG
  for (i = 0; i < pp->ncpu; i ++)
  {
    ASSERT_DEBUG (pp->send_position [i] <= pp->send_counts [i], "Incorrect packing");
  }
#endif

  /* all to all send and receive */
  MPI_Alltoallv (pp->send_data, pp->send_counts, pp->send_disps, MPI_PACKED, pp->recv_data, pp->recv_counts, pp->recv_disps, MPI_PACKED, pp->comm);

  if (pp->recv_size)
  {
    /* unpack data */
    for (i = 0; i < pp->ncpu; i ++)
    {
      MPI_Unpack (&pp->recv_data [pp->recv_disps [i]], pp->recv_counts [i], &pp->recv_position [i], pp->recv [i].i, pp->recv [i].ints, MPI_INT, pp->comm);
      MPI_Unpack (&pp->recv_data [pp->recv_disps [i]], pp->recv_counts [i], &pp->recv_position [i], pp->recv [i].d, pp->recv [i].doubles, MPI_DOUBLE, pp->comm);
    }
  }

  return pp->send_size;
}
Author: KonstantinosKr | Project: solfec | Lines: 50 | Source: com.c

Example 8: AbstractCommunicator::unpackArray

template < class T > void
AbstractCommunicator::unpackArray (T * p, int n)
{
  if(n > 0)
    MPI_Unpack (mRecvBuffer.getBuf(), mRecvBuffer.getSize(), &mRecvBufferPosition,
		(void *) p, n, getMPIType < T > (), *(MPI_Comm*)mAuxData);
}
Author: yesyestian | Project: BNB-solver | Lines: 7 | Source: mpiaclib.hpp

Example 9: p7_oprofile_MPIRecv

/* Function:  p7_oprofile_MPIRecv()
 * Synopsis:  Receives an OPROFILE as a work unit from an MPI sender.
 * Incept:    MSF, Wed Oct 21, 2009 [Janelia]
 *
 * Purpose:   Receive a work unit that consists of a single OPROFILE
 *            sent by MPI <source> (<0..nproc-1>, or
 *            <MPI_ANY_SOURCE>) tagged as <tag> for MPI communicator <comm>.
 *            
 *            Work units are prefixed by a status code. If the unit's
 *            code is <eslOK> and no errors are encountered, this
 *            routine will return <eslOK> and a non-<NULL> <*ret_om>.
 *            If the unit's code is <eslEOD> (a shutdown signal), 
 *            this routine returns <eslEOD> and <*ret_om> is <NULL>.
 *   
 *            Caller provides a working buffer <*buf> of size
 *            <*nalloc> characters. These are passed by reference, so
 *            that <*buf> can be reallocated and <*nalloc> increased
 *            if necessary. As a special case, if <*buf> is <NULL> and
 *            <*nalloc> is 0, the buffer will be allocated
 *            appropriately, but the caller is still responsible for
 *            free'ing it.
 *            
 *            Caller may or may not already know what alphabet the OPROFILE
 *            is expected to be in.  A reference to the current
 *            alphabet is passed in <abc>. If the alphabet is unknown,
 *            pass <*abc = NULL>, and when the OPROFILE is received, an
 *            appropriate new alphabet object is allocated and passed
 *            back to the caller via <*abc>.  If the alphabet is
 *            already known, <*ret_abc> is that alphabet, and the new
 *            OPROFILE's alphabet type is verified to agree with it. This
 *            mechanism allows an application to let the first OPROFILE
 *            determine the alphabet type for the application, while
 *            still keeping the alphabet under the application's scope
 *            of control.
 *
 * Returns:   <eslOK> on success. <*ret_om> contains the received OPROFILE;
 *            it is allocated here, and the caller is responsible for
 *            free'ing it.  <*buf> may have been reallocated to a
 *            larger size, and <*nalloc> may have been increased.  If
 *            <*abc> was passed as <NULL>, it now points to an
 *            <ESL_ALPHABET> object that was allocated here; caller is
 *            responsible for free'ing this.
 *            
 *            Returns <eslEOD> if an end-of-data signal was received.
 *            In this case, <*buf>, <*nalloc>, and <*abc> are left unchanged,
 *            and <*ret_om> is <NULL>.
 *            
 *            Returns <eslEINCOMPAT> if the OPROFILE is in a different alphabet
 *            than <*abc> said to expect. In this case, <*abc> is unchanged,
 *            <*buf> and <*nalloc> may have been changed, and <*ret_om> is
 *            <NULL>.
 *            
 * Throws:    <eslEMEM> on allocation error, in which case <*ret_om> is 
 *            <NULL>.           
 */
int
p7_oprofile_MPIRecv(int source, int tag, MPI_Comm comm, char **buf, int *nalloc, ESL_ALPHABET **abc, P7_OPROFILE **ret_om)
{
  int         status;
  int         code;
  P7_OPROFILE     *om     = NULL;
  int         n;
  int         pos;
  MPI_Status  mpistatus;

  /* Probe first, because we need to know if our buffer is big enough. */
  MPI_Probe(source, tag, comm, &mpistatus);
  MPI_Get_count(&mpistatus, MPI_PACKED, &n);

  /* Make sure the buffer is allocated appropriately */
  if (*buf == NULL || n > *nalloc) {
    void *tmp;
    ESL_RALLOC(*buf, tmp, sizeof(char) * n);
    *nalloc = n; 
  }

  /* Receive the packed work unit */
  MPI_Recv(*buf, n, MPI_PACKED, source, tag, comm, &mpistatus);

  /* Unpack it, looking at the status code prefix for EOD/EOK  */
  pos = 0;
  if (MPI_Unpack(*buf, n, &pos, &code, 1, MPI_INT, comm) != 0) ESL_XEXCEPTION(eslESYS, "mpi unpack failed");
  if (code == eslEOD)  { *ret_om = NULL;  return eslEOD; }

  return p7_oprofile_MPIUnpack(*buf, *nalloc, &pos, comm, abc, ret_om);

 ERROR:
  if (om != NULL) p7_oprofile_Destroy(om);
  return status;
}
Author: Denis84 | Project: EPA-WorkBench | Lines: 90 | Source: mpi.c
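
A typical caller, following the Purpose notes above, passes *buf = NULL and *nalloc = 0 on the first call and reuses the buffer afterwards. A minimal sketch (the tag value and the loop body are illustrative, not taken from HMMER):

/* Illustrative caller-side buffer reuse for p7_oprofile_MPIRecv. */
char         *buf    = NULL;   /* let the receive routine allocate it       */
int           nalloc = 0;
ESL_ALPHABET *abc    = NULL;   /* first received OPROFILE sets the alphabet */
P7_OPROFILE  *om     = NULL;

while (p7_oprofile_MPIRecv(MPI_ANY_SOURCE, 0, MPI_COMM_WORLD,
                           &buf, &nalloc, &abc, &om) == eslOK)
{
  /* ... process om ... */
  p7_oprofile_Destroy(om);     /* caller owns each received profile */
}
free(buf);                     /* caller also owns the working buffer */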

Example 10: p7_hmm_mpi_Recv

/* Function:  p7_hmm_mpi_Recv()
 * Synopsis:  Receives an HMM as a work unit from an MPI sender.
 *
 * Purpose:   Receive a work unit that consists of a single HMM
 *            sent by MPI <source> (<0..nproc-1>, or
 *            <MPI_ANY_SOURCE>) tagged as <tag> for MPI communicator <comm>.
 *            
 *            Work units are prefixed by a status code that gives the
 *            number of HMMs to follow; here, 0 or 1 (but in the future,
 *            we could easily extend to sending several HMMs in one 
 *            packed buffer). If we receive a 1 code and we successfully
 *            unpack an HMM, this routine will return <eslOK> and a non-<NULL> <*ret_hmm>.
 *            If we receive a 0 code (a shutdown signal), 
 *            this routine returns <eslEOD> and <*ret_hmm> is <NULL>.
 *   
 *            Caller provides a working buffer <*buf> of size
 *            <*nalloc> characters. These are passed by reference, so
 *            that <*buf> can be reallocated and <*nalloc> increased
 *            if necessary. As a special case, if <*buf> is <NULL> and
 *            <*nalloc> is 0, the buffer will be allocated
 *            appropriately, but the caller is still responsible for
 *            free'ing it.
 *            
 *            Caller may or may not already know what alphabet the HMM
 *            is expected to be in.  A reference to the current
 *            alphabet is passed in <byp_abc>. If the alphabet is unknown,
 *            pass <*byp_abc = NULL>, and when the HMM is received, an
 *            appropriate new alphabet object is allocated and passed
 *            back to the caller via <*abc>.  If the alphabet is
 *            already known, <*byp_abc> is that alphabet, and the new
 *            HMM's alphabet type is verified to agree with it. This
 *            mechanism allows an application to let the first HMM
 *            determine the alphabet type for the application, while
 *            still keeping the alphabet under the application's scope
 *            of control.
 *
 * Args:      source  - index of MPI sender, 0..nproc-1 (0=master), or MPI_ANY_SOURCE
 *            tag     - MPI message tag;  MPI_ANY_TAG, or a specific message tag (0..32767 will work on any MPI)
 *            comm    - MPI communicator; MPI_COMM_WORLD, or a specific MPI communicator
 *            buf     - working buffer (for receiving packed message);
 *                      if <*buf> == NULL, a <*buf> is allocated and returned;
 *                      if <*buf> != NULL, it is used (and may be reallocated)
 *            nalloc  - allocation size of <*buf> in bytes; pass 0 if <*buf==NULL>.           
 *            byp_abc - BYPASS: <*byp_abc> == ESL_ALPHABET *> if known;
 *                              <*byp_abc> == NULL> if alphabet unknown.
 *            ret_hmm  - RETURN: newly allocated/received profile
 *
 * Returns:   <eslOK> on success. <*ret_hmm> contains the received HMM;
 *            it is allocated here, and the caller is responsible for
 *            free'ing it.  <*buf> may have been reallocated to a
 *            larger size, and <*nalloc> may have been increased.  If
 *            <*abc> was passed as <NULL>, it now points to an
 *            <ESL_ALPHABET> object that was allocated here; caller is
 *            responsible for free'ing this.
 *            
 *            Returns <eslEOD> if an end-of-data signal was received.
 *            In this case, <*buf>, <*nalloc>, and <*abc> are left unchanged,
 *            and <*ret_hmm> is <NULL>.
 *            
 *            Returns <eslEINCOMPAT> if the HMM is in a different alphabet
 *            than <*abc> said to expect. In this case, <*abc> is unchanged,
 *            <*buf> and <*nalloc> may have been changed, and <*ret_hmm> is
 *            <NULL>.
 *            
 * Throws:    <eslEMEM> on allocation error, and <eslESYS> on MPI communication
 *            errors; in either case <*ret_hmm> is <NULL>.           
 */
int
p7_hmm_mpi_Recv(int source, int tag, MPI_Comm comm, char **buf, int *nalloc, ESL_ALPHABET **byp_abc, P7_HMM **ret_hmm)
{
  int         pos = 0;
  int         code;
  int         n;
  MPI_Status  mpistatus;
  int         status;

  /* Probe first, because we need to know if our buffer is big enough. */
  if ( MPI_Probe(source, tag, comm, &mpistatus)  != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi probe failed");
  if ( MPI_Get_count(&mpistatus, MPI_PACKED, &n) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi get count failed");

  /* Make sure the buffer is allocated appropriately */
  if (*buf == NULL || n > *nalloc) 
    {
      ESL_REALLOC(*buf, sizeof(char) * n);
      *nalloc = n; 
    }

  /* Receive the entire packed work unit */
  if (MPI_Recv(*buf, n, MPI_PACKED, source, tag, comm, &mpistatus) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi recv failed");

  /* Unpack the status code prefix */
  if (MPI_Unpack(*buf, n, &pos, &code, 1, MPI_INT, comm) != MPI_SUCCESS) ESL_EXCEPTION(eslESYS, "mpi unpack failed");

  if      (code == 0) { status = eslEOD; *ret_hmm = NULL; }
  else if (code == 1)   status = p7_hmm_mpi_Unpack(*buf, *nalloc, &pos, comm, byp_abc, ret_hmm);
  else                  ESL_EXCEPTION(eslESYS, "bad mpi buffer transmission code");
  return status;

 ERROR: /* from ESL_REALLOC only */
  *ret_hmm = NULL;
//......... part of this function body omitted here .........
Author: EddyRivasLab | Project: hmmer | Lines: 101 | Source: p7_hmm_mpi.c

Example 11: PackUnpack

/* Extract the source array into the dest array using the DARRAY datatype.
   "count" integers are returned in destArray */
int PackUnpack( MPI_Datatype darraytype, const int srcArray[], int destArray[],
		int count )
{
    int packsize, position;
    int *packArray;

    MPI_Type_commit( &darraytype );
    MPI_Pack_size( 1, darraytype, MPI_COMM_SELF, &packsize );
    packArray = (int *)malloc( packsize );
    if (!packArray) {
	fprintf( stderr, "Unable to allocate pack array of size %d\n", 
		 packsize );
	MPI_Abort( MPI_COMM_WORLD, 1 );
        exit(1);
    }
    position = 0;
    MPI_Pack( (int*)srcArray, 1, darraytype, packArray, packsize, &position, 
	      MPI_COMM_SELF );
    packsize = position;
    position = 0;
    MPI_Unpack( packArray, packsize, &position, destArray, count, MPI_INT, 
		MPI_COMM_SELF );
    free( packArray );
    return 0;
}
Author: Julio-Anjos | Project: simgrid | Lines: 27 | Source: darray-cyclic.c
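
The darraytype argument is normally built with MPI_Type_create_darray before calling this helper (PackUnpack commits it itself). A minimal sketch for a 1-D block-cyclic distribution; the global size, block size, and the rank/nprocs variables are illustrative:

/* Sketch: build a 1-D block-cyclic DARRAY type; sizes are illustrative. */
int gsizes[1]   = { 100 };                    /* global array length */
int distribs[1] = { MPI_DISTRIBUTE_CYCLIC };
int dargs[1]    = { 4 };                      /* block size          */
int psizes[1]   = { nprocs };                 /* 1-D process grid    */
MPI_Datatype darraytype;

MPI_Type_create_darray(nprocs, rank, 1, gsizes, distribs, dargs, psizes,
                       MPI_ORDER_C, MPI_INT, &darraytype);
/* ... PackUnpack(darraytype, srcArray, destArray, count); ... */
MPI_Type_free(&darraytype);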

Example 12: SendPrimalSolutiontoServer

/* ------------------------------------------------------------------------- */
void SendPrimalSolutiontoServer(char       *stop,
				                char       *sleep_ag,
				                PrimalType *PrimalSol,
				                MPI_Comm communicator)
{
 
  int method = AGENT_SEND_SOLUTION,
      sizeOfBuffer = 0,
      position = 0;
             
  char *buffer;
  
  
  sizeOfBuffer = ( (sizeof(int)) + 
                   ((sizeof(char)) * TotalAgents) + 
                   ((sizeof(int)) * nb_col) +
                   (sizeof(int)) + 
                   (sizeof(double)) + 
                   sizeof(unsigned int)
                  ); 
   
  buffer = malloc(sizeOfBuffer);
  
  MPI_Pack(&method, 1, MPI_INT, buffer, sizeOfBuffer, &position, communicator);
  MPI_Pack(PrimalSol->agent, TotalAgents, MPI_CHAR, buffer, sizeOfBuffer, &position, communicator);
  MPI_Pack(PrimalSol->var_x, nb_col, MPI_INT, buffer, sizeOfBuffer, &position, communicator);
  MPI_Pack(&PrimalSol->proc_time, 1, MPI_INT, buffer, sizeOfBuffer, &position, communicator);
  MPI_Pack(&PrimalSol->value, 1, MPI_DOUBLE, buffer, sizeOfBuffer, &position, communicator);  
  MPI_Pack(&PrimalSol->agent_ID, 1, MPI_UNSIGNED, buffer, sizeOfBuffer, &position, communicator);
  
  // The agent sends the solution generated or modified by one of the primal agents
  // to the primal-solution memory server.
  
  MPI_Send(buffer, position, MPI_PACKED, 0, 1, communicator);
  
  
  char messageConfirm[2 * sizeof(char)];
  MPI_Status status;
  int positionConfirm = 0;
  
  MPI_Recv(messageConfirm, 2, MPI_PACKED, MPI_ANY_SOURCE, MPI_ANY_TAG, communicator, &status);
  MPI_Unpack(messageConfirm, 2 * sizeof(char), &positionConfirm, stop, 1, MPI_CHAR, communicator);
  MPI_Unpack(messageConfirm, 2 * sizeof(char), &positionConfirm, sleep_ag, 1, MPI_CHAR, communicator);
  
  free(buffer);
  //printf("\n\n === Fim agentrot SendPrimal \n\n");
}
Author: dnaziozeno | Project: ateamscp | Lines: 48 | Source: agentrot.c

Example 13: FC_FUNC

FC_FUNC( mpi_unpack , MPI_UNPACK )
     ( void *inbuf, int *insize, int *position,
       void *outbuf, int *outcount, int *datatype,
       int *comm, int *ierror )
{
  *ierror=MPI_Unpack( inbuf, *insize, position,
                      outbuf, *outcount, *datatype, *comm);
}
Author: ACME-Climate | Project: cime | Lines: 8 | Source: pack.c

Example 14: send_pol

//polymer status
struct s_polymer *send_pol(int iproc, int nprocs, int nback, MPI_Datatype Backtype, MPI_Datatype Sidetype,  MPI_Datatype Rottype, struct s_polymer *startp, MPI_Status astatus, int npol, int shell, int nosidechains)
{
	int i, j, k, l,position,buffer_size;

	char buffer[buffer_max];
	(startp+npol)->nback = nback;
	buffer_size=(sizeof(struct s_back)*nback);
//	fprintf(stderr,"send_pol: BUFFER SIZE IS %d\n",buffer_size);
	if(buffer_size>buffer_max)
        {
        	fprintf(stderr,"Buffer too small\n");
                MPI_Finalize();
                exit(1);
        }

	if(iproc==0) 
	{
		position=0;
		for(j=0;j<nback;j++)
			MPI_Pack(((startp+npol)->back)+j,1,Backtype,buffer,buffer_size,&position,MPI_COMM_WORLD);
		for(i=1; i<nprocs; i++)
			MPI_Send(buffer,position,MPI_PACKED,i, 300+100*i, MPI_COMM_WORLD);
	}

	if(iproc!=0)
	{
		MPI_Recv(buffer,buffer_size,MPI_PACKED,0, 300+100*iproc, MPI_COMM_WORLD, &astatus);		
		position=0;
		for(j=0;j<nback;j++)
			MPI_Unpack(buffer,buffer_size,&position,((startp+npol)->back)+j,1,Backtype,MPI_COMM_WORLD);	
	}

	if(!(nosidechains))
        {
		if(iproc==0)
		{
			for(i=1; i<nprocs; i++) for(j=0; j<nback;j++) for(k=0; k<((startp->back)+j)->nside;k++) 
					MPI_Send(((((startp+npol)->back)+j)->side)+k, 1, Sidetype, i, 2000+100*i+10*j+k, MPI_COMM_WORLD);
		}	

	if(iproc!=0) for(j=0; j<nback;j++) for(k=0; k<((startp->back)+j)->nside;k++)
					MPI_Recv(((((startp+npol)->back)+j)->side)+k, 1, Sidetype, 0, 2000+100*iproc+10*j+k, MPI_COMM_WORLD, &astatus);
		if(iproc==0) for(i=1; i<nprocs; i++) for(j=0; j<nback;j++) for(k=0; k<((startp->back)+j)->nside;k++) for(l=0; l<((startp->back)+j)->nrot; l++)
					MPI_Send(((((((startp+npol)->back)+j)->side)+k)->rot)+l, 1, Rottype, i, 10000+1000*i+100*j+10*k+l, MPI_COMM_WORLD);
		if(iproc!=0) for(j=0; j<nback;j++) for(k=0; k<((startp->back)+j)->nside;k++) for(l=0; l<((startp->back)+j)->nrot; l++)
					MPI_Recv(((((((startp+npol)->back)+j)->side)+k)->rot)+l, 1, Rottype, 0, 10000+1000*iproc+100*j+10*k+l , MPI_COMM_WORLD, &astatus);	
	}


////////////////////////////////////////////////////////////////////////////////
	if(iproc!=0)
	{
		for(i=0; i<nback; i++) ((startp+npol)->vback)[(((startp+npol)->back)+i)->ia ] = &(((((startp+npol)->back)+i)->pos));
		for(i=0; i<nback; i++) for(j=0; j<(((startp+npol)->back)+i)->nside; j++) ((startp+npol)->vback)[ (((((startp+npol)->back)+i)->side)+j)->ia ] = &((((((startp+npol)->back)+i)->side)+j)->pos);
	}
		
	return startp;
}
Author: zhanyinx | Project: Montegrappa-1.2 | Lines: 59 | Source: MPIfunc.c

Example 15: deserializeSolution

void deserializeSolution(char* buffer, int& position, vector<Move>& _solution) {
    int size;
    MPI_Unpack(buffer, BUFFER_SIZE, &position, &size, 1, MPI_INT, MPI_COMM_WORLD);
    for (int i = 0; i < size; ++i) {
        Move move;
        move.deserialize(buffer, position);
        _solution.push_back(move);
    }
}
Author: cerevka | Project: Towers-of-Hanoi-Parallel | Lines: 9 | Source: Solver.cpp


Note: The MPI_Unpack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution or use must follow each project's license. Do not reproduce without permission.