

Python MPI.SUM Attribute Code Examples

This article collects typical usage examples of the Python MPI.SUM attribute from the mpi4py package. If you are wondering what MPI.SUM is for, how to use it, or want to see it in real code, the curated examples below should help. You can also explore other usages of the mpi4py.MPI module that this attribute belongs to.


The following presents 13 code examples of the MPI.SUM attribute, sorted by popularity by default.
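To set the stage, here is a minimal, self-contained sketch of MPI.SUM used with both of mpi4py's reduction APIs (the script name and rank count are illustrative, not taken from any of the projects below):

# minimal_sum.py -- run with e.g. `mpirun -n 4 python minimal_sum.py`
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Lowercase allreduce handles generic Python objects (here, one scalar per rank).
rank_total = comm.allreduce(comm.Get_rank(), op=MPI.SUM)

# Uppercase Allreduce handles buffer-like objects such as NumPy arrays.
local = np.full(3, comm.Get_rank(), dtype=np.float64)
summed = np.empty_like(local)
comm.Allreduce(local, summed, op=MPI.SUM)

if comm.Get_rank() == 0:
    print(rank_total, summed)  # with 4 ranks: 6, and array([6., 6., 6.])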

Example 1: adapt_param_noise

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def adapt_param_noise(self):
        if self.param_noise is None:
            return 0.

        # Perturb a separate copy of the policy to adjust the scale for the next "real" perturbation.
        batch = self.memory.sample(batch_size=self.batch_size)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={
            self.param_noise_stddev: self.param_noise.current_stddev,
        })
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={
            self.obs0: batch['obs0'],
            self.param_noise_stddev: self.param_noise.current_stddev,
        })

        mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size()
        self.param_noise.adapt(mean_distance)
        return mean_distance 
Author: Hwhitetooth, Project: lirpg, Lines: 19, Source: ddpg.py
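Averaging the policy distance over all workers (allreduce followed by division by the world size) means every process adapts its parameter-noise stddev identically, so the perturbation scale stays consistent across the distributed run.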

Example 2: compute_gradients

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def compute_gradients(self, loss, var_list, **kwargs):
        grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
        grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
        flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
        shapes = [v.shape.as_list() for g, v in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]

        num_tasks = self.comm.Get_size()
        buf = np.zeros(sum(sizes), np.float32)

        def _collect_grads(flat_grad):
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            return buf

        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
        avg_flat_grad.set_shape(flat_grad.shape)
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
                    for g, (_, v) in zip(avg_grads, grads_and_vars)]

        return avg_grads_and_vars 
Author: MaxSobolMark, Project: HardRLWithYoutube, Lines: 24, Source: mpi_adam_optimizer.py
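The pattern above flattens all gradients into one contiguous float32 vector, averages that vector across workers with a single Allreduce wrapped in tf.py_func, then splits and reshapes the result back into per-variable gradients, so the returned list is already synchronized across processes. Example 3 below applies the same idea in Spinning Up's mpi_tf.py.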

Example 3: compute_gradients

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def compute_gradients(self, loss, var_list, **kwargs):
        """
        Same as normal compute_gradients, except average grads over processes.
        """
        grads_and_vars = super().compute_gradients(loss, var_list, **kwargs)
        grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
        flat_grad = flat_concat([g for g, v in grads_and_vars])
        shapes = [v.shape.as_list() for g, v in grads_and_vars]
        sizes = [int(np.prod(s)) for s in shapes]

        num_tasks = self.comm.Get_size()
        buf = np.zeros(flat_grad.shape, np.float32)

        def _collect_grads(flat_grad):
            self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
            np.divide(buf, float(num_tasks), out=buf)
            return buf

        avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
        avg_flat_grad.set_shape(flat_grad.shape)
        avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
        avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
                    for g, (_, v) in zip(avg_grads, grads_and_vars)]

        return avg_grads_and_vars 
Author: openai, Project: spinningup, Lines: 27, Source: mpi_tf.py

Example 4: mpi_mean

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def mpi_mean(arr, axis=0, comm=None, keepdims=False):
    """
    calculates the mean of an array, using MPI

    :param arr: (np.ndarray)
    :param axis: (int or tuple or list) the axis to run the means over
    :param comm: (MPI Communicators) if None, MPI.COMM_WORLD
    :param keepdims: (bool) keep the other dimensions intact
    :return: (np.ndarray or Number, float) the global mean and the global count along the axis
    """
    arr = np.asarray(arr)
    assert arr.ndim > 0
    if comm is None:
        comm = MPI.COMM_WORLD
    xsum = arr.sum(axis=axis, keepdims=keepdims)
    size = xsum.size
    localsum = np.zeros(size + 1, arr.dtype)
    localsum[:size] = xsum.ravel()
    localsum[size] = arr.shape[axis]
    globalsum = np.zeros_like(localsum)
    comm.Allreduce(localsum, globalsum, op=MPI.SUM)
    return globalsum[:size].reshape(xsum.shape) / globalsum[size], globalsum[size] 
Author: Stable-Baselines-Team, Project: stable-baselines, Lines: 24, Source: mpi_moments.py
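A quick usage sketch (the rank count and data are illustrative; mpi_mean is the function defined just above):

# Run with e.g. `mpirun -n 2 python demo.py`.
import numpy as np
from mpi4py import MPI

local = np.full((4, 3), MPI.COMM_WORLD.Get_rank(), dtype=np.float64)
mean, count = mpi_mean(local, axis=0)
# With 2 ranks: count == 8.0, and every rank receives the same global mean.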

Example 5: _adapt_param_noise

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def _adapt_param_noise(self):
        """
        calculate the adaptation for the parameter noise

        :return: (float) the mean distance for the parameter noise
        """
        if self.param_noise is None:
            return 0.

        # Perturb a separate copy of the policy to adjust the scale for the next "real" perturbation.
        obs, *_ = self.replay_buffer.sample(batch_size=self.batch_size, env=self._vec_normalize_env)
        self.sess.run(self.perturb_adaptive_policy_ops, feed_dict={
            self.param_noise_stddev: self.param_noise.current_stddev,
        })
        distance = self.sess.run(self.adaptive_policy_distance, feed_dict={
            self.obs_adapt_noise: obs, self.obs_train: obs,
            self.param_noise_stddev: self.param_noise.current_stddev,
        })

        mean_distance = MPI.COMM_WORLD.allreduce(distance, op=MPI.SUM) / MPI.COMM_WORLD.Get_size()
        self.param_noise.adapt(mean_distance)
        return mean_distance 
Author: Stable-Baselines-Team, Project: stable-baselines, Lines: 24, Source: ddpg.py

Example 6: mpi_moments

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape,dtype=int)
    totalvec = np.zeros(n*2+1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(), 
        np.square(x).sum(axis=axis).ravel(), 
        np.array([x.shape[axis]],dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2*n]
    count = totalvec[2*n]
    if count == 0:
        mean = np.empty(newshape); mean[:] = np.nan
        std = np.empty(newshape); std[:] = np.nan
    else:
        mean = sum/count
        std = np.sqrt(np.maximum(sumsq/count - np.square(mean),0))
    return mean, std, count 
Author: AdamStelmaszczyk, Project: learning2run, Lines: 22, Source: mpi_moments.py
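The standard deviation here follows from the moment identity Var[x] = E[x^2] - (E[x])^2, evaluated on the globally summed first and second moments; the np.maximum(..., 0) guards against small negative variances produced by floating-point cancellation.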

Example 7: update

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def update(self, localg, stepsize):
        if self.t % 100 == 0:
            self.check_synced()
        localg = localg.astype('float32')
        if self.comm is not None:
            globalg = np.zeros_like(localg)
            self.comm.Allreduce(localg, globalg, op=MPI.SUM)
            if self.scale_grad_by_procs:
                globalg /= self.comm.Get_size()
        else:
            globalg = np.copy(localg)

        self.t += 1
        a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
        self.setfromflat(self.getflat() + step) 
Author: hiwonjoon, Project: ICML2019-TREX, Lines: 20, Source: mpi_adam.py
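The scalar a = stepsize * sqrt(1 - beta2**t) / (1 - beta1**t) is the standard Adam bias correction. Because the gradient is summed across processes (and optionally divided by the process count) before the moment updates, every worker computes an identical step, keeping the replicated parameters in sync between the periodic check_synced calls.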

Example 8: _mpi_average

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def _mpi_average(self, x):
        buf = np.zeros_like(x)
        MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
        buf /= MPI.COMM_WORLD.Get_size()
        return buf 
Author: Hwhitetooth, Project: lirpg, Lines: 7, Source: normalizer.py

Example 9: mpi_mean

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def mpi_mean(x, axis=0, comm=None, keepdims=False):
    x = np.asarray(x)
    assert x.ndim > 0
    if comm is None: comm = MPI.COMM_WORLD
    xsum = x.sum(axis=axis, keepdims=keepdims)
    n = xsum.size
    localsum = np.zeros(n+1, x.dtype)
    localsum[:n] = xsum.ravel()
    localsum[n] = x.shape[axis]
    globalsum = np.zeros_like(localsum)
    comm.Allreduce(localsum, globalsum, op=MPI.SUM)
    return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n] 
Author: Hwhitetooth, Project: lirpg, Lines: 14, Source: mpi_moments.py

Example 10: update

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def update(self, x):
        x = x.astype('float64')
        n = int(np.prod(self.shape))
        totalvec = np.zeros(n*2+1, 'float64')
        addvec = np.concatenate([x.sum(axis=0).ravel(), np.square(x).sum(axis=0).ravel(), np.array([len(x)],dtype='float64')])
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        self.incfiltparams(totalvec[0:n].reshape(self.shape), totalvec[n:2*n].reshape(self.shape), totalvec[2*n]) 
Author: Hwhitetooth, Project: lirpg, Lines: 9, Source: mpi_running_mean_std.py
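Packing the batch sum, sum of squares, and element count into one vector lets the running-mean/std update synchronize with a single Allreduce instead of three separate calls.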

Example 11: update

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def update(self, localg, stepsize):
        if self.t % 100 == 0:
            self.check_synced()
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()

        self.t += 1
        a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
        self.setfromflat(self.getflat() + step) 
Author: Hwhitetooth, Project: lirpg, Lines: 17, Source: mpi_adam.py

Example 12: safeAllreduceInPlace

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def safeAllreduceInPlace(comm, in_array):
    shape = in_array.shape
    length = len(shape)
    # just use 16 for blocksize, size of complex(double)
    chunk_size = get_max_blocksize_from_mem(list(shape),16.,MEM_SIZE,priority_list=numpy.arange(length)[::-1])
    task_list = generate_task_list(chunk_size,shape)
    for block in task_list:
        which_slice = [slice(*x) for x in block]
        tmp = in_array[tuple(which_slice)].copy()
        comm.Allreduce(MPI.IN_PLACE, tmp, op=MPI.SUM)
        in_array[tuple(which_slice)] = tmp 
Author: pyscf, Project: pyscf, Lines: 13, Source: mpi_helper.py
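The chunking above exists only to bound temporary memory; when the whole array fits comfortably, the MPI.IN_PLACE idiom it relies on can be used directly. A minimal sketch:

import numpy
from mpi4py import MPI

buf = numpy.ones(5)
# Every rank contributes buf; the element-wise sum overwrites buf on each rank.
MPI.COMM_WORLD.Allreduce(MPI.IN_PLACE, buf, op=MPI.SUM)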

Example 13: reduce

# Required module import: from mpi4py import MPI
# Or equivalently: from mpi4py.MPI import SUM
def reduce(sendbuf, op=MPI.SUM, root=0):
    sendbuf = numpy.asarray(sendbuf, order='C')
    shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))
    _assert(sendbuf.shape == shape and sendbuf.dtype.char == mpi_dtype)

    recvbuf = numpy.zeros_like(sendbuf)
    comm.Reduce(sendbuf, recvbuf, op, root)
    if rank == root:
        return recvbuf
    else:
        return sendbuf 
Author: pyscf, Project: pyscf, Lines: 13, Source: mpi.py
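A usage sketch, assuming the module-level comm and rank that reduce references above. Note that non-root ranks get their own sendbuf back, so only the root's return value holds the reduced result:

result = reduce(numpy.arange(4.0))  # op defaults to MPI.SUM, root to 0
if rank == 0:
    print(result)  # element-wise sum of arange(4.0) over all ranks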


Note: The mpi4py.MPI.SUM examples above were curated from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; consult the corresponding project's license before reusing or redistributing the code.