

Python numpy.asscalar Function Code Examples

This article collects typical usage examples of the numpy.asscalar function in Python. If you have been asking yourself: What exactly does Python's asscalar do? How is asscalar used? Where can I find real examples? Then congratulations, the curated function code examples below may be just the help you need.


A total of 15 code examples of the asscalar function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
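
Before the examples, a quick refresher: np.asscalar(a) converts a size-1 NumPy array or NumPy scalar into the equivalent native Python scalar, and is implemented as a.item(). Note that asscalar was deprecated in NumPy 1.16 and removed in NumPy 1.23, so on modern NumPy you should call .item() directly. A minimal sketch of both spellings:

    import numpy as np

    a = np.array([3.14])

    x = np.asscalar(a)   # 3.14 as a plain Python float (requires NumPy < 1.23)
    y = a.item()         # identical result; works on every NumPy version

    # Both raise ValueError if the array holds more than one element.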

Example 1: _node_to_dict

    def _node_to_dict(self, node):
        '''
        This method helps the master save a MasterNode tree in JSON format.

        Parameter(s):
            node: MasterNode - Root node of the tree to be converted to a dict.
        Return(s):
            result: dict - Dict representation of the tree.
        '''
        if node is None:
            return None

        result = {}

        if node.prop is None:
            result['prop'] = None
        else:
            result['prop'] = list(node.prop)

        if node.theta is None:
            result['theta'] = None
            result['tau'] = None
        else:
            # asscalar yields native Python floats, which are JSON serializable
            result['theta'] = np.asscalar(node.theta)
            result['tau'] = np.asscalar(node.tau)

        result['left'] = self._node_to_dict(node.left)
        result['right'] = self._node_to_dict(node.right)

        return result
Developer: wasit7, Project: ImageSearch, Lines of code: 30, Source: master.py

Example 2: build_seq_block

def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
    # block stimulus list and shuffle within each block
    q = len(stims.index)
    stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
    stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
    shuffle(stims)
    stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
    stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])

    # inter-stimulus interval is fixed at 2 for every trial
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)

    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)

    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index(i) for i in range(len(stims.index))]]

    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))

    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
Developer: tyarkoni, Project: nipymc, Lines of code: 31, Source: xsim.py
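
A side note on the pattern at the end of this example: np.random.normal(size=1) returns a 1-element array, which asscalar then unwraps. A minimal standalone sketch (the scale value here is made up for illustration) of equivalent spellings:

    import numpy as np

    sub_A_sd = 0.5  # hypothetical scale parameter

    x = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))  # NumPy < 1.23
    y = np.random.normal(size=1, scale=sub_A_sd).item()        # modern equivalent
    z = np.random.normal(scale=sub_A_sd)                       # simplest: size=None
                                                               # already returns a float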

Example 3: launch_configuration

    def launch_configuration(self, part):
        if self._is_direct:
            max_smem = self._max_shared_memory_needed_per_set_element
            smem_offset = max_smem * _WARPSIZE
            max_block = _device.get_attribute(driver.device_attribute.MAX_BLOCK_DIM_X)
            if max_smem == 0:
                block_size = max_block
            else:
                # nb: written for Python 2, where / on ints floor-divides;
                # under Python 3 these would need // to stay integral
                threads_per_sm = _AVAILABLE_SHARED_MEMORY / max_smem
                block_size = min(max_block, (threads_per_sm / _WARPSIZE) * _WARPSIZE)
            max_grid = _device.get_attribute(driver.device_attribute.MAX_GRID_DIM_X)
            grid_size = min(max_grid, (block_size + part.size) / block_size)

            grid_size = np.asscalar(np.int64(grid_size))
            block_size = (block_size, 1, 1)
            grid_size = (grid_size, 1, 1)

            required_smem = np.asscalar(max_smem * np.prod(block_size))
            return {'op2stride': self._it_space.size,
                    'smem_offset': smem_offset,
                    'WARPSIZE': _WARPSIZE,
                    'required_smem': required_smem,
                    'block_size': block_size,
                    'grid_size': grid_size}
        else:
            return {'op2stride': self._it_space.size,
                    'WARPSIZE': 32}
Developer: jabooth, Project: PyOP2, Lines of code: 27, Source: cuda.py

Example 4: evaluate

    def evaluate(self, state_batch):

        # Get an action batch
        actions = self.sess.run(self.action_output, feed_dict={self.map_input: state_batch})

        # Create summaries for the actions
        actions_mean = np.mean(np.asarray(actions, dtype=float), axis=0)
        self.actions_mean_plot += actions_mean

        # Only save files every PLOT_STEP steps
        if self.train_counter % PLOT_STEP == 0:

            self.actions_mean_plot /= PLOT_STEP

            summary_action_0 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[0]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[0]))])
            summary_action_1 = tf.Summary(value=[tf.Summary.Value(tag='actions_mean[1]',
                                                                  simple_value=np.asscalar(
                                                                      self.actions_mean_plot[1]))])
            self.summary_writer.add_summary(summary_action_0, self.train_counter)
            self.summary_writer.add_summary(summary_action_1, self.train_counter)

            self.actions_mean_plot = [0, 0]

        return actions
Developer: JakobBreuninger, Project: neurobotics, Lines of code: 26, Source: actor.py

Example 5: test_exclude_targets_combinations_subjectchunks

def test_exclude_targets_combinations_subjectchunks():
    partitioner = ChainNode([NFoldPartitioner(attr='subjects'),
                             ExcludeTargetsCombinationsPartitioner(
                                 k=1,
                                 targets_attr='chunks',
                                 space='partitions')],
                            space='partitions')
    # targets do not need even to be defined!
    ds = Dataset(np.arange(18).reshape(9, 2),
                 sa={'chunks': np.arange(9) // 3,
                     'subjects': np.arange(9) % 3})
    dss = list(partitioner.generate(ds))
    assert_equal(len(dss), 9)

    testing_subjs, testing_chunks = [], []
    for ds_ in dss:
        testing_partition = ds_.sa.partitions == 2
        training_partition = ds_.sa.partitions == 1
        # must be scalars -- so implicit test here
        # if not -- would be error
        testing_subj = np.asscalar(np.unique(ds_.sa.subjects[testing_partition]))
        testing_subjs.append(testing_subj)
        testing_chunk = np.asscalar(np.unique(ds_.sa.chunks[testing_partition]))
        testing_chunks.append(testing_chunk)
        # and those must not appear for training
        ok_(testing_subj not in ds_.sa.subjects[training_partition])
        ok_(testing_chunk not in ds_.sa.chunks[training_partition])
    # and we should have gone through all chunks/subjs pairs
    testing_pairs = set(zip(testing_subjs, testing_chunks))
    assert_equal(len(testing_pairs), 9)
    # yoh: equivalent to set(itertools.product(range(3), range(3))))
    #      but .product is N/A for python2.5
    assert_equal(testing_pairs, set(zip(*np.where(np.ones((3,3))))))
Developer: Soletmons, Project: PyMVPA, Lines of code: 33, Source: test_generators.py
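
This test uses asscalar as an implicit assertion: np.unique returns an array, and asscalar only succeeds when that array holds exactly one element. A standalone sketch of the trick (not from PyMVPA; assumes NumPy < 1.23, or swap in .item()):

    import numpy as np

    subjects = np.array([2, 2, 2])
    only = np.asscalar(np.unique(subjects))   # 2: exactly one unique value

    mixed = np.array([1, 2])
    np.asscalar(np.unique(mixed))             # ValueError: more than one unique
                                              # value, so the test fails loudly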

Example 6: find_tip_coordination

def find_tip_coordination(a, bondlength=2.6, bulk_nn=4):
    """
    Find position of tip in crack cluster from coordination
    """
    i, j = neighbour_list("ij", a, bondlength)
    nn = np.bincount(i, minlength=len(a))

    a.set_array('n_neighb', nn)
    g = a.get_array('groups')

    y = a.positions[:, 1]
    above = (nn < bulk_nn) & (g != 0) & (y > a.cell[1,1]/2.0)
    below = (nn < bulk_nn) & (g != 0) & (y < a.cell[1,1]/2.0)

    a.set_array('above', above)
    a.set_array('below', below)

    bond1 = np.asscalar(above.nonzero()[0][a.positions[above, 0].argmax()])
    bond2 = np.asscalar(below.nonzero()[0][a.positions[below, 0].argmax()])

    # These need to be ints, otherwise they are not JSON serializable.
    a.info['bond1'] = bond1
    a.info['bond2'] = bond2

    return bond1, bond2
Developer: libAtoms, Project: matscipy, Lines of code: 25, Source: crack.py
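
The comment in this example points at a common reason for reaching for asscalar: NumPy integer types are not JSON serializable. A minimal standalone sketch (not from matscipy) of the failure and the fix:

    import json
    import numpy as np

    bond1 = np.int64(42)

    try:
        json.dumps({'bond1': bond1})
    except TypeError as e:
        print(e)   # "Object of type int64 is not JSON serializable"

    json.dumps({'bond1': bond1.item()})   # '{"bond1": 42}': a plain int works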

Example 7: lpc_formants

def lpc_formants(signal, sr, num_formants, max_freq, time_step,
                 win_len, window_shape='gaussian'):
    output = {}
    new_sr = 2 * max_freq
    alpha = np.exp(-2 * np.pi * 50 * (1 / new_sr))
    proc = lfilter([1., -alpha], 1, signal)
    if sr > new_sr:
        proc = librosa.resample(proc, sr, new_sr)
    nperseg = int(win_len * new_sr)
    nperstep = int(time_step * new_sr)
    if window_shape == 'gaussian':
        window = gaussian(nperseg + 2, 0.45 * (nperseg - 1) / 2)[1:nperseg + 1]
    else:
        window = np.hanning(nperseg + 2)[1:nperseg + 1]
    indices = np.arange(int(nperseg / 2), proc.shape[0] - int(nperseg / 2) + 1, nperstep)
    num_frames = len(indices)
    for i in range(num_frames):
        if nperseg % 2 != 0:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2) + 1]
        else:
            X = proc[indices[i] - int(nperseg / 2):indices[i] + int(nperseg / 2)]
        frqs, bw = process_frame(X, window, num_formants, new_sr)
        formants = []
        for j, f in enumerate(frqs):
            if f < 50:
                continue
            if f > max_freq - 50:
                continue
            formants.append((np.asscalar(f), np.asscalar(bw[j])))
        missing = num_formants - len(formants)
        if missing:
            formants += [(None, None)] * missing
        output[indices[i] / new_sr] = formants
    return output
Developer: mmcauliffe, Project: python-acoustic-similarity, Lines of code: 34, Source: lpc.py

Example 8: rformat

def rformat(item, precision=2, pretty=True):
    #NOTE: LOOK AT pprint
    '''
    Apply numerical formatting recursively for arbitrarily nested iterables.
    '''
    if isinstance(item, str):
        return item
    
    if isinstance(item, (int, float)):
        return minfloatformat(item, precision)
        
    try:                #array-like items with len(item) in [0,1]
        #NOTE: This will suppress the type representation of the object str
        if isinstance(np.asscalar(item), str):
            #np.asscalar converts np types to python builtin types (Phew!!)
            return str(item)
            
        if isinstance(np.asscalar(item), (int, float)):
            return minfloatformat(item, precision)
    except Exception:
        #Item is not str, int, float, or convertible to such...
        pass
    
    if isinstance(item, np.ndarray):
        return np.array2string(item, precision=precision)
        #NOTE:  lots more functionality here
        
    return pformat(item)
Developer: apodemus, Project: recipes, Lines of code: 29, Source: string.py
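
The try/except above leans on how asscalar behaves for different inputs. A minimal sketch of those behaviors (assuming NumPy < 1.23, where asscalar still exists; a.item() behaves the same way):

    import numpy as np

    np.asscalar(np.array([7]))      # 7: a size-1 array unwraps to a Python int
    np.asscalar(np.float32(2.5))    # 2.5: NumPy scalar types unwrap too
    np.asscalar(np.array([1, 2]))   # ValueError: can only convert an array
                                    #   of size 1 to a Python scalar
    np.asscalar('text')             # AttributeError: str has no .item(),
                                    #   which the except above swallows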

Example 9: __init__

    def __init__(self, train_plans, purchased_plan):
        classes, indices, y = np.unique(purchased_plan.values, return_index=True, return_inverse=True)
        lov_classes, lov_indices, y_lov = np.unique(train_plans.values, return_index=True, return_inverse=True)
        old_to_new_purchased = dict()
        old_to_new_lov = dict()
        for k in range(len(classes)):
            # create inverse mapping that returns new class label given the old class label
            old_to_new_purchased[str(np.asscalar(purchased_plan.values[indices[k]]))] = k
        for k in range(len(lov_classes)):
            old_to_new_lov[str(np.asscalar(train_plans.values[lov_indices[k]]))] = k
        self.old_to_new = old_to_new_purchased
        self.old_to_new_lov = old_to_new_lov
        self.nclasses_purchased = len(classes)
        self.nclasses_lov = len(np.unique(train_plans.values))
        self.classes = classes
        self.classes_lov = lov_classes
        self.priors = np.zeros((self.nclasses_purchased, self.nclasses_lov))
        new_id = pd.Series(data=y, index=purchased_plan.index)
        for j in range(self.nclasses_lov):
            class_counts = np.bincount(new_id.loc[train_plans[train_plans == lov_classes[j]].index],
                                       minlength=len(classes))
            # priors[k, j] is fraction in class k (new label) with last observed value as class j (new label)
            if np.sum(class_counts) > 0:
                self.priors[:, j] = class_counts / float(np.sum(class_counts))

        prior_norm = self.priors.sum(axis=0)
        prior_norm[prior_norm == 0] = 1.0  # don't divide by zero
        self.priors /= prior_norm  # normalize so probabilities sum to one
Developer: brandonckelly, Project: allstate, Lines of code: 28, Source: boost_truncated_history2.py

Example 10: getLowerLimbAngles

 def getLowerLimbAngles(self, tf, side):
     """
     Defines the joint angles of the human legs, starting from the position
     of the tf generated according to the data coming from the Kinect.

     @param tf tf
     @param side 'L' for left lower limb, 'R' for right lower limb
     """
     if side == 'L':        
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'left_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'left_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'left_foot', self.kin_frame, self.last_updated)
     else:            
         self.last_updated, sys_hip = utils.getSkeletonTransformation(self.id, tf, 'right_hip', self.kin_frame, self.last_updated)
         self.last_updated, sys_knee = utils.getSkeletonTransformation(self.id, tf, 'right_knee', self.kin_frame, self.last_updated)
         self.last_updated, sys_foot = utils.getSkeletonTransformation(self.id, tf, 'right_foot', self.kin_frame, self.last_updated)
         
     if sys_hip is None or sys_knee is None or sys_foot is None:
         return None
     
     vect_kh = (sys_hip[0:3,3] - sys_knee[0:3,3])/  \
               numpy.linalg.norm([sys_hip[0:3,3] - sys_knee[0:3,3]])
     vect_fk = (sys_knee[0:3,3] - sys_foot[0:3,3])/ \
               numpy.linalg.norm([sys_knee[0:3,3] - sys_foot[0:3,3]])
     q2 = - numpy.arccos(utils.checkArg(numpy.asscalar(numpy.dot(vect_kh.T,vect_fk))))
     
     q1 = numpy.asscalar(numpy.arccos(vect_kh[1]))                                       #[0,pi]
     if numpy.asscalar(numpy.arcsin(vect_kh[2])) < 0:                                    #[-pi,pi]
         q1 = -q1 
         
     return [q1, q2]
Developer: personalrobotics, Project: humanpy, Lines of code: 31, Source: humantracking_kinect1.py

Example 11: stations_json

def stations_json():

    stations = np.recfromcsv('chi-stations.csv', delimiter=',')

    output = {'type': "FeatureCollection", 'features':[]}

    for s in stations:

        output['features'].append({
            'type': "Feature",
            "id": np.asscalar(s[0]),
            "geometry": {
                "type":"Point",
                "coordinates":[np.asscalar(s[2]),np.asscalar(s[1])] #long, lat
            },
            "geometry_name": "origin_geom",
            "properties": {
                'name': s[3]
            }})

    f = io.open('chi-stations.json', 'w', encoding='utf-8') 
    f.write(unicode(json.dumps(output, ensure_ascii=False)))
    f.close()

    json_output=open('chi-stations.json')
    output_data = json.load(json_output)
    pprint(output_data)
    json_output.close()
Developer: inachen, Project: CS171-Final-Project, Lines of code: 28, Source: dataclean.py

Example 12: noisy_alignment_similarity_transform

def noisy_alignment_similarity_transform(source, target, noise_type='uniform',
                                         noise_percentage=0.1,
                                         allow_alignment_rotation=False):
    r"""
    Constructs and perturbs the optimal similarity transform between the source
    and target shapes by adding noise to its parameters.

    Parameters
    ----------
    source : `menpo.shape.PointCloud`
        The source pointcloud instance used in the alignment
    target : `menpo.shape.PointCloud`
        The target pointcloud instance used in the alignment
    noise_type : ``{'uniform', 'gaussian'}``, optional
        The type of noise to be added.
    noise_percentage : `float` in ``(0, 1)`` or `list` of `len` `3`, optional
        The standard percentage of noise to be added. If `float`, then the same
        amount of noise is applied to the scale, rotation and translation
        parameters of the optimal similarity transform. If `list` of
        `float` it must have length 3, where the first, second and third elements
        denote the amount of noise to be applied to the scale, rotation and
        translation parameters, respectively.
    allow_alignment_rotation : `bool`, optional
        If ``False``, then the rotation is not considered when computing the
        optimal similarity transform between source and target.

    Returns
    -------
    noisy_alignment_similarity_transform : `menpo.transform.Similarity`
        The noisy Similarity Transform between source and target.
    """
    if isinstance(noise_percentage, float):
        noise_percentage = [noise_percentage] * 3
    elif len(noise_percentage) == 1:
        noise_percentage *= 3

    similarity = AlignmentSimilarity(source, target,
                                     rotation=allow_alignment_rotation)

    if noise_type == 'gaussian':
        s = noise_percentage[0] * (0.5 / 3) * np.asscalar(np.random.randn(1))
        r = noise_percentage[1] * (180 / 3) * np.asscalar(np.random.randn(1))
        t = noise_percentage[2] * (target.range() / 3) * np.random.randn(2)

        s = scale_about_centre(target, 1 + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    elif noise_type == 'uniform':
        s = noise_percentage[0] * 0.5 * (2 * np.asscalar(np.random.rand(1)) - 1)
        r = noise_percentage[1] * 180 * (2 * np.asscalar(np.random.rand(1)) - 1)
        t = noise_percentage[2] * target.range() * (2 * np.random.rand(2) - 1)

        s = scale_about_centre(target, 1. + s)
        r = rotate_ccw_about_centre(target, r)
        t = Translation(t, source.n_dims)
    else:
        raise ValueError('Unexpected noise type. '
                         'Supported values are {gaussian, uniform}')

    return similarity.compose_after(t.compose_after(s.compose_after(r)))
Developer: geshiming, Project: menpofit, Lines of code: 60, Source: fitter.py

Example 13: run_epoch

def run_epoch(session, m, mode):

    total_cost = 0.0
    num_samples_seen = 0      # note: never updated in this snippet
    total_num_correct_predictions = 0

    if mode == 'training':
        if flags.first_training_epoch:
            flags.first_training_epoch = False

        num_correct_predictions, num_samples, _ = session.run(
            [m.num_correct_predictions, m.num_samples, m.train_op])

        avg_accuracy = num_correct_predictions / num_samples
        print("Traversed through %d samples." % num_samples_seen)
        return np.asscalar(avg_accuracy)

    else:
        if flags.first_validation_epoch or flags.testing_epoch:
            flags.first_validation_epoch = False
            flags.testing_epoch = False

        cost, num_correct_predictions, num_samples = session.run(
            [m.cost, m.num_correct_predictions, m.num_samples])

        accuracy = num_correct_predictions / num_samples
        print("total cost is %.4f" % total_cost)
        return np.asscalar(accuracy)
Developer: AaronZhouQian, Project: lstm_tensorflow_imdb, Lines of code: 26, Source: lstm_tf_imdb3.py

Example 14: get_statistics

  def get_statistics(self, attribute=0):
    attribute = self._storage["attribute/%s" % attribute]

    if "min" not in attribute.attrs or "max" not in attribute.attrs:
      attribute_min = None
      attribute_max = None

      chunk_size = 1000
      for begin in numpy.arange(0, len(attribute), chunk_size):
        chunk = attribute[begin : begin + chunk_size]
        if attribute.dtype.char in ["O", "S", "U"]:
          data_min = min(chunk)
          data_max = max(chunk)
          attribute_min = str(data_min) if attribute_min is None else str(min(data_min, attribute_min))
          attribute_max = str(data_max) if attribute_max is None else str(max(data_max, attribute_max))
        else:
          # drop NaNs before reducing, then unwrap the 0-d results with asscalar
          chunk = chunk[numpy.invert(numpy.isnan(chunk))]
          if len(chunk):
            data_min = numpy.asscalar(chunk.min())
            data_max = numpy.asscalar(chunk.max())
            attribute_min = data_min if attribute_min is None else min(data_min, attribute_min)
            attribute_max = data_max if attribute_max is None else max(data_max, attribute_max)

      if attribute_min is not None:
        attribute.attrs["min"] = attribute_min
      if attribute_max is not None:
        attribute.attrs["max"] = attribute_max

    return dict(min=attribute.attrs.get("min", None), max=attribute.attrs.get("max", None))
Developer: gitter-badger, Project: slycat, Lines of code: 29, Source: hdf5.py

Example 15: __init__

    def __init__(self, obj):
        self.obj = obj
        parameters = []
        names = []
        ties = {}

        def add_par(p, name):
            if not isinstance(p, Parameter):
                p = Parameter(p, p)
            for par_check in parameters + [None]:
                if p is par_check:
                    break
            if par_check is not None:
                # if the above loop encountered a break, it
                # means the parameter is tied

                # we will rename the parameter so that when it is printed it
                # better reflects how it is used
                new_name = tied_name(names[parameters.index(p)], name)
                names[parameters.index(p)] = new_name

                if new_name in ties:
                    # if there is already an existing tie group we need to
                    # do a few things to get the name right
                    group = ties[new_name]

                else:
                    group = [name]

                group.append(name)
                ties[new_name] = group

            else:
                if not p.fixed:
                    parameters.append(p)
                    names.append(name)

        # find all the Parameter's in the obj
        for name, par in sorted(iter(obj.parameters.items()), key=lambda x: x[0]):
            if isinstance(par, ComplexParameter):
                add_par(par.real, name+'.real')
                add_par(par.imag, name+'.imag')
            elif isinstance(par, dict):
                for key, val in par.items():
                    add_par(val, name + '_' + key)
            elif isinstance(par, xr.DataArray):
                if len(par.dims) == 1:
                    dimname = par.dims[0]
                else:
                    raise ParameterSpecificationError('Multi-dimensional parameters are not supported')
                for key in par[dimname]:
                    add_par(np.asscalar(par.sel(**{dimname:key})),name+'_'+np.asscalar(key))
            elif isinstance(par, Parameter):
                add_par(par, name)

        parameters = deepcopy(parameters)
        for i, name in enumerate(names):
            parameters[i].name = name
        self.parameters = parameters
        self.ties = ties
Developer: barkls, Project: holopy, Lines of code: 60, Source: model.py
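
The xr.DataArray branch above unwraps both the selected value and the coordinate label with asscalar. A minimal standalone sketch of that pattern (the array contents and dimension name are hypothetical, not from holopy; .item() is the modern spelling of asscalar):

    import xarray as xr

    par = xr.DataArray([1.5, 2.5], dims='alpha', coords={'alpha': ['a', 'b']})

    dimname = par.dims[0]                         # 'alpha'
    for key in par[dimname]:
        value = par.sel(**{dimname: key}).item()  # 1.5, then 2.5
        label = key.item()                        # 'a', then 'b'
        print(label, value)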


Note: The numpy.asscalar function examples in this article were compiled by 纯净天空 (vimsky) from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.