

Python recfunctions.drop_fields Method Code Examples

This article collects typical usage examples of the numpy.lib.recfunctions.drop_fields method in Python. If you are wondering what exactly recfunctions.drop_fields does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, numpy.lib.recfunctions.


Below are 11 code examples of the recfunctions.drop_fields method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
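
As a quick orientation before the examples, here is a minimal, self-contained sketch of the basic drop_fields call (the field names and values are made up for illustration):

import numpy as np
from numpy.lib.recfunctions import drop_fields

# a small structured array with three fields
a = np.array([(1, 2.5, b'x'), (2, 3.5, b'y')],
             dtype=[('id', 'i4'), ('value', 'f8'), ('tag', 'S1')])

# drop one field; usemask=False returns a plain ndarray rather than a masked array
b = drop_fields(a, 'tag', usemask=False)
print(b.dtype.names)  # ('id', 'value')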

Example 1: remove_cols

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def remove_cols(M, col_names):
    """Remove columns specified by col_names from structured array

    Parameters
    ----------
    M : numpy.ndarray
        structured array
    col_names : list of str
        names for columns to remove

    Returns
    -------
    numpy.ndarray
        structured array without columns
    """
    M, col_names = check_consistent(M, col_names=col_names)
    return nprf.drop_fields(M, col_names, usemask=False) 
Developer: dssg, Project: diogenes, Lines: 19, Source: utils.py

Example 2: pop_columns

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def pop_columns(self, names):
        """
        Pop several columns from the table

        Parameters
        ----------

        names: sequence
            A list containing the names of the columns to remove

        Returns
        -------

        values: list
            a list of the removed columns
        """

        if not hasattr(names, '__iter__') or isinstance(names, basestring):
            names = [names]

        p = [self[k] for k in names]

        _names = {self.resolve_alias(k) for k in names}
        self.data = recfunctions.drop_fields(self.data, _names)
        for k in names:
            self._aliases.pop(k, None)
            self._units.pop(k, None)
            self._desc.pop(k, None)

        return p 
Developer: annayqho, Project: TheCannon, Lines: 32, Source: simpletable.py

Example 3: remove_from_rec

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def remove_from_rec(rec_array, name):
    return _drop_fields(rec_array, name, usemask=False, asrecarray=True) 
Developer: jungmannlab, Project: picasso, Lines: 4, Source: lib.py
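
A brief standalone sketch of the asrecarray=True behavior used here (the array contents are illustrative, not taken from picasso):

import numpy as np
from numpy.lib.recfunctions import drop_fields

locs = np.array([(1.0, 2.0, 0.9)],
                dtype=[('x', 'f8'), ('y', 'f8'), ('lpx', 'f8')])
out = drop_fields(locs, 'lpx', usemask=False, asrecarray=True)
print(out.x)  # asrecarray=True enables attribute-style field access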

Example 4: drop_fields

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def drop_fields(arr, *args, **kwargs):
    """Drop fields from numpy structured array
    Gives error if fields don't exist
    """
    return recfunctions.drop_fields(arr, usemask=False, *args, **kwargs) 
Developer: XENON1T, Project: pax, Lines: 7, Source: recarray_tools.py

Example 5: drop_fields_if_exist

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def drop_fields_if_exist(arr, fields):
    return drop_fields(arr, [f for f in fields if f in arr.dtype.names]) 
Developer: XENON1T, Project: pax, Lines: 4, Source: recarray_tools.py
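
The same guard can be sketched self-contained with recfunctions.drop_fields directly (the field names here are hypothetical):

import numpy as np
from numpy.lib import recfunctions

arr = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f8')])
wanted = ['b', 'c']  # 'c' does not exist in arr
# filtering against dtype.names avoids the error drop_fields would raise
out = recfunctions.drop_fields(
    arr, [f for f in wanted if f in arr.dtype.names], usemask=False)
print(out.dtype.names)  # ('a',)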

Example 6: fields_view

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def fields_view(arr, fields):
    """View one or several columns from a numpy record array"""
    # Single field is easy:
    if isinstance(fields, str):
        return arr[fields]
    for f in fields:
        if f not in arr.dtype.names:
            raise ValueError("Field %s is not in the array..." % f)
    # No known way to return a view of multiple fields, so make a copy for now
    return drop_fields(arr, [f for f in arr.dtype.names if f not in fields])
    # The solution in
    # http://stackoverflow.com/questions/15182381/how-to-return-a-view-of-several-columns-in-numpy-structured-array
    # doesn't work in combination with filter_on_fields...
    # dtype2 = np.dtype({name:arr.dtype.fields[name] for name in columns})
    # return np.ndarray(arr.shape, dtype2, arr, 0, arr.strides) 
Developer: XENON1T, Project: pax, Lines: 17, Source: recarray_tools.py
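
For reference, on NumPy 1.16 and later there is a more direct route than dropping the unwanted fields: multi-field indexing returns a view, and recfunctions.repack_fields makes a packed copy. A minimal sketch, assuming NumPy >= 1.16:

import numpy as np
from numpy.lib import recfunctions as rfn

arr = np.array([(1, 2.0, 3)],
               dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'i8')])
view = arr[['a', 'c']]            # a view that keeps the original itemsize
packed = rfn.repack_fields(view)  # a contiguous copy holding only 'a' and 'c'
print(packed.dtype.names)  # ('a', 'c')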

Example 7: indexToZero

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def indexToZero(f, path, col, name="picopore.{}_index", dataColumn=None):
    dataset = f[path]
    name = name.format(col)
    data = f[path].value
    if name not in dataset.attrs:
        dataColumn = data[col] if dataColumn is None else dataColumn
        start_index = min(dataColumn)
        dataset.attrs.create(name, start_index, dtype=getDtype(start_index))
        dataColumn = dataColumn-start_index
        data = drop_fields(data, [col])
        data = append_fields(data, [col], [dataColumn], [getDtype(dataColumn)])
    return data 
Developer: scottgigante, Project: picopore, Lines: 14, Source: compress.py
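
The drop-then-append round trip above is a common way to replace a column's values and dtype in a structured array; here is a minimal standalone sketch of the pattern (field names are illustrative):

import numpy as np
from numpy.lib.recfunctions import drop_fields, append_fields

data = np.array([(1000, 1.0), (1012, 2.0)],
                dtype=[('start', 'i8'), ('mean', 'f8')])
col = data['start'] - data['start'].min()  # re-base the column to zero
data = drop_fields(data, ['start'])
data = append_fields(data, ['start'], [col], usemask=False)
print(data['start'])  # [ 0 12]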

Example 8: deepLosslessCompress

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def deepLosslessCompress(f, group):
    paths = findDatasets(f, group, "Events")
    paths = [path for path in paths if "Basecall" in path]
    # index event detection
    if "UniqueGlobalKey/channel_id" in f:
        sampleRate = f["UniqueGlobalKey/channel_id"].attrs["sampling_rate"]
        for path in paths:
            if f[path].parent.parent.attrs.__contains__("event_detection"):
                # index back to event detection
                dataset = f[path].value
                start = np.array([int(round(sampleRate * i)) for i in dataset["start"]])
                dataset = indexToZero(f, path, "start", dataColumn=start)
                move = dataset["move"]  # save the move column; it is stored as int64 but only ever holds values up to 2
                # otherwise, event by event
                dataset = drop_fields(dataset, ["mean", "stdv", "length", "move"])
                dataset = append_fields(dataset, ["move"], [move], [getDtype(move)])
                rewriteDataset(f, path, compression="gzip", compression_opts=9, dataset=dataset)
                # rewrite eventdetection too - start is also way too big here
                eventDetectionPath = findDatasets(f, "all", entry_point=f[path].parent.parent.attrs.get("event_detection"))[0]
                if "picopore.start_index" not in f[eventDetectionPath].attrs.keys():
                    eventData = indexToZero(f, eventDetectionPath, "start")
                    rewriteDataset(f, eventDetectionPath, compression="gzip", compression_opts=9, dataset=eventData)

    if __basegroup_name__ not in f:
        f.create_group(__basegroup_name__)
        # use a distinct loop variable so the `group` argument is not shadowed
        for name, grp in f.items():
            if name != __basegroup_name__:
                recursiveCollapseGroups(f, __basegroup_name__, name, grp)
    return losslessCompress(f, group) 
Developer: scottgigante, Project: picopore, Lines: 31, Source: compress.py

Example 9: __init__

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def __init__(self, filename, date_sep='-', time_sep=':', format='stroke_DC3'):
        """ Load NLDN data from a file, into a numpy named array stored in the
            *data* attribute. *data*['time'] is relative to the *basedate* datetime
            attribute
            """
        self.format = format
        
        dtype_specs = getattr(self, format)
        
        
        nldn_initial = np.genfromtxt(filename, dtype=dtype_specs['columns'])
        date_part = np.genfromtxt(nldn_initial['date'],
                        delimiter=date_sep, dtype=dtype_specs['date_dtype'])
        time_part = np.genfromtxt(nldn_initial['time'],
                        delimiter=time_sep, dtype=dtype_specs['time_dtype'])
        dates = [datetime(a['year'], a['month'], a['day'], b['hour'], b['minute']) 
                    for a, b in zip(date_part, time_part)]
        min_date = min(dates)
        min_date = datetime(min_date.year, min_date.month, min_date.day)
        t = np.fromiter( ((d-min_date).total_seconds() for d in dates), dtype='float64')
        t += time_part['second']
        
        self.basedate = min_date
        data = drop_fields(nldn_initial, ('date', 'time'))
        data = append_fields(data, 'time', t)
        
        self.data = data 
Developer: deeplycloudy, Project: lmatools, Lines: 29, Source: NLDN.py

Example 10: classify_line

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def classify_line(filename, classifier):
    """ Use `classifier` to classify data stored in `filename`

    Args:
        filename (str): filename of stored results
        classifier (sklearn classifier): pre-trained classifier

    """
    z = np.load(filename)
    rec = z['record']

    if rec.shape[0] == 0:
        logger.debug('No records in {f}. Continuing'.format(f=filename))
        return

    # Rescale intercept term
    coef = rec['coef'].copy()  # copy so we don't transform npz coef
    coef[:, 0, :] = (coef[:, 0, :] + coef[:, 1, :] *
                     ((rec['start'] + rec['end']) / 2.0)[:, np.newaxis])

    # Include RMSE for full X matrix
    newdim = (coef.shape[0], coef.shape[1] * coef.shape[2])
    X = np.hstack((coef.reshape(newdim), rec['rmse']))

    # Create output and classify
    classes = classifier.classes_
    classified = np.zeros(rec.shape[0], dtype=[
        ('class', 'u2'),
        ('class_proba', 'float32', classes.size)
    ])
    classified['class'] = classifier.predict(X)
    classified['class_proba'] = classifier.predict_proba(X)

    # Replace with new classification if exists, or add by merging
    if ('class' in rec.dtype.names and 'class_proba' in rec.dtype.names and
            rec['class_proba'].shape[1] == classes.size):
        rec['class'] = classified['class']
        rec['class_proba'] = classified['class_proba']
    else:
        # Drop incompatible classified results if needed
        # e.g., if the number of classes changed
        if 'class' in rec.dtype.names and 'class_proba' in rec.dtype.names:
            rec = nprfn.drop_fields(rec, ['class', 'class_proba'])
        rec = nprfn.merge_arrays((rec, classified), flatten=True)

    # Create dict for re-saving `npz` file (only way to append)
    out = {}
    for k, v in six.iteritems(z):
        out[k] = v
    out['classes'] = classes
    out['record'] = rec

    np.savez(filename, **out) 
Developer: ceholden, Project: yatsm, Lines: 55, Source: classify.py
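
The drop-then-merge step at the end of this example follows the pattern sketched below (dtypes are simplified relative to yatsm's real record arrays):

import numpy as np
from numpy.lib import recfunctions as nprfn

rec = np.array([(1.0, 0), (2.0, 0)], dtype=[('x', 'f8'), ('class', 'u2')])
classified = np.array([(3,), (4,)], dtype=[('class', 'u2')])

# drop the stale field, then merge the freshly classified one back in
rec = nprfn.drop_fields(rec, ['class'])
rec = nprfn.merge_arrays((rec, classified), flatten=True)
print(rec.dtype.names)  # ('x', 'class')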

Example 11: deepLosslessDecompress

# Required import: from numpy.lib import recfunctions [as alias]
# Or: from numpy.lib.recfunctions import drop_fields [as alias]
def deepLosslessDecompress(f, group):
    # rebuild group hierarchy
    if __basegroup_name__ in f.keys():
        uncollapseGroups(f, f[__basegroup_name__])
    paths = findDatasets(f, group)
    paths = [path for path in paths if "Basecall" in path]
    sampleRate = f["UniqueGlobalKey/channel_id"].attrs["sampling_rate"]
    for path in paths:
        if f[path].parent.parent.attrs.__contains__("event_detection"):
            # index back to event detection
            dataset = f[path].value
            if "mean" not in dataset.dtype.names:
                eventDetectionPath = findDatasets(f, "all", entry_point=f[path].parent.parent.attrs.get("event_detection"))[0]
                eventData = f[eventDetectionPath].value
                try:
                    start = eventData["start"] + f[eventDetectionPath].attrs["picopore.start_index"]
                    del f[eventDetectionPath].attrs["picopore.start_index"]
                    eventData = drop_fields(eventData, ["start"])
                    eventData = append_fields(eventData, ["start"], [start], [getDtype(start)])
                    rewriteDataset(f, eventDetectionPath, compression="gzip", compression_opts=1, dataset=eventData)
                except KeyError:
                    # must have been compressed without start indexing
                    pass
                try:
                    start_index = f[path].attrs["picopore.start_index"]
                    del f[path].attrs["picopore.start_index"]
                except KeyError:
                    # must have been compressed without start indexing
                    start_index = 0
                start = dataset["start"][0] + start_index
                end = dataset["start"][-1] + start_index
                # constrain to range in basecall
                eventData = eventData[np.logical_and(eventData["start"] >= start, eventData["start"] <= end)]
                # remove missing events
                i = 0
                keepIndex = []
                for time in dataset["start"]:
                    # check the bound first to avoid indexing past the end of eventData
                    while i < eventData.shape[0] and eventData["start"][i] != time + start_index:
                        i += 1
                    keepIndex.append(i)
                eventData = eventData[keepIndex]
                dataset = drop_fields(dataset, "start")
                start = [i/sampleRate for i in eventData["start"]]
                length = [i/sampleRate for i in eventData["length"]]
                dataset = append_fields(dataset, ["mean", "start", "stdv", "length"], [eventData["mean"], start, eventData["stdv"], length])
                rewriteDataset(f, path, dataset=dataset)
    return losslessDecompress(f, group) 
Developer: scottgigante, Project: picopore, Lines: 49, Source: compress.py


Note: The numpy.lib.recfunctions.drop_fields examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.