本文整理汇总了Python中glue.core.data.Data.add_component方法的典型用法代码示例。如果您正苦于以下问题:Python Data.add_component方法的具体用法?Python Data.add_component怎么用?Python Data.add_component使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类glue.core.data.Data
的用法示例。
在下文中一共展示了Data.add_component方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_histogram_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def test_histogram_data():
    """Build a test dataset holding one uniform and one normal random column."""
    data = Data(label="Test Data")
    uniform = Component(np.random.uniform(size=500))
    normal = Component(np.random.normal(size=500))
    data.add_component(uniform, 'uniform')
    data.add_component(normal, 'normal')
    return data
示例2: test_image
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def test_image():
    """Build a 25x25 test image dataset with a ones and a zeros component."""
    data = Data(label="Test Image")
    data.add_component(Component(np.ones((25, 25))), 'test_1')
    data.add_component(Component(np.zeros((25, 25))), 'test_2')
    return data
示例3: npy_npz_reader
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def npy_npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy array saved to a .npy or .npz file.

    Parameters
    ----------
    filename : str
        The pathname to the Numpy save file.
    format : str, optional
        Unused here; accepted for reader-interface compatibility.
    auto_merge : bool, optional
        Unused here; accepted for reader-interface compatibility.

    Returns
    -------
    list of Data
        One Data object per array stored in the file.
    """
    import numpy as np
    data = np.load(filename)
    # A .npy file yields a single ndarray; wrap it in a dict so the loop
    # below can handle .npy and .npz (dict-like) inputs uniformly.
    if isinstance(data, np.ndarray):
        data = {None: data}
    groups = []
    for groupname in sorted(data):
        d = Data(label=groupname)
        arr = data[groupname]
        if arr.dtype.names is None:
            # Unstructured array: expose it as a single component
            comp = Component.autotyped(arr)
            d.add_component(comp, label='array')
        else:
            # Structured array: one component per named field
            for name in arr.dtype.names:
                comp = Component.autotyped(arr[name])
                d.add_component(comp, label=name)
        groups.append(d)
    return groups
示例4: astropy_tabular_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.
    All arguments are passed to
    astropy.table.Table.read(...).
    """
    table = astropy_table_read(*args, **kwargs)
    result = Data()
    result.meta = table.meta
    # Turn each table column into a glue component
    for column_name in table.columns:
        column = table[column_name]
        # older astropy exposed `.units` rather than `.unit`
        units = column.unit if hasattr(column, 'unit') else column.units
        if table.masked:
            # fill array for now
            try:
                column = column.filled(fill_value=np.nan)
            except (ValueError, TypeError):
                # assigning nan to integer dtype
                column = column.filled(fill_value=-1)
        component = Component.autotyped(column, units=units)
        result.add_component(component, column_name)
    return result
示例5: panda_process
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def panda_process(indf):
    """
    Build a data set from a table using pandas. This attempts to respect
    categorical data input by letting pandas.read_csv infer the type

    Parameters
    ----------
    indf : pandas.DataFrame
        The table to convert into a Data object.

    Returns
    -------
    Data
        A dataset with one component per column of ``indf``.
    """
    import pandas as pd

    result = Data()
    # DataFrame.iteritems() was removed in pandas 2.0; .items() is the
    # long-standing equivalent.
    for name, column in indf.items():
        # np.object / np.bool aliases were removed from NumPy (>= 1.24);
        # compare against the builtin types instead.
        if (column.dtype == object) | (column.dtype == bool):
            # try to salvage numerical data; Series.convert_objects() was
            # removed from pandas — pd.to_numeric(errors='coerce') is the
            # supported replacement.
            coerced = pd.to_numeric(column, errors='coerce')
            if (coerced.dtype != column.dtype) and coerced.isnull().mean() < 0.4:
                c = Component(coerced.values)
            else:
                # pandas has a 'special' nan implementation and this doesn't
                # play well with np.unique
                c = CategoricalComponent(column.fillna(''))
        else:
            c = Component(column.values)
        # convert header to string - in some cases if the first row contains
        # numbers, these are cast to numerical types, so we want to change that
        # here.
        if not isinstance(name, str):
            name = str(name)
        # strip off leading #
        name = name.strip()
        if name.startswith('#'):
            name = name[1:].strip()
        result.add_component(c, name)
    return result
示例6: npz_reader
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy structured array saved to a .npy or .npz file.

    Parameters
    ----------
    filename : str
        The pathname to the Numpy save file.
    format : str, optional
        Unused here; accepted for reader-interface compatibility.
    auto_merge : bool, optional
        Unused here; accepted for reader-interface compatibility.

    Returns
    -------
    list of Data
        One Data object per group stored in the file.

    Raises
    ------
    ValueError
        If any stored array is not a structured array.
    """
    import numpy as np
    npy_data = np.load(filename)
    groups = []
    for groupname in sorted(npy_data.files):
        d = Data(label=groupname)
        arr = npy_data[groupname]
        # BUGFIX: dtype objects always *have* a `names` attribute (it is
        # simply None for unstructured arrays), so the original
        # `hasattr(arr.dtype, 'names')` guard could never fire and
        # unstructured arrays crashed later when iterating None.
        # Test the value itself instead.
        if arr.dtype.names is None:
            raise ValueError("Numpy save file loading currently only supports structured"
                             " arrays, e.g., with specified names.")
        for name in arr.dtype.names:
            comp = Component.autotyped(arr[name])
            d.add_component(comp, label=name)
        groups.append(d)
    return groups
示例7: _load_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def _load_data(rec, context):
    """Rebuild a Data object from a serialized session record.

    Parameters
    ----------
    rec : dict
        Saved state with 'label', 'coords', 'components' and 'subsets'
        entries.
    context : object
        Deserialization context whose ``object`` method resolves saved
        references back into live objects.
    """
    label = rec['label']
    result = Data(label=label)
    result.coords = context.object(rec['coords'])
    # we manually rebuild pixel/world components, so
    # we override this function. This is pretty ugly
    result._create_pixel_and_world_components = lambda: None
    comps = [list(map(context.object, [cid, comp]))
             for cid, comp in rec['components']]
    # Plain components first: sorting on the isinstance() result (False <
    # True) pushes Derived/Coordinate components to the end so their
    # dependencies already exist when they are added.
    comps = sorted(comps,
                   key=lambda x: isinstance(x[1], (DerivedComponent,
                                                   CoordinateComponent)))
    for cid, comp in comps:
        if isinstance(comp, CoordinateComponent):
            # re-attach the coordinate component to its rebuilt parent
            comp._data = result
        result.add_component(comp, cid)
    assert result._world_component_ids == []
    # Collect coordinate component ids (sorted by component): the first
    # half are world components, the second half pixel components.
    coord = [c for c in comps if isinstance(c[1], CoordinateComponent)]
    coord = [x[0] for x in sorted(coord, key=lambda x: x[1])]
    assert len(coord) == result.ndim * 2
    result._world_component_ids = coord[:len(coord) // 2]
    result._pixel_component_ids = coord[len(coord) // 2:]
    for s in rec['subsets']:
        result.add_subset(context.object(s))
    return result
示例8: merge
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def merge(self, *data, **kwargs):
    """
    Merge two or more datasets into a single dataset.

    This has the following effects:
    All components from all datasets are added to the first argument
    All datasets except the first argument are removed from the collection
    Any component name conflicts are disambiguated
    The pixel and world components apart from the first argument are discarded
    :note: All arguments must have the same shape
    :param data: One or more :class:`~glue.core.data.Data` instances.
    :returns: self
    """
    if len(data) < 2:
        raise ValueError("merge requires 2 or more arguments")
    # every dataset must share the shape of the first one
    expected_shape = data[0].shape
    if any(dataset.shape != expected_shape for dataset in data):
        raise ValueError("All arguments must have the same shape")
    master = Data(label=kwargs.get('label', data[0].label))
    self.append(master)
    master.coords = data[0].coords
    for dataset in data:
        # pixel/world components of the merged datasets are discarded
        ignored = dataset.pixel_component_ids + dataset.world_component_ids
        for cid in dataset.components:
            if cid in ignored:
                continue
            if cid in master.components:  # already present (via a link)
                continue
            used_labels = [other.label for other in master.components]
            new_label = cid.label
            # Special-case 'PRIMARY', rename to data label
            if new_label == 'PRIMARY':
                new_label = dataset.label
            # First-pass disambiguation, try component_data
            if new_label in used_labels:
                new_label = '%s_%s' % (new_label, dataset.label)
            cid._label = disambiguate(new_label, used_labels)
            master.add_component(dataset.get_component(cid), cid)
        self.remove(dataset)
    return self
示例9: merge
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def merge(self, *data, **kwargs):
    """
    Merge two or more datasets into a single dataset.

    This has the following effects:
    All components from all datasets are added to the first argument
    All datasets except the first argument are removed from the collection
    Any component name conflicts are disambiguated
    The pixel and world components apart from the first argument are discarded

    :note: All arguments must have the same shape
    :param data: One or more :class:`~glue.core.data.Data` instances.
    :returns: self
    """
    if len(data) < 2:
        raise ValueError("merge requires 2 or more arguments")
    shp = data[0].shape
    for d in data:
        if d.shape != shp:
            raise ValueError("All arguments must have the same shape")
    label = kwargs.get('label', data[0].label)
    master = Data(label=label)
    self.append(master)
    master.coords = data[0].coords
    # Prefer coordinates from the first dataset that has WCS coordinates
    for i, d in enumerate(data):
        if isinstance(d.coords, WCSCoordinates):
            master.coords = d.coords
            break
    # Find ambiguous components (ones which have labels in more than one
    # dataset)
    from collections import Counter
    clabel_count = Counter([c.label for d in data for c in d.visible_components])
    for d in data:
        for c in d.components:
            if c in master.components:  # already present (via a link)
                continue
            lbl = c.label
            # Qualify duplicated labels with the source dataset's label
            if clabel_count[lbl] > 1:
                lbl = lbl + " [{0}]".format(d.label)
            c._label = lbl
            c.parent = master
            master.add_component(d.get_component(c), c)
        self.remove(d)
    return self
示例10: test_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def test_data():
    """Create two small test datasets, each holding two numeric components."""
    data = Data(label="Test Data 1")
    data2 = Data(label="Teset Data 2")
    for label, values in (('a', [1, 2, 3]), ('b', [1, 2, 3])):
        data.add_component(Component(np.array(values)), label)
    for label, values in (('c', [2, 4, 6]), ('d', [1, 3, 5])):
        data2.add_component(Component(np.array(values)), label)
    return data, data2
示例11: make_test_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def make_test_data():
    """Create a catalog-style dataset with x/y/z galaxy coordinate columns."""
    data = Data(label="Test Cat Data 1")
    columns = (
        ('x_gal', np.array([4, 5, 6, 3])),
        ('y_gal', np.array([1, 2, 3, 2])),
        ('z_gal', np.array([2, 3, 4, 1])),
    )
    for label, values in columns:
        data.add_component(Component(values), label)
    return data
示例12: hdf5_reader
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def hdf5_reader(filename, auto_merge=False, memmap=True, **kwargs):
    """
    Read in all datasets from an HDF5 file

    Parameters
    ----------
    filename : str
        The filename of the HDF5 file
    auto_merge : bool, optional
        Whether to merge non-tabular arrays that share a shape into a
        single Data object (one component per array).
    memmap : bool, optional
        Whether to use memory mapping

    Returns
    -------
    list of Data
        One Data object per dataset (or per shape, when merging).
    """
    from astropy.table import Table
    # Read in all datasets
    datasets = extract_hdf5_datasets(filename, memmap=memmap)
    # Base the data labels on the filename without its extension
    label_base = os.path.basename(filename).rpartition('.')[0]
    if not label_base:
        label_base = os.path.basename(filename)
    data_by_shape = {}
    groups = OrderedDict()
    for key in datasets:
        label = '{0}[{1}]'.format(label_base, key)
        array = datasets[key]
        if isinstance(array, Table):
            # Tables become one Data object with a component per 1D column
            data = Data(label=label)
            groups[label] = data
            for column_name in array.columns:
                column = array[column_name]
                if column.ndim == 1:
                    component = Component.autotyped(column, units=column.unit)
                    data.add_component(component=component,
                                       label=column_name)
                else:
                    warnings.warn("HDF5: Ignoring vector column {0}".format(column_name))
        else:
            # Plain arrays: reuse the Data object for this shape when
            # auto-merging, otherwise create a fresh one
            if auto_merge and array.shape in data_by_shape:
                data = data_by_shape[datasets[key].shape]
            else:
                data = Data(label=label)
                data_by_shape[array.shape] = data
                groups[label] = data
            # key[1:] drops the first character of the HDF5 path
            # (presumably a leading '/' — confirm against
            # extract_hdf5_datasets' key format)
            data.add_component(component=datasets[key], label=key[1:])
    return [groups[idx] for idx in groups]
示例13: test_categorical_data
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def test_categorical_data():
    """Create two datasets, each pairing a categorical with a numeric component."""
    data = Data(label="Test Cat Data 1")
    data2 = Data(label="Teset Cat Data 2")
    data.add_component(CategoricalComponent(np.array(['a', 'a', 'b'])), 'x1')
    data.add_component(Component(np.array([1, 2, 3])), 'y1')
    data2.add_component(CategoricalComponent(np.array(['c', 'a', 'b'])), 'x2')
    data2.add_component(Component(np.array([1, 3, 5])), 'y2')
    return data, data2
示例14: test_high_cardinatility_timing
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def test_high_cardinatility_timing(self):
    """Performance test: plotting a categorical axis with 50k distinct
    categories should complete quickly."""
    card = 50000
    data = Data()
    card_data = [str(num) for num in range(card)]
    # y has card * 5 points; np.repeat (axis=None) repeats each of the
    # `card` string labels 5 times so the categorical column matches.
    data.add_component(Component(np.arange(card * 5)), 'y')
    data.add_component(
        CategoricalComponent(np.repeat([card_data], 5)), 'xcat')
    self.add_data(data)
    comp = data.find_component_id('xcat')
    # Time a single call that assigns the categorical component to x
    timer_func = partial(self.client._set_xydata, 'x', comp)
    timer = timeit(timer_func, number=1)
    assert timer < 3  # this is set for Travis speed
示例15: casalike_cube
# 需要导入模块: from glue.core.data import Data [as 别名]
# 或者: from glue.core.data.Data import add_component [as 别名]
def casalike_cube(filename, **kwargs):
    """
    This provides special support for 4D CASA FITS - like cubes,
    which have 2 spatial axes, a spectral axis, and a stokes axis
    in that order.
    Each stokes cube is split out as a separate component
    """
    from astropy.io import fits
    result = Data()
    with fits.open(filename, **kwargs) as hdulist:
        primary = hdulist[0]
        array = primary.data
        header = primary.header
    result.coords = coordinates_from_header(header)
    # array[[i]] (list indexing) keeps a leading length-1 axis on each
    # stokes slice
    for stokes in range(array.shape[0]):
        result.add_component(array[[stokes]], label='STOKES %i' % stokes)
    return result