本文整理汇总了Python中glue.core.data.Data类的典型用法代码示例。如果您正苦于以下问题:Python Data类的具体用法?Python Data怎么用?Python Data使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Data类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_image
def test_image():
    """Build a small test Data object holding two 25x25 image components."""
    image = Data(label="Test Image")
    ones = Component(np.ones((25, 25)))
    zeros = Component(np.zeros((25, 25)))
    # Component order matters for downstream consumers: 'test_1' first.
    image.add_component(ones, 'test_1')
    image.add_component(zeros, 'test_2')
    return image
示例2: merge
def merge(self, *data, **kwargs):
    """
    Merge two or more datasets into a single dataset.
    This has the following effects:
    All components from all datasets are added to the first argument
    All datasets except the first argument are removed from the collection
    Any component name conflicts are disambiguated
    The pixel and world components apart from the first argument are discarded
    :note: All arguments must have the same shape
    :param data: One or more :class:`~glue.core.data.Data` instances.
    :returns: self
    """
    if len(data) < 2:
        raise ValueError("merge requires 2 or more arguments")
    # All inputs must share a common shape so their components can live
    # side by side in one Data object.
    shp = data[0].shape
    for d in data:
        if d.shape != shp:
            raise ValueError("All arguments must have the same shape")
    # The merged dataset takes its label (unless overridden via kwargs)
    # and its coordinate system from the first argument.
    label = kwargs.get('label', data[0].label)
    master = Data(label=label)
    self.append(master)
    master.coords = data[0].coords
    for d in data:
        # NOTE(review): pixel/world components are skipped for *every*
        # dataset, including the first -- the docstring says only non-first
        # datasets lose theirs; confirm which behavior is intended.
        skip = d.pixel_component_ids + d.world_component_ids
        for c in d.components:
            if c in skip:
                continue
            if c in master.components:  # already present (via a link)
                continue
            # Labels already in use; recomputed every iteration because
            # add_component below keeps growing master's component list.
            taken = [_.label for _ in master.components]
            lbl = c.label
            # Special-case 'PRIMARY', rename to data label
            if lbl == 'PRIMARY':
                lbl = d.label
            # First-pass disambiguation, try component_data
            if lbl in taken:
                lbl = '%s_%s' % (lbl, d.label)
            # Final fallback: generic disambiguation helper.
            lbl = disambiguate(lbl, taken)
            c._label = lbl
            master.add_component(d.get_component(c), c)
        # The source dataset has been absorbed; drop it from the collection.
        self.remove(d)
    return self
示例3: astropy_tabular_data
def astropy_tabular_data(*args, **kwargs):
    """
    Build a data set from a table. We restrict ourselves to tables
    with 1D columns.
    All arguments are passed to
    astropy.table.Table.read(...).
    """
    table = astropy_table_read(*args, **kwargs)
    result = Data()
    result.meta = table.meta
    # One glue Component per table column.
    for column_name in table.columns:
        column = table[column_name]
        # Older astropy columns expose 'units' instead of 'unit'.
        try:
            units = column.unit
        except AttributeError:
            units = column.units
        if table.masked:
            # fill array for now
            try:
                column = column.filled(fill_value=np.nan)
            except (ValueError, TypeError):
                # nan cannot be assigned to integer dtypes
                column = column.filled(fill_value=-1)
        result.add_component(Component.autotyped(column, units=units),
                             column_name)
    return result
示例4: npy_npz_reader
def npy_npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy structured array saved to a .npy or .npz file.

    Parameters
    ----------
    filename: str
        The pathname to the Numpy save file.
    """
    import numpy as np

    loaded = np.load(filename)
    # A .npy file yields a bare ndarray; normalize it to the mapping
    # interface an .npz archive provides (None becomes the group name).
    if isinstance(loaded, np.ndarray):
        loaded = {None: loaded}

    datasets = []
    for key in sorted(loaded):
        dataset = Data(label=key)
        arr = loaded[key]
        names = arr.dtype.names
        if names is None:
            # Unstructured array: expose it as a single component.
            dataset.add_component(Component.autotyped(arr), label='array')
        else:
            # Structured array: one component per named field.
            for field in names:
                dataset.add_component(Component.autotyped(arr[field]),
                                      label=field)
        datasets.append(dataset)
    return datasets
示例5: panda_process
def panda_process(indf):
    """
    Build a data set from a table using pandas. This attempts to respect
    categorical data input by letting pandas.read_csv infer the type

    :param indf: a pandas DataFrame to convert
    :returns: a :class:`~glue.core.data.Data` instance with one component
        per DataFrame column
    """
    import pandas as pd

    result = Data()
    # DataFrame.iteritems() was removed in pandas 2.0; items() is the
    # long-standing equivalent.
    for name, column in indf.items():
        # np.object / np.bool aliases were removed from NumPy; the builtin
        # types compare identically against a dtype.
        if (column.dtype == object) | (column.dtype == bool):
            # try to salvage numerical data
            # Series.convert_objects was removed from pandas;
            # pd.to_numeric(errors='coerce') is the supported replacement
            # (non-numeric entries become NaN).
            coerced = pd.to_numeric(column, errors='coerce')
            if (coerced.dtype != column.dtype) and coerced.isnull().mean() < 0.4:
                c = Component(coerced.values)
            else:
                # pandas has a 'special' nan implementation and this doesn't
                # play well with np.unique
                c = CategoricalComponent(column.fillna(''))
        else:
            c = Component(column.values)
        # convert header to string - in some cases if the first row contains
        # numbers, these are cast to numerical types, so we want to change that
        # here.
        if not isinstance(name, six.string_types):
            name = str(name)
        # strip off leading #
        name = name.strip()
        if name.startswith('#'):
            name = name[1:].strip()
        result.add_component(c, name)
    return result
示例6: npz_reader
def npz_reader(filename, format='auto', auto_merge=False, **kwargs):
    """
    Read in a Numpy structured array saved to a .npy or .npz file.

    Parameters
    ----------
    filename: str
        The pathname to the Numpy save file.

    Raises
    ------
    ValueError
        If any array in the archive is not a structured array.
    """
    import numpy as np

    npy_data = np.load(filename)
    groups = []
    for groupname in sorted(npy_data.files):
        d = Data(label=groupname)
        arr = npy_data[groupname]
        # BUG FIX: ndarray.dtype *always* has a 'names' attribute (it is
        # None for unstructured arrays), so the previous
        # hasattr(arr.dtype, 'names') guard could never fire and
        # unstructured input crashed with TypeError when iterating None.
        if arr.dtype.names is None:
            raise ValueError("Numpy save file loading currently only supports structured"
                             " arrays, e.g., with specified names.")
        # One component per named field of the structured array.
        for name in arr.dtype.names:
            comp = Component.autotyped(arr[name])
            d.add_component(comp, label=name)
        groups.append(d)
    return groups
示例7: test_histogram_data
def test_histogram_data():
    """Build a 500-point dataset with one uniform and one normal component."""
    dataset = Data(label="Test Data")
    uniform = Component(np.random.uniform(size=500))
    normal = Component(np.random.normal(size=500))
    dataset.add_component(uniform, 'uniform')
    dataset.add_component(normal, 'normal')
    return dataset
示例8: merge
def merge(self, *data, **kwargs):
    """
    Merge two or more datasets into a single dataset.
    This has the following effects:
    All components from all datasets are added to the first argument
    All datasets except the first argument are removed from the collection
    Any component name conflicts are disambiguated
    The pixel and world components apart from the first argument are discarded
    :note: All arguments must have the same shape
    :param data: One or more :class:`~glue.core.data.Data` instances.
    :returns: self
    """
    if len(data) < 2:
        raise ValueError("merge requires 2 or more arguments")
    # Every input must share a common shape for the merge to make sense.
    shp = data[0].shape
    for d in data:
        if d.shape != shp:
            raise ValueError("All arguments must have the same shape")
    # Label defaults to the first dataset's unless overridden via kwargs.
    label = kwargs.get('label', data[0].label)
    master = Data(label=label)
    self.append(master)
    master.coords = data[0].coords
    # Prefer the first dataset that carries WCS coordinates, if any.
    for i, d in enumerate(data):
        if isinstance(d.coords, WCSCoordinates):
            master.coords = d.coords
            break
    # Find ambiguous components (ones which have labels in more than one
    # dataset
    from collections import Counter
    clabel_count = Counter([c.label for d in data for c in d.visible_components])
    for d in data:
        for c in d.components:
            if c in master.components:  # already present (via a link)
                continue
            lbl = c.label
            # Disambiguate by appending the source dataset's label.
            if clabel_count[lbl] > 1:
                lbl = lbl + " [{0}]".format(d.label)
            c._label = lbl
            c.parent = master
            master.add_component(d.get_component(c), c)
        # The source dataset has been absorbed; drop it from the collection.
        self.remove(d)
    return self
示例9: new_data
def new_data():
    """Create a Data object for the current HDU and register it.

    Uses label_base/hdu_name/coords/shape from the enclosing scope and
    records the result in the groups and extension_by_shape mappings.
    """
    hdu_label = '{0}[{1}]'.format(label_base, hdu_name)
    data = Data(label=hdu_label)
    data.coords = coords
    # Remember the dataset under its HDU name, and which extension
    # produced this array shape.
    groups[hdu_name] = data
    extension_by_shape[shape] = hdu_name
    return data
示例10: _load_data
def _load_data(rec, context):
    """Reconstruct a Data object from its serialized record.

    :param rec: dict of serialized state with keys 'label', 'coords',
        'components' and 'subsets'
    :param context: deserialization context; context.object(...) rebuilds
        each nested serialized object
    :returns: the restored Data instance
    """
    label = rec['label']
    result = Data(label=label)
    result.coords = context.object(rec['coords'])
    # we manually rebuild pixel/world components, so
    # we override this function. This is pretty ugly
    result._create_pixel_and_world_components = lambda: None
    comps = [list(map(context.object, [cid, comp]))
             for cid, comp in rec['components']]
    # Plain components first (False sorts before True), since derived and
    # coordinate components may depend on them being present.
    comps = sorted(comps,
                   key=lambda x: isinstance(x[1], (DerivedComponent,
                                                   CoordinateComponent)))
    for cid, comp in comps:
        if isinstance(comp, CoordinateComponent):
            comp._data = result
        result.add_component(comp, cid)
    assert result._world_component_ids == []
    # Split coordinate component ids into world/pixel halves; assumes the
    # sort on the components places world ids in the first half -- TODO
    # confirm against CoordinateComponent's ordering.
    coord = [c for c in comps if isinstance(c[1], CoordinateComponent)]
    coord = [x[0] for x in sorted(coord, key=lambda x: x[1])]
    assert len(coord) == result.ndim * 2
    result._world_component_ids = coord[:len(coord) // 2]
    result._pixel_component_ids = coord[len(coord) // 2:]
    for s in rec['subsets']:
        result.add_subset(context.object(s))
    return result
示例11: make_test_data
def make_test_data():
    """Build a 4-row catalog dataset with x/y/z galaxy coordinate columns."""
    catalog = Data(label="Test Cat Data 1")
    columns = [
        ('x_gal', np.array([4, 5, 6, 3])),
        ('y_gal', np.array([1, 2, 3, 2])),
        ('z_gal', np.array([2, 3, 4, 1])),
    ]
    for column_label, values in columns:
        catalog.add_component(Component(values), column_label)
    return catalog
示例12: test_limits_inf
def test_limits_inf(self):
    """Non-finite values must be excluded from the visible limits."""
    d = Data()
    # np.infty was removed in NumPy 2.0; np.inf is the canonical spelling
    # and has existed in every NumPy release.
    x = Component(np.array([[1, 2], [np.inf, 4]]))
    y = Component(np.array([[2, 4], [-np.inf, 8]]))
    xid = d.add_component(x, 'x')
    yid = d.add_component(y, 'y')
    self.collect.append(d)
    self.client.add_layer(d)
    self.client.xatt = xid
    self.client.yatt = yid
    # Limits should span only the finite entries of each component.
    assert self.client._visible_limits(0) == (1, 4)
    assert self.client._visible_limits(1) == (2, 8)
示例13: setup_method
def setup_method(self, method):
    """Wire up a data collection and histogram client with a mocked figure."""
    figure = MagicMock()
    self.collect = DataCollection()
    self.data = Data(x=[1, 2, 3, 2, 2, 3, 1])
    self.client = HistogramClient(self.collect, figure)
    # Convenience handles used throughout the test methods.
    self.axes = self.client.axes
    self.hub = self.collect.hub
    self.connect()
示例14: casalike_cube
def casalike_cube(filename, **kwargs):
    """
    This provides special support for 4D CASA FITS - like cubes,
    which have 2 spatial axes, a spectral axis, and a stokes axis
    in that order.
    Each stokes cube is split out as a separate component
    """
    from astropy.io import fits

    result = Data()
    # Keep all HDU access inside the context manager, since the file
    # handle is closed on exit.
    with fits.open(filename, **kwargs) as hdulist:
        cube = hdulist[0].data
        header = hdulist[0].header
        result.coords = coordinates_from_header(header)
        # One component per stokes plane; indexing with [[i]] keeps a
        # length-one leading axis on each slice.
        for stokes in range(cube.shape[0]):
            result.add_component(cube[[stokes]], label='STOKES %i' % stokes)
    return result
示例15: setup_class
def setup_class(self):
    """Build a mirrored 5-D test dataset and a subset state (x >= 1200)."""
    # Two sign-mirrored 5-D arrays: 3*4*5*6*7 == 2520 elements.
    x = +np.arange(2520).reshape((3, 4, 5, 6, 7))
    y = -np.arange(2520).reshape((3, 4, 5, 6, 7))
    self.data = Data(x=x, y=y, label='Test data')
    self.x_id, self.y_id = self.data.main_components
    # Comparing a component id presumably yields a subset state object --
    # verify against the glue ComponentID API.
    self.subset_state = self.x_id >= 1200