This page collects typical usage examples of the Python method sherpa.astro.data.DataPHA._set_initial_quantity. If you are wondering exactly how DataPHA._set_initial_quantity is used, or what calling it looks like in real code, the selected example below should help. You can also look at further usage examples for the class that defines the method, sherpa.astro.data.DataPHA.
The code example below shows DataPHA._set_initial_quantity in context.
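Before the full example, here is a minimal, self-contained sketch of the call itself. It is not taken from the example below: the dataset and ARF are fabricated (fake.pha and fake.arf are placeholder names), and _set_initial_quantity is a private helper, so its exact behaviour may vary between Sherpa versions. The point is simply where the call sits: responses are attached first, and the method is then asked to choose the dataset's initial analysis units.

import numpy as np
from sherpa.astro.data import DataARF, DataPHA

nchan = 1024
channels = np.arange(1, nchan + 1, dtype=float)
counts = np.random.poisson(5, nchan).astype(float)

# Fabricated energy grid and flat effective area for the toy ARF.
egrid = np.linspace(0.1, 10.0, nchan + 1)
arf = DataARF("fake.arf", energ_lo=egrid[:-1], energ_hi=egrid[1:],
              specresp=np.ones(nchan))

pha = DataPHA("fake.pha", channel=channels, counts=counts, exposure=1000.0)
pha.set_response(arf)          # attach the response first (no RMF in this sketch)
pha._set_initial_quantity()    # then let the dataset pick its analysis units
print(pha.units)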
Example 1: read_pha
# Required import: from sherpa.astro.data import DataPHA [as alias]
# Or: from sherpa.astro.data.DataPHA import _set_initial_quantity [as alias]
#......... part of the code has been omitted here .........
                if data["staterror"] is None:
                    msg = "systematic"
                elif data["syserror"] is None:
                    msg = "statistical"
                    if output_once:
                        wmsg = "systematic errors were not found in " + \
                               "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = "statistical and systematic"
                if output_once:
                    imsg = (
                        msg
                        + " errors were found in file "
                        + "'{}' \nbut not used; ".format(filename)
                        + "to use them, re-read with use_errors=True"
                    )
                    info(imsg)
                data["staterror"] = None
                data["syserror"] = None

        dname = os.path.dirname(filename)
        albl = "ARF"
        rlbl = "RMF"
        if use_background:
            albl = albl + " (background)"
            rlbl = rlbl + " (background)"

        arf = _read_ancillary(data, "arffile", albl, dname, read_arf, output_once)
        rmf = _read_ancillary(data, "rmffile", rlbl, dname, read_rmf, output_once)

        backgrounds = []

        if data["backfile"] and data["backfile"].lower() != "none":
            try:
                if os.path.dirname(data["backfile"]) == "":
                    data["backfile"] = os.path.join(os.path.dirname(filename),
                                                    data["backfile"])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data["backfile"], use_errors, True)
                    if output_once:
                        info("read background file {}".format(data["backfile"]))

                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        if bkg_dataset.get_response() == (None, None) and rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)
            except:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        for bkg_type, bscal_type in izip(("background_up", "background_down"),
                                         ("backscup", "backscdn")):
            if data[bkg_type] is not None:
                b = DataPHA(
                    filename,
                    channel=data["channel"],
                    counts=data[bkg_type],
                    bin_lo=data["bin_lo"],
                    bin_hi=data["bin_hi"],
                    grouping=data["grouping"],
                    quality=data["quality"],
                    exposure=data["exposure"],
                    backscal=data[bscal_type],
                    header=data["header"],
                )
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(bkg_type, filename))
                backgrounds.append(b)

        for k in ["backfile", "arffile", "rmffile", "backscup", "backscdn",
                  "background_up", "background_down"]:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = b.grouping is not None
            if b.quality is None:
                b.quality = pha.quality
            pha.set_background(b, id + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        output_once = False

    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
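For completeness, this is roughly how a read_pha function like the one above is reached in practice; source.pha is a placeholder for a local type I PHA file whose header points at its ARF, RMF and background files. Note also that the snippet uses izip, so it dates from Sherpa's Python 2 era; under Python 3 the equivalent is the built-in zip.

from sherpa.astro.io import read_pha

# read_pha may return a list when the file contains several spectra
# (e.g. a PHA type II file); a single dataset is assumed here.
pha = read_pha("source.pha")
print(pha.units)            # chosen by _set_initial_quantity() at the end of read_pha
print(pha.background_ids)   # backgrounds were attached before the units were set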