当前位置: 首页>>代码示例>>Python>>正文


Python DataPHA.grouped方法代码示例

本文整理汇总了Python中sherpa.astro.data.DataPHA.grouped方法的典型用法代码示例。如果您正苦于以下问题:Python DataPHA.grouped方法的具体用法?Python DataPHA.grouped怎么用?Python DataPHA.grouped使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sherpa.astro.data.DataPHA的用法示例。


在下文中一共展示了DataPHA.grouped方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: read_pha

# 需要导入模块: from sherpa.astro.data import DataPHA [as 别名]
# 或者: from sherpa.astro.data.DataPHA import grouped [as 别名]
def read_pha(arg, use_errors=False, use_background=False):
    """Read PHA dataset(s) plus their ARF/RMF responses and background files.

    read_pha( filename [, use_errors=False [, use_background=False]] )

    read_pha( PHACrate [, use_errors=False [, use_background=False]] )

    When ``use_errors`` is False, any statistical/systematic error columns
    found in the file are reported and then discarded.  When
    ``use_background`` is True, the data being read is itself a background
    set, so background files referenced by it are not read (no backgrounds
    of backgrounds).

    NOTE(review): this excerpt is truncated part-way through the final
    per-background loop; the remainder of the function is not shown here.
    """
    # The backend returns one header/column dict per spectrum in the file.
    datasets, filename = backend.get_pha_data(arg, use_background=use_background)
    phasets = []
    # Informational/warning messages are emitted only while processing the
    # first spectrum of a multi-spectrum file.
    output_once = True
    for data in datasets:
        if not use_errors:
            if data["staterror"] is not None or data["syserror"] is not None:
                # Determine which error columns were present, purely for the
                # wording of the info message below.
                if data["staterror"] is None:
                    msg = "systematic"
                elif data["syserror"] is None:
                    msg = "statistical"
                    # NOTE(review): only this branch warns about the missing
                    # column; the staterror-is-None branch above does not
                    # emit the symmetric "statistical errors were not found"
                    # warning — confirm whether that is intentional.
                    if output_once:
                        wmsg = "systematic errors were not found in " + "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = "statistical and systematic"
                if output_once:
                    imsg = (
                        msg
                        + " errors were found in file "
                        + "'{}' \nbut not used; ".format(filename)
                        + "to use them, re-read with use_errors=True"
                    )
                    info(imsg)
                # Drop the columns so errors are later derived from the
                # chosen statistic rather than taken from the file.
                data["staterror"] = None
                data["syserror"] = None

        # Ancillary (ARF/RMF) files are looked up relative to the PHA file.
        dname = os.path.dirname(filename)
        albl = "ARF"
        rlbl = "RMF"
        if use_background:
            albl = albl + " (background)"
            rlbl = rlbl + " (background)"

        arf = _read_ancillary(data, "arffile", albl, dname, read_arf, output_once)
        rmf = _read_ancillary(data, "rmffile", rlbl, dname, read_rmf, output_once)

        backgrounds = []

        # A BACKFILE value of "none" (any case) means no background file.
        if data["backfile"] and data["backfile"].lower() != "none":
            try:
                # A bare file name is resolved relative to the PHA file's
                # directory.
                if os.path.dirname(data["backfile"]) == "":
                    data["backfile"] = os.path.join(os.path.dirname(filename), data["backfile"])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data["backfile"], use_errors, True)

                    if output_once:
                        info("read background file {}".format(data["backfile"]))

                # The recursive read_pha call may return either a single
                # dataset or a list of them (see the tail of this routine).
                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        # A background without its own response inherits the
                        # source's ARF/RMF.
                        if bkg_dataset.get_response() == (None, None) and rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            # NOTE(review): bare except downgrades *any* failure (including
            # KeyboardInterrupt/SystemExit) to a warning — consider
            # narrowing to Exception.
            except:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        # BACKGROUND_UP/BACKGROUND_DOWN columns (presumably grating data —
        # TODO confirm) become extra background datasets with their own
        # BACKSCUP/BACKSCDN scalings.
        for bkg_type, bscal_type in izip(("background_up", "background_down"), ("backscup", "backscdn")):
            if data[bkg_type] is not None:
                b = DataPHA(
                    filename,
                    channel=data["channel"],
                    counts=data[bkg_type],
                    bin_lo=data["bin_lo"],
                    bin_hi=data["bin_hi"],
                    grouping=data["grouping"],
                    quality=data["quality"],
                    exposure=data["exposure"],
                    backscal=data[bscal_type],
                    header=data["header"],
                )
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(bkg_type, filename))
                backgrounds.append(b)

        # Strip keys that are not DataPHA constructor arguments before
        # forwarding the remainder as keyword arguments.
        for k in ["backfile", "arffile", "rmffile", "backscup", "backscdn", "background_up", "background_down"]:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        # Backgrounds lacking their own grouping/quality inherit them from
        # the source dataset; grouped is enabled only if a grouping exists.
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = b.grouping is not None
            if b.quality is None:
#.........这里部分代码省略.........
开发者ID:OrbitalMechanic,项目名称:sherpa,代码行数:103,代码来源:__init__.py

示例2: read_pha

# 需要导入模块: from sherpa.astro.data import DataPHA [as 别名]
# 或者: from sherpa.astro.data.DataPHA import grouped [as 别名]

#.........这里部分代码省略.........
                        # NOTE(review): fragment — the opening of read_pha
                        # (signature, error-column detection) is omitted in
                        # this excerpt.  Warn that the file lacked a
                        # systematic-error column.
                        wmsg = "systematic errors were not found in " + \
                               "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = 'statistical and systematic'
                if output_once:
                    imsg = msg + " errors were found in file " + \
                           "'{}' \nbut not used; ".format(filename) + \
                           "to use them, re-read with use_errors=True"
                    info(imsg)
                # Drop the columns so errors are later derived from the
                # chosen statistic rather than taken from the file.
                data['staterror'] = None
                data['syserror'] = None

        # Ancillary (ARF/RMF) files are looked up relative to the PHA file.
        dname = os.path.dirname(filename)
        albl = 'ARF'
        rlbl = 'RMF'
        if use_background:
            albl = albl + ' (background)'
            rlbl = rlbl + ' (background)'

        arf = _read_ancillary(data, 'arffile', albl, dname, read_arf,
                              output_once)
        rmf = _read_ancillary(data, 'rmffile', rlbl, dname, read_rmf,
                              output_once)

        backgrounds = []

        # A BACKFILE value of "none" (any case) means no background file.
        if data['backfile'] and data['backfile'].lower() != 'none':
            try:
                # A bare file name is resolved relative to the PHA file's
                # directory.
                if os.path.dirname(data['backfile']) == '':
                    data['backfile'] = os.path.join(os.path.dirname(filename),
                                                    data['backfile'])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data['backfile'], use_errors, True)

                    if output_once:
                        info('read background file {}'.format(
                            data['backfile']))

                # The recursive read_pha call may return a single dataset or
                # a list of them (see the return at the end of the routine).
                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        # A background without its own response inherits the
                        # source's ARF/RMF.
                        if bkg_dataset.get_response() == (None, None) and \
                           rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and \
                       rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            # NOTE(review): bare except downgrades *any* failure to a
            # warning — consider narrowing to Exception.
            except:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        # BACKGROUND_UP/BACKGROUND_DOWN columns (presumably grating data —
        # TODO confirm) become extra background datasets with their own
        # BACKSCUP/BACKSCDN scalings.
        for bkg_type, bscal_type in izip(('background_up', 'background_down'),
                                         ('backscup', 'backscdn')):
            if data[bkg_type] is not None:
                b = DataPHA(filename,
                            channel=data['channel'],
                            counts=data[bkg_type],
                            bin_lo=data['bin_lo'],
                            bin_hi=data['bin_hi'],
                            grouping=data['grouping'],
                            quality=data['quality'],
                            exposure=data['exposure'],
                            backscal=data[bscal_type],
                            header=data['header'])
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(
                        bkg_type, filename))
                backgrounds.append(b)

        # Strip keys that are not DataPHA constructor arguments before
        # forwarding the remainder as keyword arguments.
        for k in ['backfile', 'arffile', 'rmffile', 'backscup', 'backscdn',
                  'background_up', 'background_down']:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        # Backgrounds lacking their own grouping/quality inherit them from
        # the source; grouped is enabled only when a grouping exists.
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = (b.grouping is not None)
            if b.quality is None:
                b.quality = pha.quality
            # Background identifiers are 1-based.
            pha.set_background(b, id + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        # Suppress repeat messages for any further spectra in this file.
        output_once = False

    # A single-spectrum file yields the dataset itself, not a 1-item list.
    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
开发者ID:DougBurke,项目名称:sherpa,代码行数:104,代码来源:__init__.py

示例3: read_pha

# 需要导入模块: from sherpa.astro.data import DataPHA [as 别名]
# 或者: from sherpa.astro.data.DataPHA import grouped [as 别名]

#.........这里部分代码省略.........
                        # NOTE(review): fragment — the opening of read_pha
                        # (signature, error-column detection) is omitted in
                        # this excerpt.  Warn that the file lacked a
                        # systematic-error column.
                        wmsg = "systematic errors were not found in " + \
                               "file '{}'".format(filename)
                        warning(wmsg)
                else:
                    msg = 'statistical and systematic'
                if output_once:
                    imsg = msg + " errors were found in file " + \
                           "'{}' \nbut not used; ".format(filename) + \
                           "to use them, re-read with use_errors=True"
                    info(imsg)
                # Drop the columns so errors are later derived from the
                # chosen statistic rather than taken from the file.
                data['staterror'] = None
                data['syserror'] = None

        # Ancillary (ARF/RMF) files are looked up relative to the PHA file.
        dname = os.path.dirname(filename)
        albl = 'ARF'
        rlbl = 'RMF'
        if use_background:
            albl = albl + ' (background)'
            rlbl = rlbl + ' (background)'

        arf = _read_ancillary(data, 'arffile', albl, dname, read_arf,
                              output_once)
        rmf = _read_ancillary(data, 'rmffile', rlbl, dname, read_rmf,
                              output_once)

        backgrounds = []

        # A BACKFILE value of "none" (any case) means no background file.
        if data['backfile'] and data['backfile'].lower() != 'none':
            try:
                # A bare file name is resolved relative to the PHA file's
                # directory.
                if os.path.dirname(data['backfile']) == '':
                    data['backfile'] = os.path.join(os.path.dirname(filename),
                                                    data['backfile'])

                bkg_datasets = []
                # Do not read backgrounds of backgrounds
                if not use_background:
                    bkg_datasets = read_pha(data['backfile'], use_errors, True)

                    if output_once:
                        info('read background file {}'.format(
                            data['backfile']))

                # The recursive read_pha call may return a single dataset or
                # a list of them (see the return at the end of the routine).
                if numpy.iterable(bkg_datasets):
                    for bkg_dataset in bkg_datasets:
                        # A background without its own response inherits the
                        # source's ARF/RMF.
                        if bkg_dataset.get_response() == (None, None) and \
                           rmf is not None:
                            bkg_dataset.set_response(arf, rmf)
                        backgrounds.append(bkg_dataset)
                else:
                    if bkg_datasets.get_response() == (None, None) and \
                       rmf is not None:
                        bkg_datasets.set_response(arf, rmf)
                    backgrounds.append(bkg_datasets)

            # NOTE(review): bare except downgrades *any* failure to a
            # warning — consider narrowing to Exception.
            except:
                if output_once:
                    warning(str(sys.exc_info()[1]))

        # BACKGROUND_UP/BACKGROUND_DOWN columns (presumably grating data —
        # TODO confirm) become extra background datasets with their own
        # BACKSCUP/BACKSCDN scalings.
        for bkg_type, bscal_type in izip(('background_up', 'background_down'),
                                         ('backscup', 'backscdn')):
            if data[bkg_type] is not None:
                b = DataPHA(filename,
                            channel=data['channel'],
                            counts=data[bkg_type],
                            bin_lo=data['bin_lo'],
                            bin_hi=data['bin_hi'],
                            grouping=data['grouping'],
                            quality=data['quality'],
                            exposure=data['exposure'],
                            backscal=data[bscal_type],
                            header=data['header'])
                b.set_response(arf, rmf)
                if output_once:
                    info("read {} into a dataset from file {}".format(
                        bkg_type, filename))
                backgrounds.append(b)

        # Strip keys that are not DataPHA constructor arguments before
        # forwarding the remainder as keyword arguments.
        for k in ['backfile', 'arffile', 'rmffile', 'backscup', 'backscdn',
                  'background_up', 'background_down']:
            data.pop(k, None)

        pha = DataPHA(filename, **data)
        pha.set_response(arf, rmf)
        # Backgrounds lacking their own grouping/quality inherit them from
        # the source; grouped is enabled only when a grouping exists.
        for id, b in enumerate(backgrounds):
            if b.grouping is None:
                b.grouping = pha.grouping
                b.grouped = (b.grouping is not None)
            if b.quality is None:
                b.quality = pha.quality
            # Background identifiers are 1-based.
            pha.set_background(b, id + 1)

        # set units *after* bkgs have been set
        pha._set_initial_quantity()
        phasets.append(pha)
        # Suppress repeat messages for any further spectra in this file.
        output_once = False

    # A single-spectrum file yields the dataset itself, not a 1-item list.
    if len(phasets) == 1:
        phasets = phasets[0]

    return phasets
开发者ID:abigailStev,项目名称:sherpa,代码行数:104,代码来源:__init__.py


注:本文中的sherpa.astro.data.DataPHA.grouped方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。