

Python AstroData.rename_ext Method Code Examples

This article collects typical usage examples of the Python method astrodata.AstroData.rename_ext. If you are wondering exactly what AstroData.rename_ext does, how to use it, and what it looks like in practice, the hand-picked code examples below should help. You can also explore further usage examples of its containing class, astrodata.AstroData.


Below are 15 code examples of the AstroData.rename_ext method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
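
Before the full examples, here is a minimal sketch of the call itself, assuming the classic (pre-DRAGONS) astrodata package used throughout this page; the file name single_ext.fits is hypothetical.

from astrodata import AstroData

# A minimal sketch: 'single_ext.fits' is a hypothetical single-extension file.
ad = AstroData('single_ext.fits')

# rename_ext sets the EXTNAME and, optionally, the EXTVER keyword of the
# single HDU held by this AstroData object.
ad.rename_ext('SCI', ver=1)

print ad.extname(), ad.extver()   # -> SCI 1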

Example 1: write_new_table

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
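# Note: like most excerpts on this page, this snippet assumes a module-level
# 'import pyfits as pf' (for pf.new_table); that import is an assumption here,
# since the excerpt omits the module header.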
 def write_new_table(self, fname):
     cols = list(self.get_columns())
     cols.extend(self.get_fiber_positions_columns())
     
     # Create the table HDU
     tablehdu = pf.new_table(cols)
     
     # Create an AstroData object to contain the table
     # and write to disk.
     new_ad = AstroData(tablehdu)
     new_ad.rename_ext('SCI', 1)
     new_ad.write(fname, clobber=True)
Author: pyrrho314, Project: recipesystem, Lines: 14, Source: integratedfieldunit.py

Example 2: as_astrodata

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
    def as_astrodata(self):
        """
            
          For each cut object in cut_list (with its SCI, DQ, and VAR set),
          form an HDU and append it to adout.  Update the keywords EXTNAME='SCI',
          EXTVER=<footprint#>, CCDSEC, DISPAXIS, CUTSECT, and CUTORDER in the
          header, and reset the WCS information if the input AD header had a WCS.

          ::

           Input:
              self.cut_list: List of Cut objects.
              self.adout:    Output AD object with MDF and
                             TRACEFP extensions.
           Output:
              adout: contains the appended HDUs.
        """

        adout = self._init_as_astrodata()

        ad = self.ad
        scihdr =        ad['SCI',1].header.copy()
        if self.has_dq:
            dqheader =  ad['DQ', 1].header.copy()
        if self.has_var:
            varheader = ad['VAR',1].header.copy()

        # Update NSCIEXT keyword to represent the current number of cuts.
        if new_pyfits_version:
            adout.phu.header.update = adout.phu.header.set
        adout.phu.header.update('NSCIEXT',len(self.cut_list)) 

        # header.update() was renamed to header.set() in PyFITS 3.1
        if new_pyfits_version:
            scihdr.update = scihdr.set
        extver = 1

        # Generate the cuts using the region's sci_cut,var_cut and
        # dq_cut
        for region,sci_cut,var_cut,dq_cut in self.cut_list: 
            rx1,rx2,ry1,ry2 = np.asarray(region) + 1   # To 1-based
            csec = '[%d:%d,%d:%d]'%(rx1,rx2,ry1,ry2)
            scihdr.update('NSCUTSEC',csec,
                          comment="Region extracted by 'cut_footprints'")
            scihdr.update('NSCUTSPC',extver,comment="Spectral order")
            form_extn_wcs(scihdr, self.wcs, region)
            new_sci_ext = AstroData(data=sci_cut,header=scihdr)
            new_sci_ext.rename_ext(name='SCI',ver=extver)
            adout.append(new_sci_ext)
            if self.has_dq:
                new_dq_ext = AstroData(data=dq_cut, header=dqheader)
                new_dq_ext.rename_ext(name='DQ',ver=extver)
                adout.append(new_dq_ext)
            if self.has_var:
                new_var_ext = AstroData(data=var_cut, header=varheader)
                new_var_ext.rename_ext(name='VAR',ver=extver)
                adout.append(new_var_ext)
            extver += 1

        return adout
Author: pyrrho314, Project: recipesystem, Lines: 62, Source: extract.py

Example 3: test_method_rename_ext_3

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
def test_method_rename_ext_3():
    ad = AstroData(TESTFILE2)      # Single 'SCI' ext
    ad.rename_ext("FOO")
    assert ad.extname() == "FOO"
Author: mmorage, Project: DRAGONS, Lines: 6, Source: test_AstroDataAPI.py

Example 4: test_method_rename_ext_2

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
def test_method_rename_ext_2():
    ad = AstroData(TESTFILE)      
    with pytest.raises(SingleHDUMemberExcept):
        ad.rename_ext("FOO")
Author: mmorage, Project: DRAGONS, Lines: 6, Source: test_AstroDataAPI.py

Example 5: test_method_rename_ext_1

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
def test_method_rename_ext_1():    # Raise on multi-ext 
    ad = AstroData(TESTFILE)
    with pytest.raises(SingleHDUMemberExcept):
        ad.rename_ext("SCI", ver=99)
Author: mmorage, Project: DRAGONS, Lines: 6, Source: test_AstroDataAPI.py
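
As the three tests above show, rename_ext operates only on a single-HDU AstroData object and raises SingleHDUMemberExcept on a multi-extension one. The usual workaround is to rename each extension individually. Here is a sketch, assuming (as other examples on this page suggest) that iterating over an AstroData object yields single-HDU slices; the file name multi_ext.fits is hypothetical.

from astrodata import AstroData

ad = AstroData('multi_ext.fits')   # hypothetical multi-extension file

# Each slice seen in the loop is a single-HDU AstroData object, so
# rename_ext can be applied to each extension in turn.
for ver, ext in enumerate(ad, 1):
    ext.rename_ext('SCI', ver=ver)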

Example 6: getRefs

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
    def getRefs(self):
        """ Run the add reference catalog. Actually adding the 
            Bintable to the input ad object.
 
        """

        from pyfits import Column

        log = self.log

        extname = 'REFCAT'

        outad = self.outad

        # Select catalog and format the output data
        usecols,formats,band,delimiter = self.selStdsCatalog()
        refid,ra,dec,fmag = self.readStds(usecols, formats, delimiter)

        # Loop through the SCI extensions
        for scix in outad['SCI']:

            xtver = scix.extver()

            # x,y are the coordinates of the reference stars within the 
            # input image field.
            g,x,y = self.search4standards(ra, dec, xtver)

            log.info("Found %d standards for field in  %s['SCI',%d]"%\
                (len(g[0]),outad.filename,xtver))

            # g: index array with the index of the standards within the field.
            if len(g[0])>0:
                nlines = len(ra)

                # If extension already exists, just update
                if outad[extname,xtver]:
                    log.info('Table already exists, updating values.')
                    tdata = outad[extname,xtver].data
                    theader = outad[extname, xtver].header
                    tdata.field('refid')[:] = refid[g]
                    tdata.field('ra')[:]    = ra[g]
                    tdata.field('dec')[:]   = dec[g]
                    tdata.field('x')[:]     = x
                    tdata.field('y')[:]     = y
                    tdata.field('refmag')[:]  = fmag[g]
                else:
                    c1 = Column (name='refid', format='22A', array=refid[g])
                    c2 = Column (name='ra',    format='E', array=ra[g])
                    c3 = Column (name='dec',   format='E', array=dec[g])
                    c4 = Column (name='x',     format='E', array=x)
                    c5 = Column (name='y',     format='E', array=y)
                    # band:       1-char:  'u','g','r','i' or 'z'
                    c6 = Column (name='refmag',unit=band,format='E',array=fmag[g])
                    colsdef = pf.ColDefs([c1,c2,c3,c4,c5,c6])

                    tbhdu = pf.new_table(colsdef)         # Creates a BINTABLE

                    # pyfits to AstroData
                    tabad = AstroData(tbhdu)

                    # Add or append keywords EXTNAME, EXTVER
                    tabad.rename_ext(extname, xtver)
                
                    outad.append(tabad)
            else:
                log.warning( 'No standard stars were found for this field.')

        return outad
Author: pyrrho314, Project: recipesystem, Lines: 70, Source: addReferenceCatalogs.py

Example 7: as_bintable

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........

        # Write data to table columns

        orientation = footprints[0].edges[0].orientation
        for k,footprint in enumerate(footprints):
            edge1 = footprint.edges[0]; edge2 = footprint.edges[1]
            tb.data.field('id')[k]        = footprint.id
            tb.data.field('region')[k]    = np.asarray(footprint.region)

            # EDGE_1 DATA with respect to original image co-ords
            range1 = np.asarray(edge1.xlim+edge1.ylim)  # (x1, x2, y1, y2)
            tb.data.field('range1')[k]    = range1
            tb.data.field('function1')[k] = edge1.function
            tb.data.field('order1')[k]    = edge1.order
            tb.data.field('coeff1')[k]    = edge1.coefficients

            # EDGE_2 DATA with respect to original image co-ords
            range2 = np.asarray(edge2.xlim+edge2.ylim)  # (x1, x2, y1, y2)
            tb.data.field('range2')[k]    = range2
            tb.data.field('function2')[k] = edge2.function
            tb.data.field('order2')[k]    = edge2.order
            tb.data.field('coeff2')[k]    = edge2.coefficients

            region_x1 = footprint.region[0]
            region_y1 = footprint.region[2]
            # Set up the coefficients of the edge fit functions. We are
            # shifting the origin, so refit.
            lcoeff=[]
            zval=[]
            for xx,yy in [edge1.trace,edge2.trace]:
                # We need to refit inside the cutregion
                xmr = xx - region_x1
                ymr = yy - region_y1
                if orientation == 0:
                    z = gfit.Gfit(xmr,ymr,edge1.function,edge1.order) 
                else:
                    z = gfit.Gfit(ymr,xmr,edge1.function,edge1.order) 
                lcoeff.append(z.coeff)
                zval.append(z)

            xlim1 = np.asarray(edge1.xlim)
            ylim1 = np.asarray(edge1.ylim)
            xlim2 = np.asarray(edge2.xlim)
            ylim2 = np.asarray(edge2.ylim)

            # Get the maximum values from both edges, so we can zero
            # the areas outside the footprint when cutting.
            #
            if orientation == 0:
                # Choose the largest x between both edges. 
                xmax = max(xlim1[1],xlim2[1])
                xlim1[1] = xmax
                xlim2[1] = xmax
                x1,x2 = (min(0,xlim1[0]),xmax)
                # And reevaluate the y values at this xmax
                y1 = ylim1[0] - region_y1
                y2 = zval[1](xmax)[0]
            else:
                # Choose the largest y between both edges
                ymax = max(ylim1[1],ylim2[1])
                ylim1[1] = ymax
                ylim2[1] = ymax
                y1,y2 = (min(0,ylim1[0]),ymax)
                # And reevaluate the x values at this ymax
                x1 = xlim1[0] - region_x1
                x2 = zval[1](ymax)[0] 

            # --- Set edge_1 data with respect to cutout image co-ords.
            tb.data.field('cutrange1')[k]    = (x1,x2,y1,y2)
            tb.data.field('cutfunction1')[k] = edge1.function
            tb.data.field('cutorder1')[k]    = edge1.order
            tb.data.field('cutcoeff1')[k]    = lcoeff[0]


            # --- Set edge_2 data with respect to cutout image co-ords
            # Applied offsets to range2 from footprint.region(x1,y1) 
            tb.data.field('cutrange2')[k]    = (x1,x2,y1,y2)
            tb.data.field('cutfunction2')[k] = edge2.function
            tb.data.field('cutorder2')[k]    = edge2.order
            tb.data.field('cutcoeff2')[k]    = lcoeff[1]

        # Add comment to TTYPE card
        hdr = tb.header
        if new_pyfits_version:
            hdr.update = hdr.set
        hdr.update('TTYPE2',hdr['TTYPE2'],
                  comment='(x1,y1,x2,y2): footprint window of pixel co-ords.')
        hdr.update('TTYPE3',hdr['TTYPE3'], comment='type of fitting function.')
        hdr.update('TTYPE4',hdr['TTYPE4'], comment='Number of coefficients.')
        hdr.update('TTYPE5',hdr['TTYPE5'], 
             comment='Coeff array: c[0]*x**3 + c[1]*x**2+ c[2]*x+c[3]')
        hdr.update('TTYPE6',hdr['TTYPE6'], 
             comment='(x1,y1,x2,y2): Edge fit window definition.')
        tb.header = hdr

        # Create an AD object with this
        tabad = AstroData(tbhdu)
        tabad.rename_ext("TRACEFP", 1)

        return tabad
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: extract.py

Example 8: merge_catalogs

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........
                # Turn the (col,row) tuple values into an index
                bindx = block[0]+nbx*block[1]
                nxx,nyy = self._transform_xy(bindx,xx,yy) 

                # Now change the origin of the block's (nxx,nyy) set to the
                # mosaic lower left. We find the offset of the lower-left corner
                # by adding the widths and the gaps of all the blocks to
                # the left of the current block.
                #  

                if tile: gap_mode = 'tile_gaps'
                else:    gap_mode = 'transform_gaps'
                gaps = self.geometry.gap_dict[gap_mode]
                # The block size in pixels.
                blksz_x,blksz_y = self.blocksize
                col,row = block
                # the sum of the gaps to the left of the current block
                sgapx = sum([gaps[k,row][0] for k in range(col+1)])
                # the sum of the gaps below of the current block
                sgapy = sum([gaps[col,k][1] for k in range(row+1)])
                ref_x1 = int(col*blksz_x + sgapx)
                ref_x2 = ref_x1 + blksz_x
                ref_y1 = int(row*blksz_y + sgapy)
                ref_y2 = int(ref_y1 + blksz_y)

                newdata[Xcolname] = nxx+ref_x1
                newdata[Ycolname] = nyy+ref_y1
                xx = []
                yy = []

        # Eliminate possible duplicate values in the ra, dec columns
        ra,  raindx  = np.unique(col_data[ra_colname].round(decimals=7),
                        return_index=True)
        dec, decindx = np.unique(col_data[dec_colname].round(decimals=7),
                        return_index=True)

        # Duplicates are those with the same index in the raindx and decindx
        # lists. Look for elements with different indices; to do this we need
        # to sort the lists.
        raindx.sort()
        decindx.sort()

        # See if the 2 arrays have the same length
        ilen = min(len(raindx), len(decindx))

        # Get the indices from the 2 lists of the same size
        v, = np.where(raindx[:ilen] != decindx[:ilen])
        if len(v) > 0:
            # Filter the duplicates
            try:
                for name in col_names:
                    col_data[name] = col_data[name][v]
            except:
                print 'ERROR:', len(v), name

        # Now that we have the catalog data from all extensions in the dictionary,
        # we calculate the new pixel position w/r to the reference WCS.
        # Only an Object table contains X,Y column information. Reference
        # catalogs do not.
        #
        if (recalculate_xy == 'wcs') and (Xcolname != None):

            xx = col_data[Xcolname]
            yy = col_data[Ycolname]
            ra = col_data[ra_colname]
            dec = col_data[dec_colname]

            # Get new pixel coordinates for all ra,dec in the dictionary.
            # Use the input wcs object.
            newx,newy = ref_wcs.wcs_sky2pix(ra,dec,1)

            # Update pixel position in the dictionary to the new values.
            col_data[Xcolname] = newx
            col_data[Ycolname] = newy

        # Create columns information
        columns = {}
        table_columns = []
        for name,format in zip(col_names,col_fmts):
            # Let add_catalog auto-number sources
            if name=="NUMBER":
                continue

            # Define pyfits columns
            data = columns.get(name, pf.Column(name=name,format=format,
                            array=col_data[name]))
            table_columns.append(data)

        # Make the output table using pyfits functions
        col_def = pf.ColDefs(table_columns)
        tb_hdu = pf.new_table(col_def)

        # Now make an AD object from this table
        adout = AstroData(tb_hdu)
        adout.rename_ext(tab_extname,1)

        # Append to any other new table we might have
        adoutput_list.append(adout)

        return adoutput_list
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: mosaicAD.py

Example 9: _calculate_var

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........
                units = "ADU"
            elif bunit == "electron" or bunit == "electrons":
                units = "electrons"
            else:
                # Perhaps something more sensible should be done here?
                raise Errors.InputError("No units found. Not calculating "
                                        "variance.")
            
            if add_read_noise:
                # Get the read noise value (in units of electrons) using the
                # appropriate descriptor. The read noise is only used if
                # add_read_noise is True
                read_noise = read_noise_dv.get_value(extver=extver)
                if read_noise is not None:
                    log.fullinfo("Read noise for %s[%s,%d] = %f"
                                 % (adinput.filename, SCI, extver, read_noise))
                    
                    # Determine the variance value to use when calculating the
                    # read noise component of the variance.
                    read_noise_var_value = read_noise
                    if units == "ADU":
                        read_noise_var_value = read_noise / gain
                    
                    # Add the read noise component of the variance to a zeros
                    # array that is the same size as the pixel data in the
                    # science extension
                    log.fullinfo("Calculating the read noise component of the "
                                 "variance in %s" % units)
                    var_array_rn = np.add(
                      np.zeros(ext.data.shape), (read_noise_var_value)**2)
                else:
                    logwarning("Read noise for %s[%s,%d] is None. Setting to "
                               "zero" % (adinput.filename, SCI, extver))
                    var_array_rn = np.zeros(ext.data.shape)
                    
            if add_poisson_noise:
                # Determine the variance value to use when calculating the
                # poisson noise component of the variance
                poisson_noise_var_value = ext.data
                if units == "ADU":
                    poisson_noise_var_value = ext.data / gain
                
                # Calculate the poisson noise component of the variance. Set
                # pixels that are less than or equal to zero to zero.
                log.fullinfo("Calculating the poisson noise component of "
                             "the variance in %s" % units)
                var_array_pn = np.where(
                  ext.data > 0, poisson_noise_var_value, 0)
            
            # Create the final variance array
            if add_read_noise and add_poisson_noise:
                var_array_final = np.add(var_array_rn, var_array_pn)
            
            if add_read_noise and not add_poisson_noise:
                var_array_final = var_array_rn
            
            if not add_read_noise and add_poisson_noise:
                var_array_final = var_array_pn
            
            var_array_final = var_array_final.astype(var_dtype)
            
            # If the read noise component and the poisson noise component are
            # calculated and added separately, then a variance extension will
            # already exist in the input AstroData object. In this case, just
            # add this new array to the current variance extension
            if adinput[VAR, extver]:
                
                # If both the read noise component and the poisson noise
                # component have been calculated, don't add to the variance
                # extension
                if add_read_noise and add_poisson_noise:
                    raise Errors.InputError(
                        "Cannot add read noise component and poisson noise "
                        "component to variance extension as the variance "
                        "extension already exists")
                else:
                    log.fullinfo("Combining the newly calculated variance "
                                 "with the current variance extension "
                                 "%s[%s,%d]" % (adinput.filename, VAR, extver))
                    adinput[VAR, extver].data = np.add(
                      adinput[VAR, extver].data,
                      var_array_final).astype(var_dtype)
            else:
                # Create the variance AstroData object
                var = AstroData(data=var_array_final)
                var.rename_ext(VAR, ver=extver)
                var.filename = adinput.filename
                
                # Call the _update_var_header helper function to update the
                # header of the variance extension with some useful keywords
                var = self._update_var_header(sci=ext, var=var, bunit=bunit)
                
                # Append the variance AstroData object to the input AstroData
                # object. 
                log.fullinfo("Adding the [%s,%d] extension to the input "
                             "AstroData object %s" % (VAR, extver,
                                                      adinput.filename))
                adinput.append(moredata=var)
        
        return adinput
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: primitives_standardize.py

Example 10: addDQ

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........
                                         "level = %.2f" % (ad.filename, SCI,
                                                           extver,
                                                           non_linear_level))

                            non_linear_array = np.where(
                                ((ext.data >= non_linear_level) &
                                (ext.data < saturation_level)), 2, 0)
                            
                        elif saturation_level < non_linear_level:
                            log.warning("%s[%s,%d] saturation_level value is"
                                        "less than the non_linear_level not"
                                        "flagging non linear pixels" %
                                        (ad.filname, SCI, extver))
                        else:
                            log.fullinfo("Saturation and non-linear values "
                                         "for %s[%s,%d] are the same. Only "
                                         "flagging saturated pixels."
                                         % (ad.filename, SCI, extver))
                            
                    else:
                        log.fullinfo("Flagging pixels in the DQ extension "
                                     "corresponding to non linear pixels "
                                     "in %s[%s,%d] using non linear "
                                     "level = %.2f" % (ad.filename, SCI, extver,
                                                       non_linear_level))

                        non_linear_array = np.where(
                            (ext.data >= non_linear_level), 2, 0)
                    
                    dq_bit_arrays.append(non_linear_array)

                # Create an array that contains pixels that have a value of 4
                # when that pixel is saturated in the input science extension
                if saturation_level is not None:
                    saturation_array = None
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to saturated pixels in "
                                 "%s[%s,%d] using saturation level = %.2f" %
                                 (ad.filename, SCI, extver, saturation_level))
                    saturation_array = np.where(
                        ext.data >= saturation_level, 4, 0)
                    dq_bit_arrays.append(saturation_array)
                
                # BPMs have an EXTNAME equal to DQ
                bpmname = None
                if final_bpm is not None:
                    bpm_array = None
                    bpmname = os.path.basename(final_bpm.filename)
                    log.fullinfo("Flagging pixels in the DQ extension "
                                 "corresponding to bad pixels in %s[%s,%d] "
                                 "using the BPM %s[%s,%d]" %
                                 (ad.filename, SCI, extver, bpmname, DQ, extver))
                    bpm_array = final_bpm[DQ, extver].data
                    dq_bit_arrays.append(bpm_array)
                
                # Create a single DQ extension from the three arrays (BPM,
                # non-linear and saturated)
                if not dq_bit_arrays:
                    # The BPM, non-linear and saturated arrays were not
                    # created. Create a single DQ array with all pixels set
                    # equal to 0 
                    log.fullinfo("The BPM, non-linear and saturated arrays "
                                 "were not created. Creating a single DQ "
                                 "array with all the pixels set equal to zero")
                    final_dq_array = np.zeros(ext.data.shape).astype(dq_dtype)

                else:
                    final_dq_array = self._bitwise_OR_list(dq_bit_arrays)
                    final_dq_array = final_dq_array.astype(dq_dtype)
                
                # Create a data quality AstroData object
                dq = AstroData(data=final_dq_array)
                dq.rename_ext(DQ, ver=extver)
                dq.filename = ad.filename
                
                # Call the _update_dq_header helper function to update the
                # header of the data quality extension with some useful
                # keywords
                dq = self._update_dq_header(sci=ext, dq=dq, bpmname=bpmname)
                
                # Append the DQ AstroData object to the input AstroData object
                log.fullinfo("Adding extension [%s,%d] to %s"
                             % (DQ, extver, ad.filename))
                ad.append(moredata=dq)
            
            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)
            
            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)
            
            # Append the output AstroData object to the list of output
            # AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction context
        rc.report_output(adoutput_list)
        
        yield rc
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: primitives_standardize.py

Example 11: addMDF

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........
             continue
         
         # Parameters specified on the command line to reduce are converted
         # to strings, including None
         if rc["mdf"] and rc["mdf"] != "None":
             # The user supplied an input to the mdf parameter
             mdf = rc["mdf"]
         else:
             # The user did not supply an input to the mdf parameter, so try
             # to find an appropriate one. Get the dictionary containing the
             # list of MDFs for all instruments and modes.
             all_mdf_dict = Lookups.get_lookup_table("Gemini/MDFDict",
                                                     "mdf_dict")
             
             # The MDFs are keyed by the instrument and the MASKNAME. Get
             # the instrument and the MASKNAME values using the appropriate
             # descriptors 
             instrument = ad.instrument()
             mask_name = ad.phu_get_key_value("MASKNAME")
             
             # Create the key for the lookup table
             if instrument is None or mask_name is None:
                 log.warning("Unable to create the key for the lookup "
                             "table (%s), so no MDF will be added"
                             % ad.exception_info)
                 
                 # Append the input AstroData object to the list of output
                 # AstroData objects without further processing
                 adoutput_list.append(ad)
                 continue
             
             key = "%s_%s" % (instrument, mask_name)
             
             # Get the appropriate MDF from the look up table
             if key in all_mdf_dict:
                 mdf = lookup_path(all_mdf_dict[key])
             else:
                 # The MASKNAME keyword defines the actual name of an MDF
                 if not mask_name.endswith(".fits"):
                     mdf = "%s.fits" % mask_name
                 else:
                     mdf = str(mask_name)
                 
                 # Check if the MDF exists in the current working directory
                 if not os.path.exists(mdf):
                     log.warning("The MDF %s was not found in the current "
                                 "working directory, so no MDF will be "
                                 "added" % mdf)
                 
                     # Append the input AstroData object to the list of output
                     # AstroData objects without further processing
                     adoutput_list.append(ad)
                     continue
         
         # Ensure that the MDFs are AstroData objects
         if not isinstance(mdf, AstroData):
             mdf_ad = AstroData(mdf)
         
         if mdf_ad is None:
             log.warning("Cannot convert %s into an AstroData object, so "
                         "no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
         
         # Check if the MDF is a single extension fits file
         if len(mdf_ad) > 1:
             log.warning("The MDF %s is not a single extension fits file, "
                         "so no MDF will be added" % mdf)
             
             # Append the input AstroData object to the list of output
             # AstroData objects without further processing
             adoutput_list.append(ad)
             continue
             
         # Name the extension appropriately
         mdf_ad.rename_ext("MDF", 1)
         
         # Append the MDF AstroData object to the input AstroData object
         log.fullinfo("Adding the MDF %s to the input AstroData object "
                      "%s" % (mdf_ad.filename, ad.filename))
         ad.append(moredata=mdf_ad)
         
         # Add the appropriate time stamps to the PHU
         gt.mark_history(adinput=ad, keyword=timestamp_key)
         
         # Change the filename
         ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                           strip=True)
         
         # Append the output AstroData object to the list of output
         # AstroData objects
         adoutput_list.append(ad)
     
     # Report the list of output AstroData objects to the reduction context
     rc.report_output(adoutput_list)
     
     yield rc
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: primitives_standardize.py

Example 12: runDS

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
    def runDS(self):        
        """ Do the actual object detection.
            - Create the table OBJCAT
            - return the output Astrodata object

        """
        log = self.log
        extname = 'OBJCAT'

        outad = self.outad

        for scix in outad['SCI']:

            xtver = scix.extver()
  
            # Mask the non-illuminated regions.
            if outad['BPM',xtver]:
                sdata = scix.data
                bpmdata = outad['BPM',xtver].data
                if bpmdata.shape != sdata.shape:       # bpmdata is already trimmed.
                    try:                             # See if DATASEC is in the header
                        dsec = scix.data_section()
                    except:
                        log.error("*** ERROR: DATASEC not found in SCI header.")
                        log.error("*** Cannot mask SCI: size(BPM) != size(SCI)." +
                                  " bpmsize: " + str(bpmdata.shape))
                        log.error("*** SCI data not masked.")
                    else:
                        # bpmdata is already trimmed.
                        s,e = map(int, dsec.split(',')[0][1:].split(':'))
                        #Trim number of columns to match the bpm data.
                        sdata = sdata[:,s-1:e]
                else:
                    bpmdata = np.where(bpmdata==0,1,0)
                    scix.data = sdata*bpmdata
              
            self.findObjects(scix)

            sciHeader = scix.header
            if len(self.x) == 0:
                log.warning( " **** WARNING: No objects were detected: Table OBJCAT, not created")
                continue
       
            wcs = pywcs.WCS(sciHeader)

            # Convert pixel coordinates to world coordinates
            # The second argument is "origin" -- in this case we're declaring we
            # have 1-based (Fortran-like) coordinates.

            xy = np.array(zip(self.x, self.y),np.float32)
            radec = wcs.wcs_pix2sky(xy, 1)

            ra,dec = radec[:,0],radec[:,1]

            nobjs = len(ra)
            log.info("Found %d sources for field in  %s['SCI',%d]"%\
                (nobjs ,outad.filename,xtver))
            
            if outad[extname,xtver]:
                log.info('Table already exists, updating values.')
                tdata = outad[extname, xtver].data
                theader = outad[extname, xtver].header
                tdata.field('id')[:]    = range(len(ra))
                tdata.field('x')[:]     = self.x
                tdata.field('y')[:]     = self.y
                tdata.field('ra')[:]    = ra
                tdata.field('dec')[:]   = dec
                tdata.field('flux')[:]  = self.flux
            else:
                #colsdef = self.define_Table_cols(ra, dec, flux, ellip, fwhm)
                colsdef = self.define_Table_cols(ra, dec, self.flux)
                tbhdu = pf.new_table(colsdef)         # Creates a BINTABLE

                th = tbhdu.header
                
                tabad = AstroData(tbhdu)
                tabad.rename_ext("OBJCAT", xtver)
            
                outad.append(tabad)

        return outad
Author: pyrrho314, Project: recipesystem, Lines: 83, Source: detectSources.py

Example 13: addReferenceCatalog

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]

#......... some code omitted .........
            dec.append(key[1])
            vals.append(jhk[key])
        # sort in ra
        order = np.argsort(ra)
        ra,dec = map(np.asarray, (ra,dec))
        ra = ra[order]
        dec = dec[order]
        vals = [vals[k] for k in order]
        # Get the magnitudes and errs from each record (j,je,h,he,k,ke,name)
        vals = np.asarray([vals[k][:6] for k in range(len(ra))])
        # Separate mags into J,H,K mags arrays for clarity
        irmag={}
        irmag['Jmag']=     vals[:,0]
        irmag['Jmag_err']= vals[:,1]
        irmag['Hmag']=     vals[:,2]
        irmag['Hmag_err']= vals[:,3]
        irmag['Kmag']=     vals[:,4]
        irmag['Kmag_err']= vals[:,5]

        #print 'JMAG00:',[(irmag['Jmag'][i],irmag['Jmag_err'][i]) 
        #                for i in range(5)]

        # Loop over each input AstroData object in the input list
        adinput = rc.get_inputs_as_astrodata()
        for ad in adinput:

            try:
                input_ra = ad.ra().as_pytype()
                input_dec = ad.dec().as_pytype()
            except:
                if "qa" in rc.context:
                    log.warning("No RA/Dec in header of %s; cannot find "\
                                "reference sources" % ad.filename)
                    adoutput_list.append(ad)
                    continue
                else:
                    raise

            table_name = 'jhk.tab'
            # Loop through the science extensions
            for sciext in ad['SCI']:
                extver = sciext.extver()

                # Did we get anything?
                if (1): # We do have a dict with ra,dec
                    # Create one table per extension

                    # Create a running id number
                    refid=range(1, len(ra)+1)

                    # Make the pyfits columns and table
                    c1 = pf.Column(name="Id",format="J",array=refid)
                    c3 = pf.Column(name="RAJ2000",format="D",unit="deg",array=ra)
                    c4 = pf.Column(name="DEJ2000",format="D",unit="deg",array=dec)
                    c5 = pf.Column(name="Jmag",format="E",array=irmag['Jmag'])
                    c6 = pf.Column(name="e_Jmag",format="E",array=irmag['Jmag_err'])
                    c7 = pf.Column(name="Hmag",format="E",array=irmag['Hmag'])
                    c8 = pf.Column(name="e_Hmag",format="E",array=irmag['Hmag_err'])
                    c9 = pf.Column(name="Kmag",format="E",array=irmag['Kmag'])
                    c10= pf.Column(name="e_Kmag",format="E",array=irmag['Kmag_err'])
                    col_def = pf.ColDefs([c1,c3,c4,c5,c6,c7,c8,c9,c10])
                    tb_hdu = pf.new_table(col_def)

                    # Add comments to the REFCAT header to describe it.
                    tb_hdu.header.add_comment('Source catalog derived from the %s'
                                         ' catalog on vizier' % table_name)

                    tb_ad = AstroData(tb_hdu)
                    tb_ad.rename_ext('REFCAT', extver)

                    if(ad['REFCAT',extver]):
                        log.fullinfo("Replacing existing REFCAT in %s" % ad.filename)
                        ad.remove(('REFCAT', extver))
                    else:
                        log.fullinfo("Adding REFCAT to %s" % ad.filename)
                    ad.append(tb_ad)

            # Match the object catalog against the reference catalog
            # Update the refid and refmag columns in the object catalog
            if ad.count_exts("OBJCAT")>0:
                ad = _match_objcat_refcat(adinput=ad)[0]
            else:
                log.warning("No OBJCAT found; not matching OBJCAT to REFCAT")

            # Add the appropriate time stamps to the PHU
            gt.mark_history(adinput=ad, keyword=timestamp_key)

            # Change the filename
            ad.filename = gt.filename_updater(adinput=ad, suffix=rc["suffix"],
                                              strip=True)

            # Append the output AstroData object to the list 
            # of output AstroData objects
            adoutput_list.append(ad)

        # Report the list of output AstroData objects to the reduction
        # context
        rc.report_output(adoutput_list)

        yield rc
Author: pyrrho314, Project: recipesystem, Lines: 104, Source: primitives_NIRI_IMAGE.py

Example 14: makeFringeFrame

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
    def makeFringeFrame(self,rc):

        # Instantiate the log
        log = gemLog.getGeminiLog(logType=rc["logType"],
                                  logLevel=rc["logLevel"])

        # Log the standard "starting primitive" debug message
        log.debug(gt.log_message("primitive", "makeFringeFrame", 
                                 "starting"))

        # Initialize the list of output AstroData objects
        adoutput_list = []

        # Check for at least 3 input frames
        adinput = rc.get_inputs_as_astrodata()
        if len(adinput)<3:
            log.stdinfo('Fewer than 3 frames provided as input. ' +
                        'Not making fringe frame.')

            # Report the empty list to the reduction context
            rc.report_output(adoutput_list)
        
        else:
            rc.run("correctBackgroundToReferenceImage"\
                       "(remove_zero_level=True)")

            # If needed, do a rough median on all frames, subtract,
            # and then redetect to help distinguish sources from fringes
            sub_med = rc["subtract_median_image"]
            if sub_med:
                adinput = rc.get_inputs_as_astrodata()

                # Get data by science extension
                data = {}
                for ad in adinput:
                    for sciext in ad["SCI"]:
                        key = (sciext.extname(),sciext.extver())
                        if data.has_key(key):
                            data[key].append(sciext.data)
                        else:
                            data[key] = [sciext.data]


                # Make a median image for each extension
                import pyfits as pf
                median_ad = AstroData()
                median_ad.filename = gt.filename_updater(
                    adinput=adinput[0], suffix="_stack_median", strip=True)
                for key in data:
                    med_data = np.median(np.dstack(data[key]),axis=2)
                    hdr = pf.Header()
                    ext = AstroData(data=med_data, header=hdr)
                    ext.rename_ext(key)
                    median_ad.append(ext)

                # Subtract the median image
                rc["operand"] = median_ad
                rc.run("subtract")

                # Redetect to get a good object mask
                rc.run("detectSources")

                # Add the median image back in to the input
                rc.run("add")

            # Add the object mask into the DQ plane
            rc.run("addObjectMaskToDQ")
            
            # Stack frames with masking from DQ plane
            rc.run("stackFrames(operation=%s)" % rc["operation"])

        yield rc
Author: pyrrho314, Project: recipesystem, Lines: 74, Source: primitives_GMOS_IMAGE.py

Example 15: test_method_rename_ext_4

# Required import: from astrodata import AstroData [as alias]
# Alternatively: from astrodata.AstroData import rename_ext [as alias]
def test_method_rename_ext_4():
    ad = AstroData(TESTFILE2)
    ad.rename_ext("FOO", ver=99)
    assert ad.extname() == "FOO"
    assert ad.extver() == 99
Author: mmorage, Project: DRAGONS, Lines: 7, Source: test_AstroDataAPI.py


Note: The astrodata.AstroData.rename_ext method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.