This article collects typical usage examples of the Python function tkp.db.associations.associate_extracted_sources. If you have been wondering exactly how associate_extracted_sources is used, or what calling it looks like in practice, the hand-picked code examples below should help.
Fifteen code examples of the associate_extracted_sources function are shown below, ordered by popularity by default.
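Before the individual examples, the sketch below pulls out the calling pattern most of them share: create a tkp.db.Image for a dataset, insert that image's extracted sources, then run associate_extracted_sources on the image id. This is a minimal sketch, not taken verbatim from any one example: the import paths for insert_extracted_sources, columns_from_table and the db_subs test helpers are assumptions inferred from how the examples below use those names, and the deRuiter_r / new_source_sigma_margin values simply mirror the 3.7 / 3 used in Example 10.

# Minimal sketch of the per-image association sequence used throughout the
# examples below. Import paths and parameter values are assumptions inferred
# from those examples, not verified against a particular TraP release, and a
# configured tkp.db database connection is required for this to run.
import tkp.db
from tkp.db.associations import associate_extracted_sources
from tkp.db.general import insert_extracted_sources      # assumed module path
from tkp.db.generic import columns_from_table            # assumed module path
from tkp.testutil import db_subs                          # assumed module path

deRuiter_r = 3.7               # association radius, as in Example 10
new_source_sigma_margin = 3    # new-source margin, as in Example 10

dataset = tkp.db.DataSet(data={'description': 'association demo'},
                         database=tkp.db.Database())
im_params = db_subs.generate_timespaced_dbimages_data(1)[0]
image = tkp.db.Image(dataset=dataset, data=im_params)

# One blind extraction at the image centre, then the association step.
src = db_subs.example_extractedsource_tuple(ra=im_params['centre_ra'],
                                            dec=im_params['centre_decl'])
insert_extracted_sources(image._id, [src], 'blind')
associate_extracted_sources(image._id, deRuiter_r, new_source_sigma_margin)

# After association the running catalogue holds one entry for this dataset.
runcats = columns_from_table('runningcatalog', where={'dataset': dataset.id})
print(len(runcats))  # expected: 1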
Example 1: test_basic_same_field_case
def test_basic_same_field_case(self):
    """ Here we start with 1 source in image0.
    We then add image1 (same field as image0), with a double association
    for the source, and check assocskyrgn updates correctly.
    """
    n_images = 2
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    idx = 0
    src_a = db_subs.example_extractedsource_tuple(
        ra=im_params[idx]['centre_ra'],
        dec=im_params[idx]['centre_decl'])
    src_b = src_a._replace(ra=src_a.ra + 1. / 60.)  # 1 arcminute offset

    imgs = []
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
    insert_extracted_sources(imgs[idx]._id, [src_a])
    associate_extracted_sources(imgs[idx]._id, deRuiter_r, new_source_sigma_margin)

    idx = 1
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
    insert_extracted_sources(imgs[idx]._id, [src_a, src_b])
    associate_extracted_sources(imgs[idx]._id, deRuiter_r, new_source_sigma_margin)
    imgs[idx].update()

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 2)  # Just a sanity check.

    skyassocs = columns_from_table('assocskyrgn',
                                   where={'skyrgn': imgs[idx]._data['skyrgn']})
    self.assertEqual(len(skyassocs), 2)
Example 2: test_one2oneflux
def test_one2oneflux(self):
    dataset = tkp.db.DataSet(database=self.database,
                             data={'description': 'flux test set: 1-1'})
    n_images = 3
    im_params = db_subs.example_dbimage_datasets(n_images)

    src_list = []
    src = db_subs.example_extractedsource_tuple()
    src0 = src._replace(flux=2.0)
    src_list.append(src0)
    src1 = src._replace(flux=2.5)
    src_list.append(src1)
    src2 = src._replace(flux=2.4)
    src_list.append(src2)

    for idx, im in enumerate(im_params):
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
        image.insert_extracted_sources([src_list[idx]])
        associate_extracted_sources(image.id, deRuiter_r=3.717)

    query = """\
    SELECT rf.avg_f_int
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset)s
       AND r.id = rf.runcat
    """
    self.database.cursor.execute(query, {'dataset': dataset.id})
    result = zip(*self.database.cursor.fetchall())
    avg_f_int = result[0]
    self.assertEqual(len(avg_f_int), 1)
    self.assertAlmostEqual(avg_f_int[0], 2.3)
Example 3: test_probably_not_a_transient
def test_probably_not_a_transient(self):
    """
    No source at 250MHz, but we detect a source at 50MHz.
    Not necessarily a transient.
    Should trivially ignore 250MHz data when looking at a new 50MHz source.
    """
    img_params = self.img_params
    img0 = img_params[0]

    # This time around, we just manually exclude the steady src from
    # the first image detections.
    steady_low_freq_src = MockSource(
        example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                      dec=img_params[0]['centre_decl']),
        lightcurve=defaultdict(lambda: self.always_detectable_flux)
    )

    # Insert first image, no sources.
    tkp.db.Image(data=img_params[0], dataset=self.dataset)
    # Now set up second image.
    img1 = tkp.db.Image(data=img_params[1], dataset=self.dataset)
    xtr = steady_low_freq_src.simulate_extraction(img1,
                                                  extraction_type='blind')
    insert_extracted_sources(img1._id, [xtr], 'blind')
    associate_extracted_sources(img1._id, deRuiter_r,
                                self.new_source_sigma_margin)
    transients = get_newsources_for_dataset(self.dataset.id)
    # Should have no marked transients
    self.assertEqual(len(transients), 0)
Example 4: TestMeridianLowerEdgeCase
def TestMeridianLowerEdgeCase(self):
    """What happens if a source is right on the meridian?"""
    dataset = DataSet(data={'description': "Assoc 1-to-1:" +
                                           self._testMethodName})
    n_images = 3
    im_params = db_subs.example_dbimage_datasets(n_images, centre_ra=0.5,
                                                 centre_decl=10)
    src_list = []
    src0 = db_subs.example_extractedsource_tuple(ra=0.0002, dec=10.5,
                                                 ra_fit_err=0.01,
                                                 dec_fit_err=0.01)
    src_list.append(src0)
    src1 = src0._replace(ra=0.0003)
    src_list.append(src1)
    src2 = src0._replace(ra=0.0004)
    src_list.append(src2)

    for idx, im in enumerate(im_params):
        im['centre_ra'] = 359.9
        image = tkp.db.Image(dataset=dataset, data=im)
        image.insert_extracted_sources([src_list[idx]])
        associate_extracted_sources(image.id, deRuiter_r=3.717)

    runcat = columns_from_table('runningcatalog', ['datapoints', 'wm_ra'],
                                where={'dataset': dataset.id})
    # print "***\nRESULTS:", runcat, "\n*****"
    self.assertEqual(len(runcat), 1)
    self.assertEqual(runcat[0]['datapoints'], 3)
    avg_ra = (src0.ra + src1.ra + src2.ra) / 3
    self.assertAlmostEqual(runcat[0]['wm_ra'], avg_ra)
Example 5: test_two_field_overlap_new_transient
def test_two_field_overlap_new_transient(self):
    """Now for something more interesting - two overlapping fields, 4 sources:
    one steady source only in lower field,
    one steady source in both fields,
    one steady source only in upper field,
    one transient source in both fields but only at 2nd timestep.
    """
    n_images = 2
    xtr_radius = 1.5
    im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                          xtr_radius=xtr_radius)
    im_params[1]['centre_decl'] += xtr_radius * 1

    imgs = []
    lower_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
    upper_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[1]['centre_ra'],
        dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
    overlap_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
    overlap_transient = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

    insert_extracted_sources(imgs[0]._id, [lower_steady_src, overlap_steady_src])
    associate_extracted_sources(imgs[0]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[0].id)
    self.assertEqual(len(nd_posns), 0)

    insert_extracted_sources(imgs[1]._id, [upper_steady_src, overlap_steady_src,
                                           overlap_transient])
    associate_extracted_sources(imgs[1]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[1].id)
    self.assertEqual(len(nd_posns), 0)

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 4)  # sanity check.

    newsources_qry = """\
    SELECT *
      FROM newsource tr
          ,runningcatalog rc
     WHERE rc.dataset = %s
       AND tr.runcat = rc.id
    """
    self.database.cursor.execute(newsources_qry, (self.dataset.id,))
    newsources = get_db_rows_as_dicts(self.database.cursor)
    self.assertEqual(len(newsources), 1)
Example 6: test_marginal_transient
def test_marginal_transient(self):
    """
    ( flux1 > (rms_min0 * (det0 + margin)) )
    but ( flux1 < (rms_max0 * (det0 + margin)) )
    --> Possible transient

    If it was in a region of rms_min, we would (almost certainly) have seen
    it in the first image. So new source --> Possible transient.
    But if it was in a region of rms_max, then perhaps we would have missed
    it. In which case, new source --> Just seeing deeper.

    Note that if we are tiling overlapping images, then the first time
    a field is processed with image-centre at the edge of the old field,
    we may get a bunch of unhelpful 'possible transients'.
    Furthermore, this will pick up fluctuating sources near the
    image-margins even with a fixed field of view.
    But without a more complex store of image-rms-per-position, we cannot
    do better.
    Hopefully we can use a 'distance from centre' feature to separate out
    the good and bad candidates in this case.
    """
    img_params = self.img_params

    # Must pick flux value carefully to fire correct logic branch:
    marginal_transient_flux = self.reliably_detected_at_image_centre_flux
    marginal_transient = MockSource(
        example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                      dec=img_params[0]['centre_decl']),
        lightcurve={img_params[1]['taustart_ts']: marginal_transient_flux}
    )

    # First, check that we've set up the test correctly
    rms_min0 = img_params[0]['rms_min']
    rms_max0 = img_params[0]['rms_max']
    det0 = img_params[0]['detection_thresh']
    self.assertTrue(marginal_transient_flux <
                    rms_max0 * (det0 + self.new_source_sigma_margin))
    self.assertTrue(marginal_transient_flux >
                    rms_min0 * (det0 + self.new_source_sigma_margin))

    for pars in self.img_params:
        img = tkp.db.Image(data=pars, dataset=self.dataset)
        xtr = marginal_transient.simulate_extraction(img,
                                                     extraction_type='blind')
        if xtr is not None:
            insert_extracted_sources(img._id, [xtr], 'blind')
        associate_extracted_sources(img._id, deRuiter_r,
                                    self.new_source_sigma_margin)

    newsources = get_newsources_for_dataset(self.dataset.id)
    # Should have one 'possible' transient
    self.assertEqual(len(newsources), 1)
    self.assertTrue(
        newsources[0]['low_thresh_sigma'] > self.new_source_sigma_margin)
    self.assertTrue(
        newsources[0]['high_thresh_sigma'] < self.new_source_sigma_margin)
Example 7: test_single_fixed_source
def test_single_fixed_source(self):
    """test_single_fixed_source

    - Pretend to extract the same source in each of a series of images.
    - Perform source association
    - Check the image source listing works
    - Check runcat, assocxtrsource.
    """
    fixed_src_runcat_id = None
    for img_idx, im in enumerate(self.im_params):
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        last_img = self.db_imgs[-1]

        insert_extracted_sources(last_img._id,
                                 [db_subs.example_extractedsource_tuple()],
                                 'blind')
        associate_extracted_sources(last_img._id, deRuiter_r,
                                    new_source_sigma_margin)

        running_cat = columns_from_table(table="runningcatalog",
                                         keywords=['id', 'datapoints'],
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 1)
        self.assertEqual(running_cat[0]['datapoints'], img_idx + 1)

        # Check runcat ID does not change for a steady single source
        if img_idx == 0:
            fixed_src_runcat_id = running_cat[0]['id']
            self.assertIsNotNone(fixed_src_runcat_id,
                                 "No runcat id assigned to source")
        self.assertEqual(running_cat[0]['id'], fixed_src_runcat_id,
                         "Multiple runcat ids for same fixed source")

        runcat_flux = columns_from_table(table="runningcatalog_flux",
                                         keywords=['f_datapoints'],
                                         where={"runcat": fixed_src_runcat_id})
        self.assertEqual(len(runcat_flux), 1)
        self.assertEqual(img_idx + 1, runcat_flux[0]['f_datapoints'])

        last_img.update()
        last_img.update_sources()
        img_xtrsrc_ids = [src.id for src in last_img.sources]
        self.assertEqual(len(img_xtrsrc_ids), 1)

        # Get the association row for most recent extraction:
        assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                               keywords=['runcat', 'xtrsrc'],
                                               where={"xtrsrc": img_xtrsrc_ids[0]})
        # print "ImageID:", last_img.id
        # print "Imgs sources:", img_xtrsrc_ids
        # print "Assoc entries:", assocxtrsrcs_rows
        # print "First extracted source id:", ds_source_ids[0]
        # if len(assocxtrsrcs_rows):
        #     print "Associated source:", assocxtrsrcs_rows[0]['xtrsrc']
        self.assertEqual(len(assocxtrsrcs_rows), 1,
                         msg="No entries in assocxtrsrcs for image number " +
                             str(img_idx))
        self.assertEqual(assocxtrsrcs_rows[0]['runcat'], fixed_src_runcat_id,
                         "Mismatched runcat id in assocxtrsrc table")
Example 8: test_only_first_epoch_source
def test_only_first_epoch_source(self):
    """test_only_first_epoch_source

    - Pretend to extract a source only from the first image.
    - Run source association for each image, as we would in TraP.
    - Check the image source listing works
    - Check runcat and assocxtrsource are correct.
    """
    first_epoch = True
    extracted_source_ids = []
    for im in self.im_params:
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        last_img = self.db_imgs[-1]

        if first_epoch:
            insert_extracted_sources(last_img._id,
                                     [db_subs.example_extractedsource_tuple()],
                                     'blind')

        associate_extracted_sources(last_img._id, deRuiter_r,
                                    new_source_sigma_margin)

        # First, check the runcat has been updated correctly
        running_cat = columns_from_table(table="runningcatalog",
                                         keywords=['datapoints'],
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 1)
        self.assertEqual(running_cat[0]['datapoints'], 1)

        last_img.update()
        last_img.update_sources()
        img_xtrsrc_ids = [src.id for src in last_img.sources]

        if first_epoch:
            self.assertEqual(len(img_xtrsrc_ids), 1)
            extracted_source_ids.extend(img_xtrsrc_ids)
            assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                                   keywords=['runcat', 'xtrsrc'],
                                                   where={"xtrsrc": img_xtrsrc_ids[0]})
            self.assertEqual(len(assocxtrsrcs_rows), 1)
            self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], img_xtrsrc_ids[0])
        else:
            self.assertEqual(len(img_xtrsrc_ids), 0)

        first_epoch = False

    # Assocxtrsources still ok after multiple images?
    self.assertEqual(len(extracted_source_ids), 1)
    assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                           keywords=['runcat', 'xtrsrc'],
                                           where={"xtrsrc": extracted_source_ids[0]})
    self.assertEqual(len(assocxtrsrcs_rows), 1)
    self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], extracted_source_ids[0],
                     "Runcat xtrsrc entry must match the only extracted source")
Example 9: associate_extracted_sources
def associate_extracted_sources(self, deRuiter_r, new_source_sigma_margin):
    """Associate sources from the last images with previously
    extracted sources within the same dataset.

    Args:
        deRuiter_r (float): The De Ruiter radius for source
            association. The default value is set through the
            tkp.config module.
        new_source_sigma_margin (float): Parameter passed through to the
            source-association routine.
    """
    associate_extracted_sources(self._id, deRuiter_r,
                                new_source_sigma_margin=new_source_sigma_margin)
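Example 9 is a thin wrapper: judging by the use of self._id, it appears to be a method on the image object (presumably tkp.db.Image) that simply forwards the image id to the module-level function. Below is a hedged sketch of calling it that way, reusing the same assumed helper imports as the sketch at the top of this page; the parameter values are illustrative, not library defaults.

# Sketch only: assumes the wrapper above is a method of tkp.db.Image and that
# the db_subs / insert_extracted_sources helpers are importable as shown earlier.
import tkp.db
from tkp.db.general import insert_extracted_sources   # assumed module path
from tkp.testutil import db_subs                       # assumed module path

dataset = tkp.db.DataSet(data={'description': 'wrapper demo'},
                         database=tkp.db.Database())
im_params = db_subs.generate_timespaced_dbimages_data(1)[0]
image = tkp.db.Image(dataset=dataset, data=im_params)

insert_extracted_sources(image._id,
                         [db_subs.example_extractedsource_tuple()], 'blind')
# Equivalent to associate_extracted_sources(image._id, 3.7,
#                                           new_source_sigma_margin=3),
# but reads more naturally when looping over Image objects in a pipeline.
image.associate_extracted_sources(deRuiter_r=3.7, new_source_sigma_margin=3)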
Example 10: test_rejected_initial_image
def test_rejected_initial_image(self):
    """
    An image which is rejected should not be taken into account when
    deciding whether a patch of sky has been previously observed, and
    hence whether any detections in that area are (potential) transients.

    Here, we create a database with two images. The first
    (chronologically) is rejected; the second contains a source. That
    source should not be marked as a transient.
    """
    dataset = tkp.db.DataSet(data={"description": "Trans:" + self._testMethodName},
                             database=tkp.db.Database())

    # We use a dataset with two images
    # NB the routine in db_subs automatically increments time between
    # images.
    n_images = 2
    db_imgs = [
        tkp.db.Image(data=im_params, dataset=dataset)
        for im_params in db_subs.generate_timespaced_dbimages_data(n_images)
    ]

    # The first image is rejected for an arbitrary reason
    # (for the sake of argument, we use an unacceptable RMS).
    db_quality.reject(
        imageid=db_imgs[0].id,
        reason=db_quality.reject_reasons["rms"],
        comment=self._testMethodName,
        session=self.session,
    )
    # Have to commit here: old DB code makes queries in a separate transaction.
    self.session.commit()

    # Since we rejected the first image, we only find a source in the
    # second.
    source = db_subs.example_extractedsource_tuple()
    insert_extracted_sources(db_imgs[1]._id, [source])

    # Standard source association procedure etc.
    associate_extracted_sources(db_imgs[1].id, deRuiter_r=3.7,
                                new_source_sigma_margin=3)

    # Our source should _not_ be a transient. That is, there should be no
    # entries in the newsource table for this dataset.
    cursor = tkp.db.execute(
        """\
        SELECT t.id FROM newsource t, runningcatalog rc
         WHERE t.runcat = rc.id
           AND rc.dataset = %(ds_id)s
        """,
        {"ds_id": dataset.id},
    )
    self.assertEqual(cursor.rowcount, 0)
Example 11: test_two_field_overlap_nulling_src
def test_two_field_overlap_nulling_src(self):
    """Similar to above, but one source disappears:
    Two overlapping fields, 4 sources:
    one steady source only in lower field,
    one steady source in both fields,
    one steady source only in upper field,
    one transient source in both fields but only at *1st* timestep.
    """
    n_images = 2
    xtr_radius = 1.5
    im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                          xtr_radius=xtr_radius)
    im_params[1]['centre_decl'] += xtr_radius * 1

    imgs = []
    lower_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
    upper_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[1]['centre_ra'],
        dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
    overlap_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
    overlap_transient = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

    insert_extracted_sources(imgs[0]._id, [lower_steady_src, overlap_steady_src,
                                           overlap_transient])
    associate_extracted_sources(imgs[0]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[0].id)
    self.assertEqual(len(nd_posns), 0)

    insert_extracted_sources(imgs[1]._id, [upper_steady_src, overlap_steady_src])
    associate_extracted_sources(imgs[1]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    # This time we don't expect to get an immediate transient detection,
    # but we *do* expect to get a null-source forced extraction request:
    nd_posns = dbnd.get_nulldetections(imgs[1].id)
    self.assertEqual(len(nd_posns), 1)

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 4)  # sanity check.
Example 12: insert_image_and_simulated_sources
def insert_image_and_simulated_sources(dataset, image_params, mock_sources,
                                       new_source_sigma_margin,
                                       deruiter_radius=3.7):
    """
    Simulates the standard database image-and-source insertion logic using mock
    sources.

    Args:
        dataset: The dataset object
        image_params (dict): Contains the image properties.
        mock_sources (list of MockSource): The mock sources to simulate.
        new_source_sigma_margin (float): Parameter passed to source-association
            routines.
        deruiter_radius (float): Parameter passed to source-association
            routines.

    Returns:
        3-tuple (image, list of blind extractions, list of forced fits).
    """
    image = tkp.db.Image(data=image_params, dataset=dataset)
    blind_extractions = []
    for src in mock_sources:
        xtr = src.simulate_extraction(image, extraction_type='blind')
        if xtr is not None:
            blind_extractions.append(xtr)
    insert_extracted_sources(image._id, blind_extractions, 'blind')
    associate_extracted_sources(image._id, deRuiter_r=deruiter_radius,
                                new_source_sigma_margin=new_source_sigma_margin)

    nd_ids_posns = nulldetections.get_nulldetections(image.id)
    nd_posns = [(ra, decl) for ids, ra, decl in nd_ids_posns]
    forced_fits = []
    for posn in nd_posns:
        for src in mock_sources:
            eps = 1e-13
            if (math.fabs(posn[0] - src.base_source.ra) < eps and
                    math.fabs(posn[1] - src.base_source.dec) < eps):
                forced_fits.append(
                    src.simulate_extraction(image, extraction_type='ff_nd')
                )
    if len(nd_posns) != len(forced_fits):
        raise LookupError("Something went wrong, nulldetection position did "
                          "not match a mock source.")
    # image.insert_extracted_sources(forced_fits, 'ff_nd')
    dbgen.insert_extracted_sources(image.id, forced_fits, 'ff_nd',
                                   ff_runcat_ids=[ids for ids, ra, decl
                                                  in nd_ids_posns])
    nulldetections.associate_nd(image.id)
    return image, blind_extractions, forced_fits
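A hedged usage sketch for the helper in Example 12 follows. MockSource construction and the db_subs helpers are used the same way as in Examples 3 and 6; the import paths, the dataset setup, the flux value and the margins are illustrative assumptions, not values taken from the library.

# Sketch: driving insert_image_and_simulated_sources (the function defined in
# Example 12) over a two-image series. Assumes the same test helpers referenced
# elsewhere on this page and a configured tkp.db database connection.
from collections import defaultdict

import tkp.db
from tkp.testutil import db_subs                    # assumed module path
from tkp.testutil.db_subs import MockSource         # assumed location

dataset = tkp.db.DataSet(data={'description': 'mock-source demo'},
                         database=tkp.db.Database())
im_params = db_subs.generate_timespaced_dbimages_data(2)

# A steady source at the first image centre, detectable at every timestep
# (the flux value is an arbitrary placeholder).
steady_src = MockSource(
    db_subs.example_extractedsource_tuple(ra=im_params[0]['centre_ra'],
                                          dec=im_params[0]['centre_decl']),
    lightcurve=defaultdict(lambda: 0.5))

for pars in im_params:
    image, blind, forced = insert_image_and_simulated_sources(
        dataset, pars, [steady_src],
        new_source_sigma_margin=3, deruiter_radius=3.7)
    # 'blind' holds this image's simulated blind extractions; 'forced' holds
    # any forced fits requested at null-detection positions.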
Example 13: test_rejected_initial_image
def test_rejected_initial_image(self):
    """
    An image which is rejected should not be taken into account when
    deciding whether a patch of sky has been previously observed, and
    hence whether any detections in that area are (potential) transients.

    Here, we create a database with two images. The first
    (chronologically) is rejected; the second contains a source. That
    source should not be marked as a transient.
    """
    dataset = tkp.db.DataSet(
        data={'description': "Trans:" + self._testMethodName},
        database=tkp.db.Database()
    )

    # We use a dataset with two images
    # NB the routine in db_subs automatically increments time between
    # images.
    n_images = 2
    db_imgs = [
        tkp.db.Image(data=im_params, dataset=dataset) for
        im_params in db_subs.example_dbimage_datasets(n_images)
    ]

    # The first image is rejected for an arbitrary reason
    # (for the sake of argument, we use an unacceptable RMS).
    tkp.db.quality.reject(
        db_imgs[0].id, tkp.db.quality.reason['rms'].id, self._testMethodName
    )

    # Since we rejected the first image, we only find a source in the
    # second.
    source = db_subs.example_extractedsource_tuple()
    db_imgs[1].insert_extracted_sources([source])

    # Standard source association procedure etc.
    associate_extracted_sources(db_imgs[1].id, 3.7)

    # Our source should _not_ be a transient. That is, there should be no
    # entries in the transient table for this dataset.
    cursor = tkp.db.execute("""\
        SELECT t.id FROM transient t, runningcatalog rc
         WHERE t.runcat = rc.id
           AND rc.dataset = %(ds_id)s
        """, {"ds_id": dataset.id}
    )
    self.assertEqual(cursor.rowcount, 0)
Example 14: test_new_skyregion_insertion
def test_new_skyregion_insertion(self):
    """Here we test the association logic executed upon insertion of a
    new skyregion.

    We expect that any pre-existing entries in the runningcatalog
    which lie within the field of view will be marked as
    'within this region', through the presence of an entry in table
    ``assocskyrgn``.
    Conversely sources outside the FoV should not be marked as related.

    We begin with img0, with a source at centre.
    Then we add 2 more (empty) images/fields at varying positions.
    """
    n_images = 6
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    src_in_img0 = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'],)

    # First image
    image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
    insert_extracted_sources(image0._id, [src_in_img0])
    associate_extracted_sources(image0._id, deRuiter_r, new_source_sigma_margin)
    image0.update()

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 1)  # Just a sanity check.

    ## Second, different *But overlapping* image:
    idx = 1
    im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 0.9
    image1 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
    image1.update()

    assocs = columns_from_table('assocskyrgn',
                                where={'skyrgn': image1._data['skyrgn']})
    self.assertEqual(len(assocs), 1)
    self.assertEqual(assocs[0]['runcat'], runcats[0]['id'])

    ## Third, different *and NOT overlapping* image:
    idx = 2
    im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 1.1
    image2 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
    image2.update()

    assocs = columns_from_table('assocskyrgn',
                                where={'skyrgn': image2._data['skyrgn']})
    self.assertEqual(len(assocs), 0)
Example 15: test_null_case_sequential
def test_null_case_sequential(self):
    """test_null_case_sequential

    - Check extractedsource insertion routines can deal with empty input!
    - Check source association can too
    """
    for im in self.im_params:
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        insert_extracted_sources(self.db_imgs[-1]._id, [], 'blind')
        associate_extracted_sources(self.db_imgs[-1]._id, deRuiter_r,
                                    new_source_sigma_margin)

        running_cat = columns_from_table(table="runningcatalog",
                                         keywords="*",
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 0)