本文整理汇总了Python中astrocats.catalog.utils.pbar函数的典型用法代码示例。如果您正苦于以下问题:Python pbar函数的具体用法?Python pbar怎么用?Python pbar使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了pbar函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: do_smt
def do_smt(catalog):
    """Import transients from the SkyMapper (SMT) TNS transient listing."""
    task_str = catalog.get_current_task_str()
    smt_url = 'http://www.mso.anu.edu.au/skymapper/smt/transients/tns/'
    cache_path = os.path.join(catalog.get_current_task_repo(),
                              'SMT', 'index.html')
    html = catalog.load_url(smt_url, cache_path)
    if not html:
        return
    soup = BeautifulSoup(html, 'html5lib')
    table_rows = soup.find('table').findAll('tr')
    for table_row in pbar(table_rows, task_str):
        cells = [str(cell.text) for cell in table_row.findAll('td')]
        if not cells:
            # Header rows have no <td> elements; skip them.
            continue
        name, source = catalog.new_entry('AT' + cells[0],
                                         srcname='SMT', url=smt_url)
        entry = catalog.entries[name]
        entry.add_quantity(SUPERNOVA.ALIAS, cells[1], source)
        entry.add_quantity(SUPERNOVA.CLAIMED_TYPE, cells[2], source)
        entry.add_quantity(SUPERNOVA.RA, cells[3], source,
                           u_value='floatdegrees')
        entry.add_quantity(SUPERNOVA.DEC, cells[4], source,
                           u_value='floatdegrees')
        if catalog.args.update:
            catalog.journal_entries()
    catalog.journal_entries()
    return
示例2: do_agn_bhm_database
def do_agn_bhm_database(catalog):
    """Load data from the 'AGN Blackhole Mass Database': 2015PASP..127...67B.
    FIX: Add archiving flags!
    """
    log = catalog.log
    log.debug("do_agn_bhm_database()")
    task_str = catalog.get_current_task_str()
    # Load data from URL or cached copy of it
    cached_path = SOURCE_BIBCODE + '.txt'
    html = catalog.load_url(DATA_URL, cached_path, fail=True)
    if html is None:
        return False
    # Get this line for description of mass calculation
    # 'M<sub>BH</sub> calculated using <i>< f ></i> =  2.8'
    # BUGFIX: `.groups()` was previously called on the match object without
    # checking for None, so a missing pattern raised AttributeError instead
    # of taking the intended log-and-return-False error path.
    match = re.search("<i>< f ></i> =  (.*)", html)
    if match is not None and len(match.groups()) == 1:
        mass_scale_factor = match.groups()[0]
    else:
        err_msg = "Could not load `mass_scale_factor` from '{}'".format(
            match.groups() if match is not None else None)
        catalog.log.error(err_msg)
        return False
    soup = BeautifulSoup(html, 'html5lib')
    # The whole table is nested in a `<table class="hovertable">`
    full_table = soup.find('table', attrs={'class': 'hovertable'})
    # Each line in the file is separated with `'tr'`
    div_lines = full_table.find_all('tr')
    # Go through each element of the tables
    entries = 0
    for div in utils.pbar(div_lines, task_str):
        # Get the `varname` -- ID number for each row
        # The first element of the `contents` contains an href with the 'varname'
        cell_text = str(div.contents[0])
        groups = re.search('varname=([0-9]*)', cell_text)
        # If no match is found, this is one of the header lines (not an entry line, skip)
        if groups is not None:
            varname = groups.groups()[0]
            try:
                name = _add_entry_for_data_line(
                    catalog, div.text, varname, mass_scale_factor)
            except Exception:
                log.error("Failed `_add_entry_for_data_line()`")
                log.error("`div.text`: '{}'".format(div.text))
                log.error("`varname`: '{}'".format(varname))
                raise
            if name is not None:
                entries += 1
                if catalog.args.travis and (entries > catalog.TRAVIS_QUERY_LIMIT):
                    log.warning("Exiting on travis limit")
                    break
    return True
示例3: do_fermi
def do_fermi(catalog):
    """Import Milky-Way SNR entries from the cached Fermi 1SC catalog file."""
    task_str = catalog.get_current_task_str()
    catalog_path = os.path.join(catalog.get_current_task_repo(),
                                '1SC_catalog_v01.asc')
    with open(catalog_path, 'r') as ff:
        rows = list(csv.reader(ff, delimiter=','))
    for ri, row in enumerate(pbar(rows, task_str)):
        if row[0].startswith('#'):
            # Stop once the upper-limits section of the file begins.
            if len(row) > 1 and 'UPPER_LIMITS' in row[1]:
                break
            continue
        if 'Classified' not in row[1]:
            continue
        name = catalog.add_entry(row[0].replace('SNR', 'G'))
        entry = catalog.entries[name]
        source = entry.add_source(bibcode='2016ApJS..224....8A')
        entry.add_quantity(SUPERNOVA.ALIAS, name, source)
        entry.add_quantity(SUPERNOVA.ALIAS,
                           row[0].replace('SNR', 'MWSNR'), source)
        entry.add_quantity(SUPERNOVA.RA, row[2], source,
                           u_value='floatdegrees')
        entry.add_quantity(SUPERNOVA.DEC, row[3], source,
                           u_value='floatdegrees')
    catalog.journal_entries()
    return
示例4: load_stubs
def load_stubs(self):
    """Create a stub entry for every output file in the repositories.

    Returns the (updated) `entries` dict mapping name -> stub entry.
    Raises RuntimeError if a non-stub entry already exists for a name.
    """
    currenttask = 'Loading entry stubs'
    output_files = self.PATHS.get_repo_output_file_list()
    for output_file in pbar(output_files, currenttask):
        fname = output_file
        # FIX: should this be ``output_file.endswith('.gz')`` ?
        if '.gz' in output_file:
            fname = uncompress_gz(output_file)
        name = os.path.basename(
            os.path.splitext(fname)[0]).replace('.json', '')
        new_entry = self.proto.init_from_file(self, path=fname, delete=False)
        # Make sure a non-stub entry doesnt already exist with this name
        if name in self.entries and not self.entries[name]._stub:
            err_str = ("ERROR: non-stub entry already exists with name '{}'"
                       .format(name))
            self.log.error(err_str)
            raise RuntimeError(err_str)
        self.entries[name] = new_entry.get_stub()
        self.log.debug("Added stub for '{}'".format(name))
    return self.entries
示例5: do_grb
def do_grb(catalog):
    """Import GRB positions and redshifts from the MSU GRB catalog."""
    task_str = catalog.get_current_task_str()
    file_path = os.path.join(
        catalog.get_current_task_repo(), 'GRB-catalog/catalog.csv')
    csvtxt = catalog.load_cached_url(
        'http://grb.pa.msu.edu/grbcatalog/'
        'download_data?cut_0_min=10&cut_0=BAT%20T90'
        '&cut_0_max=100000&num_cuts=1&no_date_cut=True',
        file_path)
    if not csvtxt:
        return
    rows = list(csv.reader(csvtxt.splitlines(), delimiter=',',
                           quotechar='"', skipinitialspace=True))
    for ri, row in enumerate(pbar(rows, task_str)):
        # Skip the CSV header row.
        if ri == 0:
            continue
        name, source = catalog.new_entry(
            'GRB ' + row[0],
            srcname='Gamma-ray Bursts Catalog',
            url='http://grbcatalog.org')
        entry = catalog.entries[name]
        entry.add_quantity(SUPERNOVA.RA, row[2], source,
                           u_value='floatdegrees')
        entry.add_quantity(SUPERNOVA.DEC, row[3], source,
                           u_value='floatdegrees')
        entry.add_quantity(SUPERNOVA.REDSHIFT, row[8], source)
    catalog.journal_entries()
    return
示例6: do_snls_photo
def do_snls_photo(catalog):
    """Import SNLS ugriz photometry, converting fluxes into magnitudes."""
    task_str = catalog.get_current_task_str()
    snls_path = os.path.join(catalog.get_current_task_repo(), 'SNLS-ugriz.dat')
    rows = list(csv.reader(open(snls_path, 'r'), delimiter=' ',
                           quotechar='"', skipinitialspace=True))
    for row in pbar(rows, task_str):
        flux, err = row[3], row[4]
        # Being extra strict here with the flux constraint, see note below.
        if float(flux) < 3.0 * float(err):
            continue
        name = catalog.add_entry('SNLS-' + row[0])
        entry = catalog.entries[name]
        source = entry.add_source(bibcode='2010A&A...523A...7G')
        entry.add_quantity(SUPERNOVA.ALIAS, name, source)
        band, mjd = row[1], row[2]
        sig = get_sig_digits(flux.split('E')[0]) + 1
        # Conversion comes from SNLS-Readme
        # NOTE: Datafiles avail for download suggest diff zeropoints than 30,
        # need to inquire.
        magnitude = pretty_num(30.0 - 2.5 * log10(float(flux)), sig=sig)
        e_mag = pretty_num(
            2.5 * log10(1.0 + float(err) / float(flux)), sig=sig)
        # e_mag = pretty_num(2.5*(log10(float(flux) + float(err)) -
        # log10(float(flux))), sig=sig)
        entry.add_photometry(
            time=mjd, band=band, magnitude=magnitude, e_magnitude=e_mag,
            counts=flux, e_counts=err, source=source)
    catalog.journal_entries()
    return
示例7: do_pessto
def do_pessto(catalog):
    """Import PESSTO multi-band photometry from the cached CSV table."""
    task_str = catalog.get_current_task_str()
    pessto_path = os.path.join(
        catalog.get_current_task_repo(), 'PESSTO_MPHOT.csv')
    rows = list(csv.reader(open(pessto_path, 'r'), delimiter=','))
    bands, systems = [], []
    for ri, row in enumerate(pbar(rows, task_str)):
        if ri == 0:
            # Header: alternating mag/error columns named '<band>_<system>'.
            bands = [col.split('_')[0] for col in row[3::2]]
            systems = [col.split('_')[1].capitalize().replace('Ab', 'AB')
                       for col in row[3::2]]
            continue
        name = catalog.add_entry(row[1])
        entry = catalog.entries[name]
        source = entry.add_source(bibcode='2015A&A...579A..40S')
        entry.add_quantity(SUPERNOVA.ALIAS, name, source)
        for hi, ci in enumerate(range(3, len(row) - 1, 2)):
            if not row[ci]:
                continue
            teles = 'Swift' if systems[hi] == 'Swift' else ''
            entry.add_photometry(time=row[2], magnitude=row[ci],
                                 e_magnitude=row[ci + 1],
                                 band=bands[hi], system=systems[hi],
                                 telescope=teles,
                                 source=source)
    catalog.journal_entries()
    return
示例8: do_itep
def do_itep(catalog):
    """Import photometry from the ITEP (SAI) supernova light-curve catalogue.

    References that cannot be matched to a bibcode are written out to
    ``../itep-needsbib.txt`` for later follow-up.
    """
    task_str = catalog.get_current_task_str()
    # Sources known to contain bad photometry; skip their data.
    itepbadsources = ['2004ApJ...602..571B']
    needsbib = []
    with open(os.path.join(catalog.get_current_task_repo(),
                           'itep-refs.txt'), 'r') as refs_file:
        refrep = refs_file.read().splitlines()
    # Map reference name (odd lines) -> bibcode (even lines).
    refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
    fname = os.path.join(catalog.get_current_task_repo(),
                         'itep-lc-cat-28dec2015.txt')
    tsvin = list(csv.reader(open(fname, 'r'),
                            delimiter='|', skipinitialspace=True))
    curname = ''
    for rr, row in enumerate(pbar(tsvin, task_str)):
        if rr <= 1 or len(row) < 7:
            continue
        oldname = 'SN' + row[0].strip()
        mjd = str(jd_to_mjd(Decimal(row[1].strip())))
        band = row[2].strip()
        magnitude = row[3].strip()
        e_magnitude = row[4].strip()
        reference = row[6].strip().strip(',')
        if curname != oldname:
            curname = oldname
            name = catalog.add_entry(oldname)
            sec_reference = ('Sternberg Astronomical Institute '
                             'Supernova Light Curve Catalogue')
            sec_refurl = 'http://dau.itep.ru/sn/node/72'
            sec_source = catalog.entries[name].add_source(
                name=sec_reference, url=sec_refurl, secondary=True)
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, oldname, sec_source)
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, year, sec_source)
        # BUGFIX: `bibcode` was only assigned when the reference was found in
        # `refrepf`, so unmatched rows either raised NameError (first row) or
        # silently reused the previous row's bibcode for the bad-source check.
        bibcode = ''
        if reference in refrepf:
            bibcode = unescape(refrepf[reference])
            source = catalog.entries[name].add_source(bibcode=bibcode)
        else:
            needsbib.append(reference)
            source = catalog.entries[name].add_source(
                name=reference) if reference else ''
        if bibcode not in itepbadsources:
            catalog.entries[name].add_photometry(time=mjd, band=band,
                                                 magnitude=magnitude,
                                                 e_magnitude=e_magnitude,
                                                 source=sec_source + ',' +
                                                 source)
    # Write out references that could use a bibcode
    needsbib = list(OrderedDict.fromkeys(needsbib))
    with open('../itep-needsbib.txt', 'w') as bib_file:
        # BUGFIX: format string was '%ss\n', appending a stray 's' to every
        # reference name written to the follow-up file.
        bib_file.writelines(['%s\n' % ii for ii in needsbib])
    catalog.journal_entries()
    return
示例9: do_snhunt
def do_snhunt(catalog):
    """Import SNhunt discoveries from the Catalina current-transients page."""
    task_str = catalog.get_current_task_str()
    snh_url = 'http://nesssi.cacr.caltech.edu/catalina/current.html'
    html = catalog.load_cached_url(snh_url, os.path.join(
        catalog.get_current_task_repo(), 'SNhunt/current.html'))
    if not html:
        return
    text = html.splitlines()
    # Locate the 'Supernova Discoveries' table within the raw HTML lines.
    findtable = False
    for ri, row in enumerate(text):
        if 'Supernova Discoveries' in row:
            findtable = True
        if findtable and '<table' in row:
            tstart = ri + 1
        if findtable and '</table>' in row:
            tend = ri - 1
    tablestr = '<html><body><table>'
    for row in text[tstart:tend]:
        # Some rows lose their leading '<'; restore the '<tr>' tag.
        if row[:3] == 'tr>':
            tablestr = tablestr + '<tr>' + row[3:]
        else:
            tablestr = tablestr + row
    tablestr = tablestr + '</table></body></html>'
    bs = BeautifulSoup(tablestr, 'html5lib')
    trs = bs.find('table').findAll('tr')
    for tr in pbar(trs, task_str):
        cols = [str(xx.text) for xx in tr.findAll('td')]
        if not cols:
            continue
        name = re.sub('<[^<]+?>', '', cols[4]
                      ).strip().replace(' ', '').replace('SNHunt', 'SNhunt')
        name = catalog.add_entry(name)
        source = catalog.entries[name].add_source(
            name='Supernova Hunt', url=snh_url)
        catalog.entries[name].add_quantity('alias', name, source)
        host = re.sub('<[^<]+?>', '', cols[1]).strip().replace('_', ' ')
        catalog.entries[name].add_quantity('host', host, source)
        catalog.entries[name].add_quantity(
            'ra', cols[2], source, u_value='floatdegrees')
        catalog.entries[name].add_quantity(
            'dec', cols[3], source, u_value='floatdegrees')
        dd = cols[0]
        discoverdate = dd[:4] + '/' + dd[4:6] + '/' + dd[6:8]
        catalog.entries[name].add_quantity(
            'discoverdate', discoverdate, source)
        # BUGFIX: 'CRTS' was previously re-added inside the per-discoverer
        # loop, once for every listed discoverer; add it a single time here.
        catalog.entries[name].add_quantity('discoverer', 'CRTS', source)
        for discoverer in cols[5].split('/'):
            catalog.entries[name].add_quantity(
                'discoverer', discoverer, source)
        if catalog.args.update:
            catalog.journal_entries()
    catalog.journal_entries()
    return
示例10: delete_old_entry_files
def delete_old_entry_files(self):
    """Remove every existing entry JSON file from the output repositories.

    Raises:
        RuntimeError: if `entries` is non-empty (deleting now would discard
            loaded data).
    """
    if len(self.entries):
        err_str = "`delete_old_entry_files` with `entries` not empty!"
        self.log.error(err_str)
        raise RuntimeError(err_str)
    # Delete all old entry JSON files
    for old_file in pbar(self.PATHS.get_repo_output_file_list(),
                         desc='Deleting old entries'):
        os.remove(old_file)
        self.log.debug("Deleted '{}'".format(os.path.split(old_file)[-1]))
    return
示例11: do_simbad_novae
def do_simbad_novae(catalog):
    """Add coordinates and aliases for nova entries from the SIMBAD database.

    Tries each SIMBAD mirror in turn until a query succeeds.
    """
    task_str = catalog.get_current_task_str()
    simbad_mirrors = ['http://simbad.harvard.edu/simbad/sim-script',
                      'http://simbad.u-strasbg.fr/simbad/sim-script']
    customSimbad = Simbad()
    customSimbad.ROW_LIMIT = -1
    customSimbad.TIMEOUT = 120
    for mirror in simbad_mirrors:
        customSimbad.SIMBAD_URL = mirror
        try:
            table = customSimbad.query_criteria('maintype=No* | maintype="No?"')
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; try the next mirror instead.
            continue
        else:
            break
    if not table:
        catalog.log.warning('SIMBAD unable to load, probably offline.')
    for name in pbar(catalog.entries, task_str):
        # BUGFIX: default `nova_name` so the warning below cannot raise
        # NameError when `get_nova_name` itself fails.
        nova_name = name
        try:
            nova_name = "V* " + get_nova_name(name)
            aliases = customSimbad.query_objectids(nova_name)
        except Exception:
            #THROW WARNING HERE
            tprint("Could not find " + nova_name)
            continue
        table = customSimbad.query_object(nova_name)
        name = catalog.add_entry(name)
        bibcode = table[0]['COO_BIBCODE'].decode()
        ra = str(table[0]['RA'])
        dec = str(table[0]['DEC'])
        source = catalog.entries[name].add_source(name='SIMBAD astronomical database',
                                                  bibcode=bibcode,
                                                  url="http://simbad.u-strasbg.fr/",
                                                  secondary=True)
        catalog.entries[name].add_quantity(NOVA.RA, ra, source)
        catalog.entries[name].add_quantity(NOVA.DEC, dec, source)
        for i in range(len(aliases)):
            # Alias values may be bytes or str depending on astroquery version.
            try:
                alias = aliases[i][0].decode()
            except Exception:
                alias = str(aliases[i][0])
            catalog.entries[name].add_quantity(NOVA.ALIAS, alias, source)
    catalog.journal_entries()
示例12: set_preferred_names
def set_preferred_names(self):
    """Choose between each entries given name and its possible aliases for
    the best one.
    """
    if len(self.entries) == 0:
        self.log.error("WARNING: `entries` is empty, loading stubs")
        self.load_stubs()
    task_str = self.get_current_task_str()
    for count, oname in enumerate(pbar(self.entries, task_str)):
        newname = self.add_entry(oname)
        self.entries[newname].set_preferred_name()
        # Travis CI runs process only a limited number of entries.
        if self.args.travis and count > self.TRAVIS_QUERY_LIMIT:
            break
    return
示例13: do_ptss_meta
def do_ptss_meta(catalog):
    """Import metadata from PTSS webpage."""
    task_str = catalog.get_current_task_str()
    years = list(range(2015, datetime.today().year + 1))
    for year in years:
        jsontxt = None
        # BUGFIX: the retry loop was `while jsontxt is None` with
        # `except: pass`, spinning forever if the server stayed unreachable.
        # Retry a bounded number of times, then skip this year.
        for _attempt in range(5):
            try:
                jsontxt = catalog.load_url(
                    'http://www.cneost.org/ptss/fetchlist.php?vip=sn&gdate=' +
                    str(year),
                    os.path.join(catalog.get_current_task_repo(),
                                 'PTSS/catalog-' + str(year) + '.json'),
                    json_sort='name', timeout=5)
            except Exception:
                continue
            if jsontxt is not None:
                break
        if jsontxt is None:
            catalog.log.warning(
                "Unable to load PTSS list for year '{}'".format(year))
            continue
        meta = json.loads(jsontxt)
        for met in pbar(meta, task_str + ' - ' + str(year)):
            oldname = met['name']
            name, source = catalog.new_entry(
                oldname, srcname='PMO & Tsinghua Supernova Survey (PTSS)',
                url='http://www.cneost.org/ptss/index.php')
            coo = coord(met['ra'], met['dec'], unit=(un.deg, un.deg))
            catalog.entries[name].add_quantity(
                SUPERNOVA.RA, coo.ra.to_string(unit=un.hour, sep=':'), source)
            catalog.entries[name].add_quantity(
                SUPERNOVA.DEC, coo.dec.to_string(unit=un.degree, sep=':'),
                source)
            if met['filter'] is not None:
                mjd = str(astrotime(met['obsdate'], format='isot').mjd)
                photodict = {
                    PHOTOMETRY.TIME: mjd,
                    PHOTOMETRY.MAGNITUDE: str(met['mag']),
                    PHOTOMETRY.E_MAGNITUDE: str(met['magerr']),
                    PHOTOMETRY.BAND: met['filter'].replace('sdss-', ''),
                    PHOTOMETRY.SOURCE: source
                }
                catalog.entries[name].add_photometry(**photodict)
    catalog.journal_entries()
    return
示例14: do_pessto
def do_pessto(catalog):
    """Import PESSTO photometry, tagging telescope/instrument by system."""
    task_str = catalog.get_current_task_str()
    pessto_path = os.path.join(
        catalog.get_current_task_repo(), 'PESSTO_MPHOT.csv')
    rows = list(csv.reader(open(pessto_path, 'r'), delimiter=','))
    bands, systems = [], []
    for ri, row in enumerate(pbar(rows, task_str)):
        if ri == 0:
            # Header: alternating mag/error columns named '<band>_<system>'.
            bands = [col.split('_')[0] for col in row[3::2]]
            systems = [col.split('_')[1].capitalize().replace('Ab', 'AB')
                       for col in row[3::2]]
            continue
        name = catalog.add_entry(row[1])
        entry = catalog.entries[name]
        source = entry.add_source(bibcode='2015A&A...579A..40S')
        entry.add_quantity(SUPERNOVA.ALIAS, name, source)
        for hi, ci in enumerate(range(3, len(row) - 1, 2)):
            if not row[ci]:
                continue
            if systems[hi] == 'Swift':
                teles, instrument, bandset = 'Swift', 'UVOT', 'Swift'
            else:
                teles, instrument, bandset = 'NTT', 'EFOSC', 'Johnson'
            photodict = {
                PHOTOMETRY.TIME: row[2],
                PHOTOMETRY.U_TIME: 'MJD',
                PHOTOMETRY.MAGNITUDE: row[ci],
                PHOTOMETRY.E_MAGNITUDE: row[ci + 1],
                PHOTOMETRY.BAND: bands[hi],
                PHOTOMETRY.SYSTEM: systems[hi],
                PHOTOMETRY.BAND_SET: bandset,
                PHOTOMETRY.TELESCOPE: teles,
                PHOTOMETRY.INSTRUMENT: instrument,
                PHOTOMETRY.SOURCE: source
            }
            entry.add_photometry(**photodict)
    catalog.journal_entries()
    return
示例15: do_hst
def do_hst(catalog):
    """Import supernova pointings from the HST archive search service."""
    task_str = catalog.get_current_task_str()
    url = 'http://archive.stsci.edu/hst/search.php'
    reference = 'Hubble Pointings'
    jtxt = catalog.load_url(
        url,
        os.path.join(catalog.get_current_task_repo(), 'HST.json'),
        post={
            'sci_target_descrip': '*supernova*',
            'outputformat': 'JSON_file',
            'action': 'Search',
            'max_records': '50000',
            'max_rpp': '50000'
        },
        verify=False)
    rows = json.loads(jtxt)
    allowed_prefixes = ('PS1', 'DES', 'GAIA', 'ASASSN', 'AT', 'IPTF', 'LSQ',
                        'PTF')
    loopcnt = 0
    for row in pbar(rows, task_str):
        oldname = name_clean(row['Target Name'])
        if not oldname.upper().startswith(allowed_prefixes):
            continue
        # 'PS1-' must be followed by a digit to name a real transient.
        if oldname.startswith('PS1-') and not is_number(oldname[4]):
            continue
        name, source = catalog.new_entry(oldname, srcname=reference, url=url)
        # Skip entries whose coordinates are already known.
        if (ENTRY.RA in catalog.entries[name] and
                ENTRY.DEC in catalog.entries[name]):
            continue
        catalog.entries[name].add_quantity(
            ENTRY.RA, row['RA (J2000)'], source=source)
        catalog.entries[name].add_quantity(
            ENTRY.DEC, row['Dec (J2000)'], source=source)
        catalog.journal_entries()
        loopcnt += 1
        if (catalog.args.travis and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
            break
    catalog.journal_entries()
    return