本文整理汇总了Python中catalog.Catalog.select_geographically方法的典型用法代码示例。如果您正苦于以下问题:Python Catalog.select_geographically方法的具体用法?Python Catalog.select_geographically怎么用?Python Catalog.select_geographically使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类catalog.Catalog
的用法示例。
在下文中一共展示了Catalog.select_geographically方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: study_slc
# 需要导入模块: from catalog import Catalog [as 别名]
# 或者: from catalog.Catalog import select_geographically [as 别名]
def study_slc(self):
    """
    Apply single-link clustering post-processing to the configured regions
    of Japan and classify the resulting clusters.

    Receives nothing and returns nothing; works entirely through files on
    disk (temporary catalogs plus pickled declustered-quake arrays).
    """
    # catalog paths
    COMPLETE_CATALOG = "../catalogs/new_jma.txt"
    TEMP_CATALOG1 = '../catalogs/temp1.txt'
    TEMP_CATALOG2 = '../catalogs/temp2.txt'
    # indices into each region's info list
    BOUND = 0       # info[BOUND] -> geographic bounds of the region
    MAG_THRESH = 1  # info[MAG_THRESH] -> magnitude threshold for the region
    # auxiliary classes needed
    cat = Catalog()
    slc1 = Single_Link()
    # distance threshold, the distance to use when the distance calculated
    # by the single link cluster returns negative
    dist_thresh = [12.0]
    # region name -> [bounds, magnitude threshold]
    regions = {}
    regions["kanto"] = [cat.get_kanto_bounds(), 0.0]
    regions["tohoku"] = [cat.get_tohoku_bounds(), 0.0]
    regions["east_japan"] = [cat.get_east_japan_bounds(), 0.0]
    # iterate through every region / distance-threshold combination
    for region, info in regions.items():
        for thresh in dist_thresh:
            # obtain catalog for the region, filtered by magnitude
            cat.select_geographically(info[BOUND], COMPLETE_CATALOG, TEMP_CATALOG1)
            cat.get_mag_threshold(TEMP_CATALOG1, TEMP_CATALOG2, info[MAG_THRESH], index = 2)
            # location where a previous run pickled the declustered quake
            # array. BUG FIX: this assignment had been commented out along
            # with the decluster step, leaving `path` undefined (NameError)
            # at the classify_quakes call below.
            path = 'data_persistence/declustered_array_slc_' + region + '_' + str(round(thresh, 3))
            # classify quakes by making the centroid the medoid of the cluster
            slc1.classify_quakes(path, 'closest')
            # clean up temporary catalogs
            call(["rm", TEMP_CATALOG1, TEMP_CATALOG2])
示例2: get_catalogs
# 需要导入模块: from catalog import Catalog [as 别名]
# 或者: from catalog.Catalog import select_geographically [as 别名]
def get_catalogs(self):
    """
    Read each declustered quake array and obtain the corresponding catalog
    for every region; in the end, unite all region catalogs into one big
    catalog (JMA style).
    """
    # defines
    CATALOG_READ = '../catalogs/jma.txt'
    # auxiliary classes needed
    cat = Catalog()
    slc = Single_Link()
    # region name -> geographic bounds of that region
    bounds = {}
    bounds["kanto"] = cat.get_kanto_bounds()
    bounds["kansai"] = cat.get_kansai_bounds()
    bounds["tohoku"] = cat.get_tohoku_bounds()
    bounds["east_japan"] = cat.get_east_japan_bounds()
    # obtain catalog for each region
    for region, bound in bounds.items():
        catalog_write = '../results/single_link/' + region + '_allshocks.txt'
        cat.select_geographically(bound, CATALOG_READ, catalog_write)
    # for each region, classify quakes (centroid -> closest medoid)
    for region, bound in bounds.items():
        quakes_path = 'data_persistence/declustered_array_slc_' + region
        slc.classify_quakes(quakes_path, 'closest')
    # for each region, extract the mainshock-only catalog
    for region, bound in bounds.items():
        # obtain quakes path, catalog to read from and catalog to write
        quakes_path = 'data_persistence/declustered_array_slc_' + region + '_classified_medoid'
        catalog_read = '../results/single_link/' + region + '_allshocks.txt'
        catalog_write = '../results/single_link/' + region + '_classified.txt'
        slc.get_catalog_mainshocks(quakes_path, catalog_read, catalog_write)
    # merge catalogs into a big one
    call(["cat ../results/single_link/*classified.txt > ../results/single_link/temp.txt"], shell = True)
    # sort file by columns - first the year, then the month and so it goes
    call(['sort -n -k 3,3 -n -k 4,4 -n -k 5,5 -n -k 8,8 -n -k 9,9 -n -k 10,10 ../results/single_link/temp.txt \
> ../results/single_link/temp2.txt'], shell = True)
    # remove duplicate lines, keeping one copy of each.
    # BUG FIX: `uniq -u` prints ONLY lines that are not repeated, silently
    # dropping both copies of any duplicated line; plain `uniq` collapses
    # adjacent duplicates to a single copy, which is what the merge needs.
    call(['uniq ../results/single_link/temp2.txt > ../results/single_link/regions_classified.txt'], shell = True)
    # remove temporary files
    call(['rm ../results/single_link/temp*.txt'], shell=True)
示例3: study_chi_square
# 需要导入模块: from catalog import Catalog [as 别名]
# 或者: from catalog.Catalog import select_geographically [as 别名]
def study_chi_square(self, mag_thresh, region, time_spacing):
    """
    Receive a magnitude threshold, a region name and an interval of time
    (seconds); perform a chi-squared (Poisson) test on the clusters that
    had quakes with magnitude above the threshold.
    Prints the results and returns nothing.
    """
    # auxiliary classes
    stat = Statistic()
    cat = Catalog()
    print('\nApplying chi-squared for magnitude ' + str(mag_thresh) + ' and region ' + str(region) + \
          ' and time interval of ' + str(time_spacing) + ' seconds')
    # get the name of the declustered and undeclustered catalog
    decluster_catalog = '../results/single_link/temp_decluster_' + region + '.txt'
    original_catalog = '../results/single_link/temp_original_' + region + '.txt'
    aux_catalog = '../results/single_link/temp_aux_' + region + '.txt'
    # obtain catalog containing the quakes of the region - cat_region
    cat_orig = '../catalogs/new_jma.txt'
    cat_region = '../results/single_link/temp.txt'
    bounds = get_region_bounds(region)
    cat.select_geographically(bounds, cat_orig, cat_region)
    # generates catalog containing only the mainshocks of clusters that had
    # at least one quake with magnitude greater than the threshold.
    # open the pickled declustered quake array (context manager so the file
    # handle is closed promptly -- the bare open() previously leaked it)
    path = '../results/single_link/declustered_array_' + region
    with open(path, 'rb') as array_file:
        quakes = pickle.load(array_file)
    # mark every quake initially as an aftershock
    for quake in quakes:
        quake.is_aftershock = True
    # promote to mainshock the centroid of every cluster that contains a
    # quake at or above the magnitude threshold
    for quake in quakes:
        if quake.mag >= mag_thresh:
            quakes[quake.centroid_index].is_aftershock = False
    # from the quake array and region catalog, obtain the declustered catalog
    cat.record_mainshocks(quakes, aux_catalog, cat_region)
    cat.get_mainshocks(decluster_catalog, aux_catalog, index = 5)
    # obtain original catalog for the region, only with quakes at or above
    # the magnitude threshold
    cat.get_mag_threshold(cat_region, original_catalog, mag_thresh)
    # apply the chi-squared test for both catalogs at significance 0.05
    print("Applying chi-squared for declustered catalog")
    stat.do_chi_square(decluster_catalog, 0.05, time_spacing)
    print("Applying chi-squared for the original catalog")
    stat.do_chi_square(original_catalog, 0.05, time_spacing)
示例4: study_chisquare
# 需要导入模块: from catalog import Catalog [as 别名]
# 或者: from catalog.Catalog import select_geographically [as 别名]
def study_chisquare(self, folder, array_path, days, alpha, mag_threshold=None):
    """
    Receive a folder to look for the quakes, the path where the earthquake
    array is located, the number of days, a significance level and
    optionally a magnitude threshold.
    Performs a chi-square test for the 4 regions of japan and all of japan
    by taking the observed frequencies in that number of days.
    """
    # auxiliary classes needed
    catalog = Catalog()
    stat = Statistic()
    # number of seconds in the interval.
    # BUG FIX: a day has 24 * 3600 seconds; the original multiplied by
    # 360.0, making every interval 10x too short.
    time_interval = days * 24.0 * 3600.0
    # generate catalog in a good format to work with (context manager so
    # the pickle file handle is closed promptly)
    with open(array_path, 'rb') as array_file:
        quakes = pickle.load(array_file)
    catalog.record_mainshocks(quakes, folder + 'quakes.txt', '../catalogs/new_jma.txt')
    # get the core catalog name, and obtain it
    if mag_threshold is None:
        CATALOG_EXTENSION = 'quakes.txt'
    else:
        CATALOG_EXTENSION = 'quakes_' + str(mag_threshold) + '.txt'
    # NOTE(review): when mag_threshold is None this filters quakes.txt onto
    # itself with a None threshold -- confirm get_mag_threshold handles that
    catalog.get_mag_threshold(folder + 'quakes.txt', folder + CATALOG_EXTENSION, mag_threshold)

    def _run_tests(name, quake_cat, mainshock_cat, lead=''):
        # run the chi-square test for one scope, first including and then
        # excluding aftershocks (index 5 is the mainshock flag column).
        # BUG FIX: the original hardcoded 0.05 and silently ignored the
        # `alpha` parameter; the caller-supplied significance is now used.
        print(lead + "Doing chi-square test for " + name + ", including aftershocks on analysis:")
        stat.do_chi_square(quake_cat, alpha, time_interval)
        print("\nDoing chi-square test for " + name + ", excluding aftershocks on analysis")
        catalog.get_mainshocks(mainshock_cat, quake_cat, 5)
        stat.do_chi_square(mainshock_cat, alpha, time_interval)

    # whole-Japan analysis on the core catalog
    _run_tests('all japan', folder + CATALOG_EXTENSION, folder + 'japan_mainshocks.txt')
    # per-region analysis: carve the region catalog out of the core
    # catalog, then run the same pair of tests
    for region, get_bounds in (('kanto', catalog.get_kanto_bounds),
                               ('kansai', catalog.get_kansai_bounds),
                               ('tohoku', catalog.get_tohoku_bounds),
                               ('east_japan', catalog.get_east_japan_bounds)):
        quake_cat = folder + 'quakes_' + region + '.txt'
        catalog.select_geographically(get_bounds(), folder + CATALOG_EXTENSION, quake_cat)
        _run_tests(region, quake_cat, folder + 'mainshocks_' + region + '.txt', lead='\n')