This article collects typical usage examples of the Python method sortedcontainers.SortedDict.keys. If you have been wondering what SortedDict.keys does, how to use it, and what real code that calls it looks like, the curated examples below should help. You can also read more about the class it belongs to, sortedcontainers.SortedDict.
Below are 15 code examples of SortedDict.keys, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
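Before diving into the examples, here is a minimal standalone sketch (not taken from any of the examples below) of what SortedDict.keys returns: a SortedKeysView that iterates in key order, supports positional indexing, and stays in sync with the dictionary.

from sortedcontainers import SortedDict

d = SortedDict({'b': 2, 'd': 4, 'a': 1})
keys = d.keys()            # SortedKeysView, always in sorted order
print(list(keys))          # ['a', 'b', 'd']
print(keys[0], keys[-1])   # 'a' 'd' - positional indexing on the view
d['c'] = 3
print(list(keys))          # ['a', 'b', 'c', 'd'] - the view is live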
Example 1: PrioritizedIntensity
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class PrioritizedIntensity(object):
    _MIN_VALUE = 0.005

    def __init__(self):
        self._values = SortedDict()

    def set(self, value, priority=100):
        value = float(value)
        if value < self._MIN_VALUE and priority in self._values:
            del self._values[priority]
        else:
            self._values[priority] = value

    def eval(self):
        if not self._values:
            return 0.0
        return self._values[self._values.iloc[-1]]

    def top_priority(self):
        if not self._values:
            return 0
        return self._values.keys()[len(self._values) - 1]

    def reset(self):
        self._values.clear()
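A hypothetical usage run of the PrioritizedIntensity class above (the values and priorities are made up for illustration): eval() returns the value stored under the highest priority, and top_priority() reads that priority through keys() indexing.

# Hypothetical usage of the PrioritizedIntensity class defined above.
pi = PrioritizedIntensity()
pi.set(0.3)                  # default priority 100
pi.set(0.8, priority=200)    # higher priority wins
print(pi.eval())             # 0.8 - value stored under the highest priority
print(pi.top_priority())     # 200 - last (largest) key in the SortedDict
pi.set(0.001, priority=200)  # below _MIN_VALUE: removes priority 200
print(pi.eval())             # 0.3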
Example 2: createTermIndex
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def createTermIndex():
    sortTepDic = SortedDict()
    # Structure for each term:
    # sortTepDic['term'] = ({'DocId1': ['Pos1', 'Pos2'], 'DocId2': ['Pos1', 'Pos2']}, 'termFreq', 'DocFreq')
    for root, dirs, files in os.walk(Contants.DATA_DIRECTORY_NAME, topdown=True):
        for name in files:
            file_name = os.path.join(root, name)
            # 'r'  when the file will only be read
            # 'w'  for writing only (an existing file with the same name will be erased)
            # 'a'  opens the file for appending; any data written to the file is added to the end
            # 'r+' opens the file for both reading and writing
            mode = "r"
            file_object = open(file_name, mode)
            DocId = os.path.split(file_name)[1]
            wordPos = 0
            for word in file_object.read().split():
                wordPos = wordPos + 1  # increment word location
                lamma = applyFilters(word)
                if lamma:
                    if lamma not in sortTepDic:
                        sortTepDic[lamma] = [{DocId: [wordPos]}, 1, 1]  # add a new term
                    else:
                        sortTepDic[lamma][1] = sortTepDic[lamma][1] + 1  # increment the term frequency
                        if DocId in sortTepDic[lamma][0]:
                            sortTepDic[lamma][0][DocId].append(wordPos)  # add a new word position for the existing document
                        else:
                            sortTepDic[lamma][0][DocId] = [wordPos]  # add a new document ID and the word position
                            sortTepDic[lamma][2] = sortTepDic[lamma][2] + 1  # increment the document frequency
    # convert lists to tuples
    for key in sortTepDic.keys():
        for DocId in sortTepDic[key][0]:
            sortTepDic[key][0][DocId] = tuple(sortTepDic[key][0][DocId])
        sortTepDic[key] = tuple(sortTepDic[key])
    Data.write_dataStruct_to_file(Contants.WORD_INDEX_FILE_NAME, sortTepDic)
    createLexicon(sortTepDic)
    createPostingList(sortTepDic)
Example 3: InMemoryStorage
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class InMemoryStorage(object):
    def __init__(self):
        self.kvstore = SortedDict()  # sorted key/value store

    def get(self, k):
        try:
            return self.kvstore[k]
        except KeyError:
            return 1

    def put(self, k, v):
        self.kvstore[k] = v
        return 0

    def delete(self, k):
        try:
            del self.kvstore[k]
            return 0
        except KeyError:
            return 1

    def split(self, section, keyspace_mid):
        """Delete one half of the keystore for a group split operation."""
        midKey = None
        for key in self.kvstore.keys():  # TODO make more efficient for better performance
            if key > str(keyspace_mid):
                midKey = self.kvstore.index(key)
                break
        # Rebuild the SortedDict from the half of the items we keep.
        if section:  # section is either 0 or 1
            self.kvstore = SortedDict(self.kvstore.items()[midKey:])
        else:
            self.kvstore = SortedDict(self.kvstore.items()[:midKey])
        print(self.kvstore)
        return 0

    def save(self):  # need metadata here
        save_state("data/backup/db_copy.pkl", self.kvstore)

    def load(self):
        self.kvstore = load_state("data/backup/db_copy.pkl")
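The linear scan in split() (flagged by its TODO) can be avoided, because a SortedDict already supports bisect-style lookups on its keys. A standalone sketch of that pattern with made-up keys, not part of the original example:

from sortedcontainers import SortedDict

kvstore = SortedDict({'a': 1, 'c': 2, 'f': 3, 'k': 4})
keyspace_mid = 'd'
# Index of the first key strictly greater than the midpoint, in O(log n).
midKey = kvstore.bisect_right(keyspace_mid)
upper_half = SortedDict(kvstore.items()[midKey:])   # keys 'f', 'k'
lower_half = SortedDict(kvstore.items()[:midKey])   # keys 'a', 'c'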
Example 4: test_keysview
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def test_keysview():
    mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
    temp = SortedDict(mapping[:13])
    keys = temp.keys()

    assert len(keys) == 13
    assert 'a' in keys
    assert list(keys) == [val for val, pos in mapping[:13]]
    assert keys[0] == 'a'
    assert list(reversed(keys)) == list(reversed(string.ascii_lowercase[:13]))
    assert keys.index('f') == 5
    assert keys.count('m') == 1
    assert keys.count('0') == 0
    assert keys.isdisjoint(['1', '2', '3'])

    temp.update(mapping[13:])

    assert len(keys) == 26
    assert 'z' in keys
    assert list(keys) == [val for val, pos in mapping]

    that = dict(mapping)
    that_keys = get_keysview(that)

    assert keys == that_keys
    assert not (keys != that_keys)
    assert not (keys < that_keys)
    assert not (keys > that_keys)
    assert keys <= that_keys
    assert keys >= that_keys

    assert list(keys & that_keys) == [val for val, pos in mapping]
    assert list(keys | that_keys) == [val for val, pos in mapping]
    assert list(keys - that_keys) == []
    assert list(keys ^ that_keys) == []

    keys = SortedDict(mapping[:2]).keys()
    assert repr(keys) == "SortedKeysView(SortedDict({'a': 0, 'b': 1}))"
Example 5: plotWidth
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def plotWidth(dwdictX, fname, nameX, mX, cuts):
    sorted_dwdictX = SortedDict(dwdictX)
    n = len(sorted_dwdictX) - 1
    x = array('d', sorted_dwdictX.keys())
    y = array('d', sorted_dwdictX.values())
    gwX = TGraph(n, x, y)
    gwX.SetName("gwX")
    gwX.SetTitle("")
    gwX.GetXaxis().SetTitle("tan#beta")
    gwX.GetYaxis().SetTitle("#Gamma_{#it{" + nameX + "}}/#it{m}_{#it{" + nameX + "}} [%]")
    gwX.SetLineColor(ROOT.kBlack)
    gwX.SetMarkerColor(ROOT.kBlack)
    gwX.SetMarkerStyle(20)
    gwX.SetMarkerSize(0.5)
    ptxt = TPaveText(0.62, 0.70, 0.87, 0.87, "NDC")
    ptxt.SetFillStyle(4000)  # will be transparent
    ptxt.SetFillColor(0)
    ptxt.SetTextFont(42)
    ptxt.SetBorderSize(0)
    ptxt.AddText("sin(#beta-#alpha)=1")
    ptxt.AddText("#it{m}_{#it{" + nameX + "}}=" + str(mX) + " GeV")
    c = TCanvas("c", "c", 600, 600)
    c.cd()
    c.SetLogx()
    c.SetLogy()
    c.SetGridx()
    c.SetGridy()
    c.SetTicks(1, 1)
    c.Draw()
    # gwX.Draw("p")
    gwX.Draw()
    ptxt.Draw("same")
    c.Modified()
    c.Update()
    c.SaveAs(fname)
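A side note on why Example 5 can build the x and y arrays independently: keys() and values() of a SortedDict iterate in the same sorted-key order, so the pairs stay aligned. A tiny standalone check with made-up numbers:

from array import array
from sortedcontainers import SortedDict

d = SortedDict({3.0: 9.0, 1.0: 1.0, 2.0: 4.0})
x = array('d', d.keys())    # array('d', [1.0, 2.0, 3.0])
y = array('d', d.values())  # array('d', [1.0, 4.0, 9.0]) - same order as x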
Example 6: doMarginal
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def doMarginal(net, argies):
    prob = 0
    for i in argies:
        # First argument: the variable
        var = i[0]
        # Second argument: its state
        state = i[1]
        # Base cases for the marginal recursion
        if var == 'p':
            if state == 'true':
                prob += net[0].p['pL']
            else:
                prob += net[0].p['pH']
        if var == 's':
            if state == 'true':
                prob += net[1].p['sT']
            else:
                prob += net[1].p['sF']
        if var == 'c':
            pars = net[2].parents
            a = SortedDict(net[2].p)
            b = SortedDict(pars[0].p)
            c = SortedDict(pars[1].p)
            # MARGINAL LOGIC
            if state == 'true':
                for combos in a.keys():
                    for combo1 in b.keys():
                        for combo2 in c.keys():
                            if combo1 in combos and combo2 in combos:
                                prob += net[2].p[combos] * pars[0].p[combo1] * pars[1].p[combo2]
            elif state == 'false':
                prob = 1 - doMarginal(net, [('c', 'true')])
            # CONDITIONALS FROM CLASS
            elif state == 'sT':
                for combos in a.keys():
                    for combo1 in b.keys():
                        for combo2 in c.keys():
                            if combo2 == 'sT' and combo1 in combos and combo2 in combos:
                                breakP()
                                prob += net[2].p[combos] * pars[0].p[combo1] * pars[1].p[combo2]
            elif state == 'sF':
                for combos in a.keys():
                    for combo1 in b.keys():
                        for combo2 in c.keys():
                            if combo2 == 'sF' and combo1 in combos and combo2 in combos:
                                breakP()
                                prob += net[2].p[combos] * pars[0].p[combo1] * pars[1].p[combo2]
            elif state == 'pH':
                for combos in a.keys():
                    for combo1 in b.keys():
                        for combo2 in c.keys():
                            if combo1 == 'pH' and combo1 in combos and combo2 in combos:
                                breakP()
                                prob += net[2].p[combos] * pars[0].p[combo1] * pars[1].p[combo2]
            elif state == 'pL':
                for combos in a.keys():
                    for combo1 in b.keys():
                        for combo2 in c.keys():
                            if combo1 == 'pL' and combo1 in combos and combo2 in combos:
                                breakP()
                                prob += net[2].p[combos] * pars[0].p[combo1] * pars[1].p[combo2]
            else:
                raise Exception('CANCER CANT BE DETERMINED FROM XRAY OR DYSPNOEA BECAUSE THEY DEPEND ON CANCER OUTPUT')
        if var == 'x':
            pars = net[3].parents
            a = SortedDict(net[3].p)
            b = SortedDict(pars[0].p)
            if state == 'true':
                for combos in a.keys():
                    for combo1 in b.keys():
                        if combos == 'XcT':
                            prob += net[3].p[combos] * pars[0].p[combo1]
            # if state == 'true':
            #     prob += net[2].p['CpHsT']*net[0].['pH']*net[1].['sT']
            #     prob += net[2].p['CpLsT']*net[0].['pL']*net[1].['sT']
            #     prob += net[2].p['Cp
    return prob
Example 7: KeyedRegion
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class KeyedRegion(object):
    """
    KeyedRegion keeps a mapping between stack offsets and all objects covering that offset. It assumes no variable in
    this region overlaps with another variable in this region.

    Registers and function frames can all be viewed as a keyed region.
    """

    def __init__(self, tree=None):
        self._storage = SortedDict() if tree is None else tree

    def _get_container(self, offset):
        try:
            base_offset = next(self._storage.irange(maximum=offset, reverse=True))
        except StopIteration:
            return offset, None
        else:
            container = self._storage[base_offset]
            if container.includes(offset):
                return base_offset, container
            return offset, None

    def __contains__(self, offset):
        """
        Test if there is at least one variable covering the given offset.

        :param offset:
        :return:
        """
        return self._get_container(offset)[1] is not None

    def __len__(self):
        return len(self._storage)

    def __iter__(self):
        return iter(self._storage.values())

    def __eq__(self, other):
        if set(self._storage.keys()) != set(other._storage.keys()):
            return False
        for k, v in self._storage.items():
            if v != other._storage[k]:
                return False
        return True

    def copy(self):
        if not self._storage:
            return KeyedRegion()
        kr = KeyedRegion()
        for key, ro in self._storage.items():
            kr._storage[key] = ro.copy()
        return kr

    def merge(self, other, make_phi_func=None):
        """
        Merge another KeyedRegion into this KeyedRegion.

        :param KeyedRegion other: The other instance to merge with.
        :return: None
        """
        # TODO: is the current solution not optimal enough?
        for _, item in other._storage.items():  # type: RegionObject
            for loc_and_var in item.stored_objects:
                self.__store(loc_and_var, overwrite=False, make_phi_func=make_phi_func)
        return self

    def dbg_repr(self):
        """
        Get a debugging representation of this keyed region.

        :return: A string of debugging output.
        """
        keys = self._storage.keys()
        offset_to_vars = {}
        for key in sorted(keys):
            ro = self._storage[key]
            variables = [obj.obj for obj in ro.stored_objects]
            offset_to_vars[key] = variables
        s = []
        for offset, variables in offset_to_vars.items():
            s.append("Offset %#x: %s" % (offset, variables))
        return "\n".join(s)

    def add_variable(self, start, variable):
        """
        Add a variable to this region at the given offset.

        :param int start:
        :param SimVariable variable:
        :return: None
        """
        size = variable.size if variable.size is not None else 1
        # ... (rest of this example omitted) ...
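Example 7's _get_container leans on SortedDict.irange(maximum=offset, reverse=True) to find the closest stored offset at or below a query offset. A standalone sketch of that lookup pattern with made-up offsets:

from sortedcontainers import SortedDict

storage = SortedDict({0x0: 'var_a', 0x10: 'var_b', 0x28: 'var_c'})

def base_offset_at_or_below(offset):
    # Walk keys <= offset in descending order; the first hit is the
    # closest base offset, or None if offset precedes every key.
    try:
        return next(storage.irange(maximum=offset, reverse=True))
    except StopIteration:
        return None

print(base_offset_at_or_below(0x1c))  # 0x10 -> prints 16
print(base_offset_at_or_below(-1))    # None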
Example 8: CacheStore
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class CacheStore(object):
    class CacheItem(object):
        __slots__ = ('valid', 'data')

        def __init__(self):
            self.valid = Event()
            self.data = None

    def __init__(self, key=None):
        self.lock = RLock()
        self.store = SortedDict(key)

    def __getitem__(self, item):
        return self.get(item)

    def put(self, key, data):
        with self.lock:
            try:
                item = self.store[key]
                item.data = data
                item.valid.set()
                return False
            except KeyError:
                item = self.CacheItem()
                item.data = data
                item.valid.set()
                self.store[key] = item
                return True

    def update(self, **kwargs):
        with self.lock:
            items = {}
            created = []
            updated = []
            for k, v in kwargs.items():
                items[k] = self.CacheItem()
                items[k].data = v
                items[k].valid.set()
                if k in self.store:
                    updated.append(k)
                else:
                    created.append(k)
            self.store.update(**items)
            return created, updated

    def update_one(self, key, **kwargs):
        with self.lock:
            item = self.get(key)
            if not item:
                return False
            for k, v in kwargs.items():
                set(item, k, v)
            self.put(key, item)
            return True

    def update_many(self, key, predicate, **kwargs):
        with self.lock:
            updated = []
            for k, v in self.itervalid():
                if predicate(v):
                    if self.update_one(k, **kwargs):
                        updated.append(key)
            return updated

    def get(self, key, default=None, timeout=None):
        item = self.store.get(key)
        if item:
            item.valid.wait(timeout)
            return item.data
        return default

    def remove(self, key):
        with self.lock:
            try:
                del self.store[key]
                return True
            except KeyError:
                return False

    def remove_many(self, keys):
        with self.lock:
            removed = []
            for key in keys:
                try:
                    del self.store[key]
                    removed.append(key)
                except KeyError:
                    pass
            return removed

    def clear(self):
        with self.lock:
            items = list(self.store.keys())
            self.store.clear()
            # ... (rest of this example omitted) ...
Example 9: FederationRemoteSendQueue
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class FederationRemoteSendQueue(object):
    """A drop in replacement for FederationSender"""

    def __init__(self, hs):
        self.server_name = hs.hostname
        self.clock = hs.get_clock()
        self.notifier = hs.get_notifier()
        self.is_mine_id = hs.is_mine_id

        self.presence_map = {}  # Pending presence map user_id -> UserPresenceState
        self.presence_changed = SortedDict()  # Stream position -> list[user_id]

        # Stores the destinations we need to explicitly send presence to about a
        # given user.
        # Stream position -> (user_id, destinations)
        self.presence_destinations = SortedDict()

        self.keyed_edu = {}  # (destination, key) -> EDU
        self.keyed_edu_changed = SortedDict()  # stream position -> (destination, key)

        self.edus = SortedDict()  # stream position -> Edu

        self.device_messages = SortedDict()  # stream position -> destination

        self.pos = 1
        self.pos_time = SortedDict()

        # EVERYTHING IS SAD. In particular, python only makes new scopes when
        # we make a new function, so we need to make a new function so the inner
        # lambda binds to the queue rather than to the name of the queue which
        # changes. ARGH.
        def register(name, queue):
            LaterGauge("synapse_federation_send_queue_%s_size" % (queue_name,),
                       "", [], lambda: len(queue))

        for queue_name in [
            "presence_map", "presence_changed", "keyed_edu", "keyed_edu_changed",
            "edus", "device_messages", "pos_time", "presence_destinations",
        ]:
            register(queue_name, getattr(self, queue_name))

        self.clock.looping_call(self._clear_queue, 30 * 1000)

    def _next_pos(self):
        pos = self.pos
        self.pos += 1
        self.pos_time[self.clock.time_msec()] = pos
        return pos

    def _clear_queue(self):
        """Clear the queues for anything older than N minutes"""
        FIVE_MINUTES_AGO = 5 * 60 * 1000
        now = self.clock.time_msec()

        keys = self.pos_time.keys()
        time = self.pos_time.bisect_left(now - FIVE_MINUTES_AGO)
        if not keys[:time]:
            return

        position_to_delete = max(keys[:time])
        for key in keys[:time]:
            del self.pos_time[key]

        self._clear_queue_before_pos(position_to_delete)

    def _clear_queue_before_pos(self, position_to_delete):
        """Clear all the queues from before a given position"""
        with Measure(self.clock, "send_queue._clear"):
            # Delete things out of presence maps
            keys = self.presence_changed.keys()
            i = self.presence_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_changed[key]

            user_ids = set(
                user_id
                for uids in self.presence_changed.values()
                for user_id in uids
            )

            keys = self.presence_destinations.keys()
            i = self.presence_destinations.bisect_left(position_to_delete)
            for key in keys[:i]:
                del self.presence_destinations[key]

            user_ids.update(
                user_id for user_id, _ in self.presence_destinations.values()
            )

            to_del = [
                user_id for user_id in self.presence_map if user_id not in user_ids
            ]
            for user_id in to_del:
                del self.presence_map[user_id]

            # Delete things out of keyed edus
            keys = self.keyed_edu_changed.keys()
            i = self.keyed_edu_changed.bisect_left(position_to_delete)
            for key in keys[:i]:
                # ... (rest of this example omitted) ...
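The clearing logic in Example 9 is a reusable pattern: bisect_left on a SortedDict tells you how many leading keys fall before a threshold, and slicing the keys view gives exactly those keys to delete. A standalone sketch with made-up stream positions:

from sortedcontainers import SortedDict

pos_time = SortedDict({100: 'a', 200: 'b', 300: 'c', 400: 'd'})
threshold = 250

keys = pos_time.keys()
i = pos_time.bisect_left(threshold)   # number of keys < 250 -> 2
for key in list(keys[:i]):            # copy the slice before deleting
    del pos_time[key]

print(list(pos_time))                 # [300, 400]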
Example 10: WordData
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class WordData(QObject):
    # Define the signal we emit when we have loaded new data
    WordsUpdated = pyqtSignal()

    def __init__(self, my_book):
        super().__init__(None)
        # Save reference to the book
        self.my_book = my_book
        # Save reference to the metamanager
        self.metamgr = my_book.get_meta_manager()
        # Save reference to the edited document
        self.document = my_book.get_edit_model()
        # Save reference to a speller, which will be the default
        # at this point.
        self.speller = my_book.get_speller()
        # The vocabulary list as a sorted dict.
        self.vocab = SortedDict()
        # Key and Values views on the vocab list for indexing by table row.
        self.vocab_kview = self.vocab.keys()
        self.vocab_vview = self.vocab.values()
        # The count of available words based on the latest sort
        self.active_word_count = 0
        # The good- and bad-words sets and the scannos set.
        self.good_words = set()
        self.bad_words = set()
        self.scannos = set()
        # A dict of words that use an alt-dict tag. The key is a word and the
        # value is the alt-dict tag string.
        self.alt_tags = SortedDict()
        # Cached sort vectors, see get_sort_vector()
        self.sort_up_vectors = [None, None, None]
        self.sort_down_vectors = [None, None, None]
        self.sort_key_funcs = [None, None, None]
        # Register metadata readers and writers.
        self.metamgr.register(C.MD_GW, self.good_read, self.good_save)
        self.metamgr.register(C.MD_BW, self.bad_read, self.bad_save)
        self.metamgr.register(C.MD_SC, self.scanno_read, self.scanno_save)
        self.metamgr.register(C.MD_VL, self.word_read, self.word_save)
        # End of __init__

    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    # Methods used when saving metadata. The items in the good_words,
    # bad_words, and scanno sets are simply returned as a list of strings.
    #
    def good_save(self, section):
        return [token for token in self.good_words]

    def bad_save(self, section):
        return [token for token in self.bad_words]

    def scanno_save(self, section):
        return [token for token in self.scannos]

    #
    # To save the vocabulary, write a list for each word:
    #    [ "token", "tag", count, [prop-code...] ]
    # where "token" is the word as a string, "tag" is its alt-dict tag
    # or a null string, count is an integer and [prop-code...] is the
    # integer values from the word's property set as a list. Note that
    # alt_tag needs to be a string because json doesn't handle None.
    #
    def word_save(self, section):
        vlist = []
        for word in self.vocab:
            [count, prop_set] = self.vocab[word]
            # tag = "" if AD not in prop_set else self.alt_tags[word]
            tag = ""
            if AD in prop_set:
                if word in self.alt_tags:
                    tag = self.alt_tags[word]
                else:  # should never occur, could be assertion error
                    worddata_logger.error('erroneous alt tag on ' + word)
            plist = list(prop_set)
            vlist.append([word, count, tag, plist])
        return vlist

    #
    # Methods used to load metadata. Called by the metadata manager with
    # a single Python object, presumably the object that was prepared by
    # the matching _save method above. Because the user might edit the metadata
    # file, do a little quality control.
    #
    def good_read(self, section, value, version):
        if isinstance(value, list):
            for token in value:
                if isinstance(token, str):
                    if token in self.bad_words:
                        worddata_logger.warn(
                            '"{}" is in both good and bad words - use in good ignored'.format(token)
                        )
                    else:
                        self.good_words.add(token)
                        if token in self.vocab:  # vocab already loaded, it seems
                            props = self.vocab[token][1]
                            props.add(GW)
                            props &= prop_nox
                else:
                    worddata_logger.error(
                        '{} in GOODWORDS list ignored'.format(token)
                        # ... (rest of this example omitted) ...
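The interesting detail in Example 10 is that vocab_kview and vocab_vview are live views: they are created once in __init__, and later on table rows can be looked up by position as the underlying SortedDict grows. A small standalone sketch of that table-row pattern (made-up words):

from sortedcontainers import SortedDict

vocab = SortedDict()
vocab_kview = vocab.keys()      # live, positionally indexable view
vocab_vview = vocab.values()

vocab['banana'] = [2, set()]
vocab['apple'] = [5, set()]

# Row 0 of a word table shows the alphabetically first word.
print(vocab_kview[0], vocab_vview[0])   # apple [5, set()]
print(len(vocab_kview))                 # 2 - the view tracks later inserts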
Example 11: test_keys
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def test_keys():
    mapping = [(val, pos) for pos, val in enumerate(string.ascii_lowercase)]
    temp = SortedDict(mapping)
    assert list(temp.keys()) == [key for key, pos in mapping]
Example 12: open_file
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
def open_file(path):
    book = xlrd.open_workbook(path)
    sheet = book.sheet_by_index(0)
    major_name = []
    numStudByMajor1y = []
    numStudByMajor2y = []
    for row_index in xrange(1, sheet.nrows):
        row = sheet.row_values(row_index)
        if row[4] not in major_name:
            major_name.append(row[4])
        if len(major_name) == 18:
            break

    flow = dict([(major, {}) for major in major_name])
    numStudByMajor1y = dict([(major, {}) for major in major_name])
    numStudByMajor2y = dict([(major, {}) for major in major_name])
    for key in flow:
        flow[key] = dict([(major, {}) for major in major_name])
        numStudByMajor1y[key]["Total"] = 0
        numStudByMajor1y[key]["Male"] = 0
        numStudByMajor1y[key]["Female"] = 0
        numStudByMajor1y[key]["Good"] = 0
        numStudByMajor1y[key]["Moderate"] = 0
        numStudByMajor1y[key]["Poor"] = 0
        numStudByMajor2y[key]["Total"] = 0
        numStudByMajor2y[key]["Male"] = 0
        numStudByMajor2y[key]["Female"] = 0
        numStudByMajor2y[key]["Good"] = 0
        numStudByMajor2y[key]["Moderate"] = 0
        numStudByMajor2y[key]["Poor"] = 0
        for key1 in flow[key]:
            flow[key][key1]["_1Total"] = 0
            flow[key][key1]["_1Male"] = 0
            flow[key][key1]["_1Female"] = 0
            flow[key][key1]["_1Good"] = 0
            flow[key][key1]["_1Moderate"] = 0
            flow[key][key1]["_1Poor"] = 0
            flow[key][key1]["_2Total"] = 0
            flow[key][key1]["_2Male"] = 0
            flow[key][key1]["_2Female"] = 0
            flow[key][key1]["_2Good"] = 0
            flow[key][key1]["_2Moderate"] = 0
            flow[key][key1]["_2Poor"] = 0

    for key in flow:
        print flow[key]

    for row_index in xrange(1, sheet.nrows):
        row = sheet.row_values(row_index)
        numStudByMajor1y[row[2]]["Total"] += row[12]
        numStudByMajor1y[row[2]][row[0]] += row[12]
        numStudByMajor1y[row[2]][row[6]] += row[12]
        numStudByMajor2y[row[3]]["Total"] += row[12]
        numStudByMajor2y[row[3]][row[0]] += row[12]
        numStudByMajor2y[row[3]][row[6]] += row[12]

    for row_index in xrange(1, sheet.nrows):
        row = sheet.row_values(row_index)
        flow[row[2]][row[3]]["_1Total"] += row[12] / numStudByMajor1y[row[2]]["Total"]
        flow[row[2]][row[3]]["_1" + row[0]] += row[12] / numStudByMajor1y[row[2]][row[0]]
        flow[row[2]][row[3]]["_1" + row[6]] += row[12] / numStudByMajor1y[row[2]][row[6]]
        flow[row[3]][row[4]]["_2Total"] += row[12] / numStudByMajor2y[row[3]]["Total"]
        flow[row[3]][row[4]]["_2" + row[0]] += row[12] / numStudByMajor2y[row[3]][row[0]]
        flow[row[3]][row[4]]["_2" + row[6]] += row[12] / numStudByMajor2y[row[3]][row[6]]

    flow = SortedDict(flow)
    for key in flow:
        flow[key] = SortedDict(flow[key])

    with open('flow.csv', 'wb') as testfile:
        csv_writer = csv.writer(testfile)
        title = []
        title.extend(flow.keys())
        csv_writer.writerow(title)
        for key in flow:
            row = []
            row.extend(flow[key].values())
            csv_writer.writerow(row)
Example 13: OrderTree
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
class OrderTree(object):
    '''A red-black tree used to store OrderLists in price order

    The exchange will be using the OrderTree to hold bid and ask data (one OrderTree for each side).
    Keeping the information in a red black tree makes it easier/faster to detect a match.
    '''

    def __init__(self):
        self.price_map = SortedDict()  # Dictionary containing price : OrderList object
        self.prices = self.price_map.keys()
        self.order_map = {}  # Dictionary containing order_id : Order object
        self.volume = 0  # Contains total quantity from all Orders in tree
        self.num_orders = 0  # Contains count of Orders in tree
        self.depth = 0  # Number of different prices in tree (http://en.wikipedia.org/wiki/Order_book_(trading)#Book_depth)

    def __len__(self):
        return len(self.order_map)

    def get_price_list(self, price):
        return self.price_map[price]

    def get_order(self, order_id):
        return self.order_map[order_id]

    def create_price(self, price):
        self.depth += 1  # Add a price depth level to the tree
        new_list = OrderList()
        self.price_map[price] = new_list

    def remove_price(self, price):
        self.depth -= 1  # Remove a price depth level
        del self.price_map[price]

    def price_exists(self, price):
        return price in self.price_map

    def order_exists(self, order):
        return order in self.order_map

    def insert_order(self, quote):
        if self.order_exists(quote['order_id']):
            self.remove_order_by_id(quote['order_id'])
        self.num_orders += 1
        if quote['price'] not in self.price_map:
            self.create_price(quote['price'])  # If price not in Price Map, create a node in RBtree
        order = Order(quote, self.price_map[quote['price']])  # Create an order
        self.price_map[order.price].append_order(order)  # Add the order to the OrderList in Price Map
        self.order_map[order.order_id] = order
        self.volume += order.quantity

    def update_order(self, order_update):
        order = self.order_map[order_update['order_id']]
        original_quantity = order.quantity
        if order_update['price'] != order.price:
            # Price changed. Remove order and update tree.
            order_list = self.price_map[order.price]
            order_list.remove_order(order)
            if len(order_list) == 0:  # If there is nothing else in the OrderList, remove the price from RBtree
                self.remove_price(order.price)
            self.insert_order(order_update)
        else:
            # Quantity changed. Price is the same.
            order.update_quantity(order_update['quantity'], order_update['timestamp'])
            self.volume += order.quantity - original_quantity

    def remove_order_by_id(self, order_id):
        self.num_orders -= 1
        order = self.order_map[order_id]
        self.volume -= order.quantity
        order.order_list.remove_order(order)
        if len(order.order_list) == 0:
            self.remove_price(order.price)
        del self.order_map[order_id]

    def max_price(self):
        if self.depth > 0:
            return self.prices[-1]
        else:
            return None

    def min_price(self):
        if self.depth > 0:
            return self.prices[0]
        else:
            return None

    def max_price_list(self):
        if self.depth > 0:
            return self.get_price_list(self.max_price())
        else:
            return None

    def min_price_list(self):
        if self.depth > 0:
            return self.get_price_list(self.min_price())
        else:
            return None
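Worth noting about Example 13: self.prices is assigned once from price_map.keys() and then reused in max_price() and min_price(). Because SortedDict.keys() returns a live sorted view, prices[-1] and prices[0] always reflect the current best prices without any re-sorting. A minimal sketch of that idea with hypothetical prices:

from sortedcontainers import SortedDict

price_map = SortedDict()
prices = price_map.keys()        # live view, stays sorted as entries change

price_map[101.5] = 'orders at 101.5'
price_map[99.0] = 'orders at 99.0'
price_map[100.25] = 'orders at 100.25'

print(prices[0], prices[-1])     # 99.0 101.5 - min and max without sorting
del price_map[101.5]
print(prices[-1])                # 100.25 - the view tracks the deletion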
Example 14: isPhoto
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
if isPhoto(file):
    try:
        exif = getExif(os.path.join(subdir, file))
        if not cameraIsValid(exif):
            continue
        # get focal length and convert from rational data type to float
        focalLength = exif[FOCALLENGTH_TAG][0] / exif[FOCALLENGTH_TAG][1]
        # count every focal length occurrence in the dictionary
        if focalLength in occurences:
            occurences[focalLength] = occurences[focalLength] + 1
        else:  # find the nearest known focal length
            index = occurences.bisect(focalLength)
            greater = occurences.iloc[index]
            smaller = occurences.iloc[index - 1]
            nearestFL = greater if (greater - focalLength < focalLength - smaller) else smaller
            occurences[nearestFL] = occurences[nearestFL] + 1
    except (KeyError, TypeError, IndexError):
        # there is no focal length info in the image exif data (Key/Type/IndexError)
        pass

# plot the graph
position = arange(len(focalLengths)) + .5
barh(position, occurences.values(), align='center', color='#FF0000')
yticks(position, occurences.keys())
xlabel('Occurrences')
ylabel('Focal length')
title('Focal length usage analysis')
grid(True)
show()
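Example 14 snaps each photo's focal length to the nearest key already present in the SortedDict, using bisect plus positional access (iloc is the older sortedcontainers 1.x spelling; indexing the keys() view is the current equivalent). A standalone sketch of the nearest-key lookup with made-up focal lengths:

from sortedcontainers import SortedDict

occurences = SortedDict({18.0: 0, 35.0: 0, 50.0: 0, 85.0: 0})
focalLength = 47.0

# Assumes focalLength falls strictly between the smallest and largest keys.
index = occurences.bisect_left(focalLength)   # first key >= 47.0
keys = occurences.keys()
greater = keys[index]                          # 50.0
smaller = keys[index - 1]                      # 35.0
nearestFL = greater if (greater - focalLength) < (focalLength - smaller) else smaller
occurences[nearestFL] += 1                     # 50.0 gets the count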
Example 15: DotMap
# Required import: from sortedcontainers import SortedDict [as alias]
# Or: from sortedcontainers.SortedDict import keys [as alias]
    # ... (beginning of this example omitted) ...
            d[k] = v
        return d

    def pprint(self):
        pprint(self.to_dict())

    # proper dict subclassing
    def values(self):
        return self._map.values()

    @staticmethod
    def parse_other(other):
        if type(other) is DotMap:
            return other._map
        else:
            return other

    def __cmp__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__cmp__(other)

    def __eq__(self, other):
        other = DotMap.parse_other(other)
        if not isinstance(other, dict):
            return False
        return self._map.__eq__(other)

    def __ge__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__ge__(other)

    def __gt__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__gt__(other)

    def __le__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__le__(other)

    def __lt__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__lt__(other)

    def __ne__(self, other):
        other = DotMap.parse_other(other)
        return self._map.__ne__(other)

    def __delitem__(self, key):
        return self._map.__delitem__(key)

    def __len__(self):
        return self._map.__len__()

    def copy(self):
        return self

    def get(self, key, default=None):
        return self._map.get(key, default)

    def has_key(self, key):
        return key in self._map

    def iterkeys(self):
        return self._map.iterkeys()

    def itervalues(self):
        return self._map.itervalues()

    def keys(self):
        return self._map.keys()

    def pop(self, key, default=None):
        return self._map.pop(key, default)

    def setdefault(self, key, default=None):
        return self._map.setdefault(key, default)

    def viewitems(self):
        if version_info.major == 2 and version_info.minor >= 7:
            return self._map.viewitems()
        else:
            return self._map.items()

    def viewkeys(self):
        if version_info.major == 2 and version_info.minor >= 7:
            return self._map.viewkeys()
        else:
            return self._map.keys()

    def viewvalues(self):
        if version_info.major == 2 and version_info.minor >= 7:
            return self._map.viewvalues()
        else:
            return self._map.values()

    @classmethod
    def fromkeys(cls, seq, value=None):
        d = DotMap()
        d._map = SortedDict.fromkeys(seq, value)
        return d