本文整理汇总了Python中serializer.Serializer.deserialize方法的典型用法代码示例。如果您正苦于以下问题:Python Serializer.deserialize方法的具体用法?Python Serializer.deserialize怎么用?Python Serializer.deserialize使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类serializer.Serializer
的用法示例。
在下文中一共展示了Serializer.deserialize方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: RPCStub
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
class RPCStub(object):
    """Server-side RPC stub: deserializes an incoming request, dispatches it
    to the wrapped service object, and records the result or the raised
    exception.

    NOTE(review): this excerpt appears truncated -- ``process`` collects
    ``ret`` and ``exception`` but the code that serializes them back to the
    caller is not visible here.
    """
    def __init__(self, uri, stub, namespace=None):
        # uri: endpoint this stub serves; stub: the actual service object.
        self.uri = uri
        self.stub = stub
        self.namespace = namespace
        self._seril = Serializer(namespace)  # request/response (de)serializer
        self.logger = None  # optional; every use is guarded with `self.logger and ...`
    def process(self, method, data):
        """Decode `data`, invoke `method` on the stub, capture outcome."""
        ret = exception = None
        try:
            # Wire format carries positional and keyword arguments together.
            args, kw = self._seril.deserialize(data)
            try:
                self.logger and self.logger.info(u"method:%s, args:%s, kw:%s" % (method,
                    args, kw))
            except Exception, e:
                #@todo: fix the decode problem.
                self.logger and self.logger.info(str(e))
            h = self._local_service(self.stub, method)
            if h is None: raise RPCException(u"Not found interface '%s'" % method)
            ret = h(*args, **kw)
            self.logger and self.logger.info("return:%s" % (ret, ))
        except BaseException, e:
            # BaseException on purpose: even SystemExit/KeyboardInterrupt are
            # captured and reported rather than killing the server loop.
            exception = e
            self.logger and self.logger.exception(e)
示例2: testSimpleSerialization
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def testSimpleSerialization(self):
    """Round-trip a test message through serialize/deserialize and verify
    that every field survives unchanged."""
    original = self.createTestMessage()
    wire = Serializer.serialize(original)
    restored = Serializer.deserialize(StringIO(wire), TestMessage1)
    self.assertDictEqual(original.__dict__, restored.__dict__)
示例3: RPCProxy
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
class RPCProxy(object):
    """Client-side callable proxy for a single remote RPC method.

    Calling the instance serializes the arguments, POSTs them to the
    service URL, and deserializes the response.

    NOTE(review): this excerpt is truncated -- the trailing ``finally:``
    clause has no visible body and the success-path return is missing.
    """
    def __init__(self, uri, method=None, namespaces=None, sid=''):
        self.__serviceURL = uri
        self.__serviceName = method
        # Accept either a ready ClassLoader or a plain list of namespaces.
        self.namespaces = isinstance(namespaces, ClassLoader) and namespaces or \
            ClassLoader(namespaces or [])
        self._seril = Serializer(self.namespaces)
        self.sid = sid  # session id sent with every request
        self.logger = None
        # Hooks fired around each remote call.
        self.start_call_listener = []
        self.end_call_listener = []
    def __call__(self, *args, **kw):
        """Invoke the remote method with `args`/`kw` over HTTP."""
        # Positional and keyword arguments travel as one serialized pair.
        args = self._seril.serialize((args, kw))
        post_data = {"method": self.__serviceName,
                     'params': args,
                     'id':'httprpc',
                     'sid':self.sid}
        #@todo xx
        for l in self.start_call_listener:
            l(name=self.__serviceName, args=args, kw_args=kw)
        rpc_response = self.post_request(self.__serviceURL, post_data)
        try:
            respdata = rpc_response.read()
            # Response carries (return-value, remote-exception).
            ret, e = self._seril.deserialize(respdata)
        except Exception, e:
            raise RPCException("Failed to deserialize response data:%s, exception:%s" %
                (respdata, e))
        finally:
示例4: _fetch_metadata_by_json
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def _fetch_metadata_by_json(self, json_obj):
    """ parses incoming json object representation and fetches related
    object metadata from the server. Returns None or the Metadata object.

    This method is called only when core object is not found in cache, and
    if metadata should be requested anyway, it could be done faster with
    this method that uses GET /neo/<obj_type>/198272/metadata/

    Currently not used, because inside the pull function (in cascade mode)
    it may be faster to collect all metadata after the whole object tree is
    fetched. For situations, when, say, dozens of objects tagged with the
    same value are requested, it's faster to fetch this value once at the
    end rather than requesting related metadata for every object.

    :param json_obj: dict parsed from a server JSON response; must have
        'fields' and 'permalink' keys.
    :return: a Metadata object with one attribute per property, or None.
    :raises: a mapped error from `errors.error_codes` on non-200 responses.
    """
    # BUGFIX: dict.has_key() is Python-2-only (removed in Python 3);
    # .get() handles both "key missing" and "value empty" in one lookup.
    if not json_obj['fields'].get('metadata'):
        return None  # no metadata field or empty metadata
    url = json_obj['permalink']
    # TODO move requests to the remote class
    resp = requests.get(url + 'metadata', cookies=self._meta.cookie_jar)
    raw_json = get_json_from_response(resp)
    if resp.status_code != 200:
        message = '%s (%s)' % (raw_json['message'], raw_json['details'])
        raise errors.error_codes[resp.status_code](message)
    if not raw_json['metadata']:  # if no objects exist return empty result
        return None
    mobj = Metadata()
    # Server returns (property, value) pairs; deserialize both and attach
    # the value to its property before exposing it on the Metadata object.
    for p, v in raw_json['metadata']:
        prp = Serializer.deserialize(p, self)
        val = Serializer.deserialize(v, self)
        prp.append(val)
        # save both objects to cache
        self._cache.add_object(prp)
        self._cache.add_object(val)
        setattr(mobj, prp.name, prp)
    return mobj  # Metadata object with list of properties (tags)
示例5: write
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def write(self, buf, offset=0):
    """
    Updates object information.

    :param buf: a YAML representation of an object.
    :type buf: str
    :param offset: write offset; not used by the visible code.
    :return: 0 on success or a negative error code.
    """
    # NOTE(review): excerpt looks truncated -- the success-path return
    # statement is not visible below.
    try:
        # Build a fresh instance from the YAML text, then carry over the
        # identity of the object being updated so it replaces the original.
        new = Serializer.deserialize(self.model_instance.__class__, buf)
        new.id = self.model_instance.id
    except Exception, e:
        return -1 # TODO find a way to handle exceptions better..
示例6: parse_from_string
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def parse_from_string(self, serialized):
    """Deserialize `serialized` into a new instance of this class."""
    return Serializer().deserialize(serialized, self.__class__)
示例7: SyncObj
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
#.........这里部分代码省略.........
self.__raftNextIndex[nodeAddr] = self.__raftLog[0][1]
sendingSerialized = False
else:
sendingSerialized = True
else:
sendingSerialized = False
nextNodeIndex = self.__raftNextIndex[nodeAddr]
sendSingle = False
delta = time.time() - startTime
if delta > self.__conf.appendEntriesPeriod:
break
def __send(self, nodeAddr, message):
    """Deliver `message` to the first known node whose address matches
    `nodeAddr`; silently does nothing when no node matches."""
    target = next((n for n in self.__nodes if n.getAddress() == nodeAddr), None)
    if target is not None:
        target.send(message)
def __processUnknownConnections(self, descr, event):
    """Handle poller events for connections that have not yet identified
    themselves as a partner node (handshake phase).

    :param descr: poller descriptor of the connection.
    :param event: POLL_EVENT_TYPE bitmask for this wakeup.
    """
    conn = self.__unknownConnections[descr]
    partnerNode = None
    remove = False
    if event & POLL_EVENT_TYPE.READ:
        conn.read()
        # The first complete message on a new connection is expected to be
        # the peer's own address.
        nodeAddr = conn.getMessage()
        if nodeAddr is not None:
            for node in self.__nodes:
                if node.getAddress() == nodeAddr:
                    partnerNode = node
                    break
            else:
                # for-else: no configured node matched -- unknown peer, drop it.
                remove = True
    if event & POLL_EVENT_TYPE.ERROR:
        remove = True
    if remove or conn.isDisconnected():
        # Forget the connection entirely and release the socket.
        self.__unknownConnections.pop(descr)
        self.__poller.unsubscribe(descr)
        conn.close()
        return
    if partnerNode is not None:
        # Handshake complete: hand the live socket over to the matched node.
        self.__unknownConnections.pop(descr)
        assert conn.fileno() is not None
        partnerNode.onPartnerConnected(conn)
def _getSelfNodeAddr(self):
    """Return this node's own 'host:port' address."""
    return self.__selfNodeAddr
def _getConf(self):
    """Return the configuration object for this instance."""
    return self.__conf
def _getResolver(self):
    """Return the DNS resolver used for node addresses."""
    return self.__resolver
def _getPoller(self):
    """Return the I/O poller driving this instance's sockets."""
    return self.__poller
def __tryLogCompaction(self):
    """Dump full state to disk and truncate the raft log when thresholds
    are exceeded (or compaction was forced).

    First reaps any in-flight serialization (on success the covered log
    entries are deleted); then, if no serialization is running and the log
    is large or stale enough, kicks off a new full dump.
    """
    currTime = time.time()
    serializeState, serializeID = self.__serializer.checkSerializing()
    if serializeState == SERIALIZER_STATE.SUCCESS:
        self.__lastSerializedTime = currTime
        self.__deleteEntriesTo(serializeID)
    if serializeState == SERIALIZER_STATE.FAILED:
        LOG_WARNING("Failed to store full dump")
    if serializeState != SERIALIZER_STATE.NOT_SERIALIZING:
        return
    # Compact only when the log is big enough or enough time has passed,
    # unless compaction was explicitly forced.
    if len(self.__raftLog) <= self.__conf.logCompactionMinEntries and \
            currTime - self.__lastSerializedTime <= self.__conf.logCompactionMinTime and \
            not self.__forceLogCompaction:
        return
    self.__forceLogCompaction = False
    lastAppliedEntries = self.__getEntries(self.__raftLastApplied - 1, 2)
    # BUGFIX: both entries [0] and [1] are used below, so require at least
    # two; the original `if not lastAppliedEntries` guard let a 1-element
    # list through and crashed with IndexError (the sibling implementation
    # of this method already checks `len(...) < 2`).
    if len(lastAppliedEntries) < 2:
        return
    # Snapshot every attribute except the framework-internal ones.
    data = {k: v for k, v in self.__dict__.items() if k not in self.__properies}
    self.__serializer.serialize((data, lastAppliedEntries[1], lastAppliedEntries[0]), lastAppliedEntries[1][1])
def __loadDumpFile(self):
    """Restore state from the on-disk full dump, if one exists.

    Failure is non-fatal: it is logged and the node starts from an empty
    state instead.
    """
    try:
        data = self.__serializer.deserialize()
        # data layout: (attribute-dict, last-applied entry, previous entry)
        # BUGFIX: iteritems() is Python-2-only; items() works on both 2 and 3.
        for k, v in data[0].items():
            self.__dict__[k] = v
        self.__raftLog = [data[2], data[1]]
        self.__raftLastApplied = data[1][1]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed during startup.
        LOG_WARNING('Failed to load full dump')
        LOG_CURRENT_EXCEPTION()
示例8: SyncObj
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
#.........这里部分代码省略.........
entries = []
if nextNodeIndex <= self.__getCurrentLogIndex():
entries = self.__getEntries(nextNodeIndex, None, self.__conf.appendEntriesBatchSizeBytes)
self.__raftNextIndex[nodeAddr] = entries[-1][1] + 1
message = {
'type': 'append_entries',
'term': self.__raftCurrentTerm,
'commit_index': self.__raftCommitIndex,
'entries': entries,
'prevLogIdx': prevLogIdx,
'prevLogTerm': prevLogTerm,
}
node.send(message)
else:
transmissionData = self.__serializer.getTransmissionData(nodeAddr)
message = {
'type': 'append_entries',
'term': self.__raftCurrentTerm,
'commit_index': self.__raftCommitIndex,
'serialized': transmissionData,
}
node.send(message)
if transmissionData is not None:
isLast = transmissionData[2]
if isLast:
self.__raftNextIndex[nodeAddr] = self.__raftLog[0][1]
sendingSerialized = False
else:
sendingSerialized = True
else:
sendingSerialized = False
nextNodeIndex = self.__raftNextIndex[nodeAddr]
sendSingle = False
delta = time.time() - startTime
if delta > self.__conf.appendEntriesPeriod:
break
def __send(self, nodeAddr, message):
    """Send `message` to the node registered under `nodeAddr` (if any);
    only the first matching node receives it."""
    for candidate in self.__nodes:
        if candidate.getAddress() != nodeAddr:
            continue
        candidate.send(message)
        return
def __connectedToAnyone(self):
    """Return True when at least one partner node is currently connected."""
    return any(node.getStatus() == NODE_STATUS.CONNECTED
               for node in self.__nodes)
def _getSelfNodeAddr(self):
    """Return this node's own 'host:port' address."""
    return self.__selfNodeAddr
def _getConf(self):
    """Return the configuration object for this instance."""
    return self.__conf
def _getEncryptor(self):
    """Return the transport encryptor, or None when no password is set."""
    return self.__encryptor
def __tryLogCompaction(self):
    """Dump full state to disk and truncate the raft log when thresholds
    are exceeded (or compaction was forced).

    First reaps any in-flight serialization (on success the covered log
    entries are deleted); then, if nothing is serializing and the log is
    large or stale enough, starts a new full dump.
    """
    currTime = time.time()
    serializeState, serializeID = self.__serializer.checkSerializing()
    if serializeState == SERIALIZER_STATE.SUCCESS:
        self.__lastSerializedTime = currTime
        self.__deleteEntriesTo(serializeID)
    if serializeState == SERIALIZER_STATE.FAILED:
        LOG_WARNING("Failed to store full dump")
    if serializeState != SERIALIZER_STATE.NOT_SERIALIZING:
        # A dump is still in progress -- do not start another one.
        return
    # Compact only when the log is big enough or enough time has passed,
    # unless compaction was explicitly forced.
    if len(self.__raftLog) <= self.__conf.logCompactionMinEntries and \
            currTime - self.__lastSerializedTime <= self.__conf.logCompactionMinTime and \
            not self.__forceLogCompaction:
        return
    self.__forceLogCompaction = False
    lastAppliedEntries = self.__getEntries(self.__raftLastApplied - 1, 2)
    # Both entries [0] and [1] are used below, so two are required.
    if len(lastAppliedEntries) < 2:
        return
    # Snapshot every attribute except the framework-internal ones.
    data = dict([(k, self.__dict__[k]) for k in self.__dict__.keys() if k not in self.__properies])
    self.__serializer.serialize((data, lastAppliedEntries[1], lastAppliedEntries[0]), lastAppliedEntries[0][1])
def __loadDumpFile(self):
    """Restore state from the on-disk full dump, if one exists.

    Failure is non-fatal: it is logged and the node starts from an empty
    state instead.
    """
    try:
        data = self.__serializer.deserialize()
        # data layout: (attribute-dict, last-applied entry, previous entry)
        # BUGFIX: iteritems() is Python-2-only; items() works on both 2 and 3.
        for k, v in data[0].items():
            self.__dict__[k] = v
        self.__raftLog = [data[2], data[1]]
        self.__raftLastApplied = data[1][1]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed during startup.
        LOG_WARNING('Failed to load full dump')
        LOG_CURRENT_EXCEPTION()
示例9: SyncObj
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
class SyncObj(object):
    def __init__(self, selfNodeAddr, otherNodesAddrs, conf=None):
        """
        Main SyncObj class, you should inherit your own class from it.

        :param selfNodeAddr: address of the current node server, 'host:port'
        :type selfNodeAddr: str
        :param otherNodesAddrs: addresses of partner nodes, ['host1:port1', 'host2:port2', ...]
        :type otherNodesAddrs: list of str
        :param conf: configuration object
        :type conf: SyncObjConf

        NOTE(review): this excerpt is truncated (the tail of __init__ is
        marked as omitted below).
        """
        # --- configuration -------------------------------------------------
        if conf is None:
            self.__conf = SyncObjConf()
        else:
            self.__conf = conf
        self.__conf.validate()
        # Optional wire encryption; requires the 'cryptography' package.
        if self.__conf.password is not None:
            if not HAS_CRYPTO:
                raise ImportError("Please install 'cryptography' module")
            self.__encryptor = getEncryptor(self.__conf.password)
        else:
            self.__encryptor = None
        # --- cluster membership & raft election state ----------------------
        self.__selfNodeAddr = selfNodeAddr
        self.__otherNodesAddrs = otherNodesAddrs
        self.__unknownConnections = {} # descr => _Connection
        self.__raftState = _RAFT_STATE.FOLLOWER
        self.__raftCurrentTerm = 0
        self.__votedFor = None
        self.__votesCount = 0
        self.__raftLeader = None
        self.__raftElectionDeadline = time.time() + self.__generateRaftTimeout()
        # --- raft log (seeded with a NO_OP so index 1 always exists) -------
        self.__raftLog = createJournal(self.__conf.journalFile)
        if len(self.__raftLog) == 0:
            self.__raftLog.add(_bchr(_COMMAND_TYPE.NO_OP), 1, self.__raftCurrentTerm)
        self.__raftCommitIndex = 1
        self.__raftLastApplied = 1
        self.__raftNextIndex = {}
        self.__raftMatchIndex = {}
        # --- log compaction / full-dump bookkeeping ------------------------
        self.__lastSerializedTime = time.time()
        self.__lastSerializedEntry = None
        self.__forceLogCompaction = False
        self.__leaderCommitIndex = None
        self.__onReadyCalled = False
        self.__changeClusterIDx = None
        self.__noopIDx = None
        self.__destroying = False
        self.__recvTransmission = ''
        self.__startTime = time.time()
        globalDnsResolver().setTimeouts(self.__conf.dnsCacheTime, self.__conf.dnsFailCacheTime)
        self.__serializer = Serializer(self.__conf.fullDumpFile,
            self.__conf.logCompactionBatchSize,
            self.__conf.useFork,
            self.__conf.serializer,
            self.__conf.deserializer,
            self.__conf.serializeChecker)
        self.__isInitialized = False
        self.__lastInitTryTime = 0
        # --- networking ----------------------------------------------------
        self._poller = createPoller(self.__conf.pollerType)
        if selfNodeAddr is not None:
            bindAddr = self.__conf.bindAddress or selfNodeAddr
            host, port = bindAddr.split(':')
            self.__server = TcpServer(self._poller, host, port, onNewConnection=self.__onNewConnection,
                sendBufferSize=self.__conf.sendBufferSize,
                recvBufferSize=self.__conf.recvBufferSize,
                connectionTimeout=self.__conf.connectionTimeout)
        # --- RPC method table: sorted() keeps IDs stable across nodes ------
        self._methodToID = {}
        self._idToMethod = {}
        methods = sorted([m for m in dir(self) if callable(getattr(self, m))])
        for i, method in enumerate(methods):
            self._methodToID[method] = i
            self._idToMethod[i] = getattr(self, method)
        # --- threading & command queues ------------------------------------
        self.__thread = None
        self.__mainThread = None
        self.__initialised = None
        self.__bindedEvent = threading.Event()
        self.__bindRetries = 0
        self.__commandsQueue = FastQueue(self.__conf.commandsQueueSize)
        if not self.__conf.appendEntriesUseBatch:
            self.__pipeNotifier = PipeNotifier(self._poller)
        self.__nodes = []
        self.__readonlyNodes = []
        self.__readonlyNodesCounter = 0
        self.__lastReadonlyCheck = 0
        self.__newAppendEntriesTime = 0
        self.__commandsWaitingCommit = collections.defaultdict(list) # logID => [(termID, callback), ...]
        self.__commandsLocalCounter = 0
        self.__commandsWaitingReply = {} # commandLocalCounter => callback
        #......... (remaining code omitted in this excerpt) .........
示例10: select
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def select(self, model_name, params=None, data_load=False, mode='obj'):
    """ requests objects of a given type from server in bulk mode.

    caching: caches files only
    cascade: no
    data_load: yes/no

    Arguments:

    model_name: type of the object (like 'block', 'segment' or 'section'.)
    params: dict that can contain several categories of key-value pairs
    data_load: fetch the data or not (applied if mode == 'obj')
    mode: 'obj' or 'json' - return mode, python object or JSON

    Params can be:

    1. filters, like:
        'owner__username': 'robert'
        'segment__id__in': [19485,56223,89138]
        'n_definition__icontains': 'blafoo' # negative filter! (has 'n_')

    2. common params, like
        'at_time': '2013-02-22 15:34:57'
        'offset': 50
        'max_results': 20

    3. data params, to get only parts of the original object(s). These only
        work for the data-related objects (like 'analogsignal' or
        'spiketrain').

        start_time - start time of the required range (calculated
            using the same time unit as the t_start of the signal)
        end_time - end time of the required range (calculated using
            the same time unit as the t_start of the signal)
        duration - duration of the required range (calculated using
            the same time unit as the t_start of the signal)
        start_index - start index of the required datarange (an index
            of the starting datapoint)
        end_index - end index of the required range (an index of the
            end datapoint)
        samples_count - number of points of the required range (an
            index of the end datapoint)
        downsample - number of datapoints. This parameter is used to
            indicate whether downsampling is needed. The downsampling
            is applied on top of the selected data range using other
            parameters (if specified)

    Examples:
        get('analogsignal', params={'id__in': [38551], 'downsample': 100})
        get('analogsignal', params={'segment__id': 93882, 'start_time': 500.0})
        get('section', params={'odml_type': 'experiment', 'date_created': '2013-02-22'})
    """
    # BUGFIX: `params={}` was a shared mutable default argument; use the
    # None-sentinel idiom instead (behavior for all existing callers is
    # unchanged).
    if params is None:
        params = {}
    # Translate a class alias back to its canonical model name.
    if model_name in self._meta.cls_aliases.values(): # TODO put into model_safe decorator
        model_name = [k for k, v in self._meta.cls_aliases.items() if v == model_name][0]
    if model_name not in self._meta.models_map:
        raise TypeError('Objects of that type are not supported.')
    # fetch from remote + save in cache if possible
    json_objs = self._remote.get_list(model_name, params)
    if mode == 'json':
        # return pure JSON (no data) if requested
        objects = json_objs
    else:
        # convert to objects in 'obj' mode
        # (the original also looked up app_prefix_dict/models_map here, but
        # both results were unused -- removed as dead code)
        objects = []
        for json_obj in json_objs:
            data_refs = {} # is a dict like {'signal': <array...>, ...}
            if data_load:
                data_refs = self.__parse_data_from_json(json_obj)
            obj = Serializer.deserialize(json_obj, self._meta, data_refs)
            objects.append(obj)
    self._cache.save_data_map() # updates on-disk cache with new datafiles
    self._cache.save_h5_map()
    return objects
示例11: pull
# 需要导入模块: from serializer import Serializer [as 别名]
# 或者: from serializer.Serializer import deserialize [as 别名]
def pull(self, location, params={}, cascade=True, data_load=True):
    """ pulls object from the specified location on the server.

    caching: yes
    cascade: True/False
    data_load: True/False

    Arguments:

    location: object location as URL like
        'http://<host>/metadata/section/2394/', or just a location
        '/metadata/section/2394' or a stripped version like
        '/mtd/sec/2394'
    params: dict that can contain several categories of key-value pairs
    cascade: fetch related objects recursively (True/False)
    data_load: fetch the data (True/False)

    Params can be:

    1. common params, like
        'at_time': '2013-02-22 15:34:57'

    2. data params, to get only parts of the original object(s). These only
        work for the data-related objects (like 'analogsignal' or
        'spiketrain').

        start_time - start time of the required range (calculated
            using the same time unit as the t_start of the signal)
        end_time - end time of the required range (calculated using
            the same time unit as the t_start of the signal)
        duration - duration of the required range (calculated using
            the same time unit as the t_start of the signal)
        start_index - start index of the required datarange (an index
            of the starting datapoint)
        end_index - end index of the required range (an index of the
            end datapoint)
        samples_count - number of points of the required range (an
            index of the end datapoint)
        downsample - number of datapoints. This parameter is used to
            indicate whether downsampling is needed. The downsampling
            is applied on top of the selected data range using other
            parameters (if specified)

    NOTE(review): `params={}` is a shared mutable default -- safe only as
    long as no callee mutates it; consider the None-sentinel idiom.
    NOTE(review): this excerpt is truncated after the `cascade` branch.
    """
    location = self._meta.parse_location( location )
    # Properties and values are pulled only via their parent objects.
    supp_models = [k for k in models_map.keys() if \
        not k in ['property', 'value']]
    if not location[1] in supp_models:
        raise TypeError('Objects of that type are not pull-supported.')
    processed = {} # collector of processed objects like
    # {"metadata/section/2394/": <object..>, ...}
    to_clean = [] # collector of ids of objects to clean parent
    stack = [ location ] # a stack of objects to sync
    while len( stack ) > 0:
        loc = stack[0]
        # find object in cache; a cached copy's guid doubles as an ETag so
        # the server can answer 304 Not Modified.
        etag = None
        cached_obj = self._cache.get_obj_by_location( loc )
        if not type(cached_obj) == type(None):
            obj_descr = self._meta.get_gnode_descr(cached_obj)
            if obj_descr and obj_descr['fields'].has_key('guid'):  # has_key: py2-only
                etag = obj_descr['fields']['guid']
        # request object from the server (with ETag)
        json_obj = self._remote.get(loc, params, etag)
        if json_obj == 304: # get object from cache
            obj = cached_obj
            print_status('%s loaded from cache.' % str(loc))
        else: # request from server
            # download related data
            data_refs = {} # is a dict like {'signal': <array...>, ...}
            if data_load:
                data_refs = self.__parse_data_from_json( json_obj )
            # parse json (+data) into python object
            obj = Serializer.deserialize( json_obj, self._meta, data_refs )
            # put metadata in the stack
            #if json_obj['fields'].has_key('metadata'):
            # for value in json_obj['fields']['metadata']:
            # cl_value = self._meta.clean_location( value )
            # stack.append( cl_value )
            # or just download attached metadata here?
            # metadata = self._fetch_metadata_by_json(cls, json_obj)
            print_status("%s fetched from server." % loc)
        stack.remove( loc ) # not to forget to remove processed object
        processed[ str(loc) ] = obj # add it to processed
        app, cls, lid = loc[0], loc[1], loc[2]
        children = self._meta.app_definitions[cls]['children'] # child object types
        obj_descr = self._meta.get_gnode_descr(obj)
        if cascade and children and obj_descr:
            #......... (remaining code omitted in this excerpt) .........