本文整理汇总了Python中numpy.asscalar方法的典型用法代码示例。如果您正苦于以下问题:Python numpy.asscalar方法的具体用法?Python numpy.asscalar怎么用?Python numpy.asscalar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类numpy
的用法示例。
在下文中一共展示了numpy.asscalar方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。注意:numpy.asscalar 自 NumPy 1.16 起已被弃用,并在 NumPy 1.23 中被移除;新代码请改用 ndarray.item() 或内置的 float()/int()。
示例1: predict
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def predict(self, f, k=5, resize_mode='fill'):
    """Classify the image file *f* and return the top-*k* predictions.

    Args:
        f: Path to an image file on disk.
        k: Number of top predictions to return. Default: 5.
        resize_mode: Resize strategy forwarded to ``resize_image``.
            Default: 'fill'.

    Returns:
        dict with keys 'filename' and 'prediction'; 'prediction' is a list
        of ``{'probability': float, 'label': str}`` dicts, empty when the
        file does not exist.
    """
    from keras.preprocessing import image
    from vergeml.img import resize_image
    filename = os.path.basename(f)
    if not os.path.exists(f):
        return dict(filename=filename, prediction=[])
    img = image.load_img(f)
    img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)
    x = image.img_to_array(img)
    # Model expects a batch dimension: (1, H, W, C).
    x = np.expand_dims(x, axis=0)
    x = self.preprocess_input(x)
    preds = self.model.predict(x)
    pred = self._decode(preds, top=k)[0]
    # float() replaces np.asscalar, which was removed in NumPy 1.23, and
    # also tolerates decoders that already return Python floats.
    prediction = [dict(probability=float(perc), label=klass)
                  for _, klass, perc in pred]
    return dict(filename=filename, prediction=prediction)
示例2: add
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def add(self, es, ta, ma=None):
    """Accumulate TP/FP/FN/TN counts for estimates *es* against targets *ta*.

    Args:
        es: Estimated probabilities in [0, 1]; flattened internally.
        ta: Binary targets (0 or 1) with the same number of elements as *es*.
        ma: Optional mask — not implemented; passing one raises.

    Raises:
        Exception: if a mask is given, shapes mismatch, or estimates fall
            outside [0, 1].
    """
    if ma is not None:
        raise Exception('mask is not implemented')
    es = es.ravel()
    ta = ta.ravel()
    if es.shape[0] != ta.shape[0]:
        raise Exception('invalid shape of es, or ta')
    if es.min() < 0 or es.max() > 1:
        raise Exception('estimate has wrong value range')
    ta_p = (ta == 1)
    ta_n = (ta == 0)
    es_p = es[ta_p]
    es_n = es[ta_n]
    for idx, wp in enumerate(self.thresholds):
        # float() replaces np.asscalar, which was removed in NumPy 1.23.
        wp = float(wp)
        self.tps[idx] += (es_p > wp).sum()
        self.fps[idx] += (es_n > wp).sum()
        self.fns[idx] += (es_p <= wp).sum()
        self.tns[idx] += (es_n <= wp).sum()
    self.n_pos += ta_p.sum()
    self.n_neg += ta_n.sum()
示例3: accuracy
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def accuracy(model):
    """Average the per-GPU accuracy blobs of *model*.

    Fetches the ``gpu_{device}/{net}_accuracy`` blob for every device the
    model runs on and returns their mean.
    """
    # Renamed from `accuracy` so the local does not shadow this function.
    accuracies = []
    prefix = model.net.Proto().name
    for device in model._devices:
        blob = workspace.FetchBlob("gpu_{}/{}_accuracy".format(device, prefix))
        # .item() replaces np.asscalar, which was removed in NumPy 1.23.
        accuracies.append(blob.item())
    return np.average(accuracies)
# ## Part 11: Run Multi-GPU Training and Get Test Results
# You've come a long way. Now is the time to see it all pay off. Since you already ran ResNet once, you can glance at the code below and run it. The big difference this time is your model is parallelized!
#
# The additional components at the end deal with accuracy so you may want to dig into those specifics as a bonus task. You can try it again: just adjust the `num_epochs` value below, run the block, and see the results. You can also go back to Part 10 to reinitialize the model, and run this step again. (You may want to add `workspace.ResetWorkspace()` before you run the new models again.)
#
# Go back and check the images/sec from when you ran single GPU. Note how you can scale up with a small amount of overhead.
#
# ### Task: How many GPUs would it take to train ImageNet in under a minute?
# In[ ]:
# Start looping through epochs where we run the batches of images to cover the entire dataset
# Usually you would want to run a lot more epochs to increase your model's accuracy
示例4: __call__
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def __call__(self, wav, srate=16000, nbits=16):
    """Add a randomly chosen noise, at a randomly chosen SNR, to a clean wav.

    Args:
        wav: Clean waveform (torch.Tensor or numpy array); flattened to 1-D.
        srate: Sample rate in Hz. Default: 16000.
        nbits: Sample bit depth. Default: 16.

    Returns:
        torch.FloatTensor containing the noisy waveform, rescaled if needed
        to avoid clipping outside [-1, 1).
    """
    if isinstance(wav, torch.Tensor):
        wav = wav.numpy()
    # .item() replaces np.asscalar, which was removed in NumPy 1.23.
    noise_idx = np.random.choice(list(range(len(self.noises))), 1).item()
    sel_noise = self.noises[noise_idx]
    noise = sel_noise['data']
    snr = np.random.choice(self.snr_levels, 1)
    # print('Applying SNR: {} dB'.format(snr[0]))
    if wav.ndim > 1:
        wav = wav.reshape((-1,))
    noisy, noise_bound = self.addnoise_asl(wav, noise, srate,
                                           nbits, snr,
                                           do_IRS=self.do_IRS)
    # normalize to avoid clipping: repeatedly shrink with a growing divisor
    if np.max(noisy) >= 1 or np.min(noisy) < -1:
        small = 0.1
        while np.max(noisy) >= 1 or np.min(noisy) < -1:
            noisy = noisy / (1. + small)
            small = small + 0.1
    return torch.FloatTensor(noisy.astype(np.float32))
示例5: bar2e
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def bar2e(ex, ey, ep):
    """
    Compute the element stiffness matrix for a two dimensional bar element.

    :param list ex: element x coordinates [x1, x2]
    :param list ey: element y coordinates [y1, y2]
    :param list ep: [E, A]: E - Young's modulus, A - Cross section area
    :return mat Ke: stiffness matrix, [4 x 4]
    """
    E = ep[0]
    A = ep[1]
    # Element direction vector and length.
    b = np.mat([[ex[1] - ex[0]], [ey[1] - ey[0]]])
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    L = float(np.sqrt(b.T * b))
    # Local (axial) stiffness, then rotate into global coordinates via G.
    Kle = np.mat([[1., -1.], [-1., 1.]]) * E * A / L
    n = np.asarray(b.T / L).reshape(2,)
    G = np.mat([
        [n[0], n[1], 0., 0.],
        [0., 0., n[0], n[1]]
    ])
    return G.T * Kle * G
示例6: l
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def l(self, x, u, i, terminal=False):
    """Instantaneous cost function.

    Args:
        x: Current state [state_size].
        u: Current control [action_size]. None if terminal.
        i: Current time step.
        terminal: Compute terminal cost. Default: False.

    Returns:
        Instantaneous cost (scalar).
    """
    if terminal:
        z = np.hstack([x, i])
        # float() replaces np.asscalar, which was removed in NumPy 1.23.
        return float(self._l_terminal(*z))
    z = np.hstack([x, u, i])
    return float(self._l(*z))
示例7: test_parse
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def test_parse(self):
    """Base test for the `dket.data.decode` function."""
    words = [1, 2, 3, 0]
    formula = [12, 23, 34, 45, 0]
    example = data.encode(words, formula)
    serialized = example.SerializeToString()
    words_t, sent_len_t, formula_t, form_len_t = data.parse(serialized)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        actual = sess.run([words_t, sent_len_t, formula_t, form_len_t])
        self.assertEqual(words, actual[0].tolist())
        # .item() replaces np.asscalar, which was removed in NumPy 1.23.
        self.assertEqual(len(words), actual[1].item())
        self.assertEqual(formula, actual[2].tolist())
        self.assertEqual(len(formula), actual[3].item())
示例8: set_cumulative_capacities
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def set_cumulative_capacities(self, node):
    """Recursively compute and store the storage volume of each tree node.

    Post-order traversal: both children are processed first, then this
    node's volume is taken from the positive elevation differences to its
    parent, scaled by the cell area ``self.x * self.y``.

    NOTE(review): assumes ``self.ws[level]`` labels cells by node name and
    ``self.b[level]`` maps leaf pairs to boundary cell indices — confirm
    against the class that owns this method.
    """
    if node.l:
        self.set_cumulative_capacities(node.l)
    if node.r:
        self.set_cumulative_capacities(node.r)
    if node.parent:
        if node.name:
            # Named node: volume over the cells carrying this node's label.
            elevdiff = node.parent.elev - self.dem[self.ws[node.level] == node.name]
            # .item() replaces np.asscalar, which was removed in NumPy 1.23.
            vol = abs(elevdiff[elevdiff > 0].sum().item() * self.x * self.y)
            node.vol = vol
        else:
            # Unnamed node: aggregate over all leaf labels below it, plus
            # the boundary cells between each pair of leaves.
            leaves = []
            self.enumerate_leaves(node, level=node.level, stack=leaves)
            mask = np.isin(self.ws[node.level], leaves)
            boundary = list(chain.from_iterable([self.b[node.level].setdefault(pair, [])
                                                 for pair in combinations(leaves, 2)]))
            mask.flat[boundary] = True
            elevdiff = node.parent.elev - self.dem[mask]
            vol = abs(elevdiff[elevdiff > 0].sum().item() * self.x * self.y)
            node.vol = vol
示例9: generate_storage_uncontrolled
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def generate_storage_uncontrolled(self, ixes, **kwargs):
    """Build the table of uncontrolled storage nodes for link indices *ixes*.

    Stores the result in ``self.storage_uncontrolled`` as a DataFrame with
    columns [name, elev, ymax, y0, Acurve, A1, A2, A0].

    Args:
        ixes: iterable of start-node indices to place storage at.
        **kwargs: manual column overrides applied after construction.
    """
    storage_uncontrolled = {}
    # .item() replaces np.asscalar, which was removed in NumPy 1.23.
    storage_ends = [self.endnodes[np.where(self.startnodes == ix)].item()
                    for ix in ixes]
    storage_uncontrolled['name'] = 'ST' + pd.Series(ixes).astype(str)
    storage_uncontrolled['elev'] = self.grid.view(self.dem).flat[storage_ends]
    storage_uncontrolled['ymax'] = self.channel_d.flat[ixes] + 1
    storage_uncontrolled['y0'] = 0
    storage_uncontrolled['Acurve'] = 'FUNCTIONAL'
    storage_uncontrolled['A0'] = self.channel_w.flat[ixes]
    storage_uncontrolled['A1'] = 0
    storage_uncontrolled['A2'] = 1
    storage_uncontrolled = pd.DataFrame.from_dict(storage_uncontrolled)
    # Manual overrides
    for key, value in kwargs.items():
        storage_uncontrolled[key] = value
    self.storage_uncontrolled = storage_uncontrolled[['name', 'elev', 'ymax', 'y0', 'Acurve',
                                                      'A1', 'A2', 'A0']]
示例10: generate_storage_controlled
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def generate_storage_controlled(self, ixes, **kwargs):
    """Build the table of controlled storage nodes for link indices *ixes*.

    Stores the result in ``self.storage_controlled`` as a DataFrame with
    columns [name, elev, ymax, y0, Acurve, A1, A2, A0].

    Args:
        ixes: iterable of start-node indices to place storage at.
        **kwargs: manual column overrides applied after construction.
    """
    storage_controlled = {}
    depths = 2
    # .item() replaces np.asscalar, which was removed in NumPy 1.23.
    storage_ends = [self.endnodes[np.where(self.startnodes == ix)].item()
                    for ix in ixes]
    storage_controlled['name'] = 'C' + pd.Series(ixes).astype(str)
    storage_controlled['elev'] = self.grid.view(self.dem).flat[storage_ends]
    storage_controlled['ymax'] = depths
    storage_controlled['y0'] = 0
    storage_controlled['Acurve'] = 'FUNCTIONAL'
    storage_controlled['A0'] = 1000
    storage_controlled['A1'] = 10000
    storage_controlled['A2'] = 1
    storage_controlled = pd.DataFrame.from_dict(storage_controlled)
    # Manual overrides
    for key, value in kwargs.items():
        storage_controlled[key] = value
    self.storage_controlled = storage_controlled[['name', 'elev', 'ymax', 'y0', 'Acurve',
                                                  'A1', 'A2', 'A0']]
示例11: __index__
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def __index__(self):
    """Returns a python scalar.

    This allows using an instance of this class as an array index.
    Note that only arrays of integer types with size 1 can be used as array
    indices.

    Returns:
        A Python scalar.

    Raises:
        TypeError: If the array is not of an integer type.
        ValueError: If the array does not have size 1.
    """
    # TODO(wangpeng): Handle graph mode
    # .item() replaces np.asscalar, which was removed in NumPy 1.23; it
    # raises the same TypeError/ValueError documented above.
    return self.data.numpy().item()
示例12: end
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def end(self, session):  # pylint: disable=unused-argument
    """Runs evaluator for final model."""
    # Only runs eval at the end if highest accuracy so far
    # is less than self._stop_threshold.
    if not self._run_success:
        # .item() replaces np.asscalar, which was removed in NumPy 1.23.
        step = session.run(self._global_step_tensor).item()
        logging.info('Starting eval.')
        eval_results = self._evaluate(session, step)
        mlperf_log.resnet_print(key=mlperf_log.EVAL_STOP)
        mlperf_log.resnet_print(
            key=mlperf_log.EVAL_ACCURACY,
            value={
                'epoch': max(step // self._steps_per_epoch - 1, 0),
                'value': float(eval_results[_EVAL_METRIC])
            })
        if eval_results[_EVAL_METRIC] >= self._stop_threshold:
            mlperf_log.resnet_print(
                key=mlperf_log.RUN_STOP, value={'success': 'true'})
        else:
            mlperf_log.resnet_print(
                key=mlperf_log.RUN_STOP, value={'success': 'false'})
示例13: _toscalar
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def _toscalar(v):
    """Convert a NumPy scalar to the equivalent built-in Python scalar.

    Non-NumPy values (and NumPy types not listed) are returned unchanged.
    """
    if isinstance(v, (np.float16, np.float32, np.float64,
                      np.uint8, np.uint16, np.uint32, np.uint64,
                      np.int8, np.int16, np.int32, np.int64)):
        # .item() replaces np.asscalar, which was removed in NumPy 1.23.
        return v.item()
    return v
示例14: get_intent
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def get_intent(self, code_repr='label'):
    ''' Get intent code, parameters and name

    Parameters
    ----------
    code_repr : string
        string giving output form of intent code representation.
        Default is 'label'; use 'code' for integer representation.

    Returns
    -------
    code : string or integer
        intent code, or string describing code
    parameters : tuple
        parameters for the intent
    name : string
        intent name

    Raises
    ------
    TypeError
        if `code_repr` is neither 'label' nor 'code'

    Examples
    --------
    >>> hdr = Nifti1Header()
    >>> hdr.set_intent('t test', (10,), name='some score')
    >>> hdr.get_intent()
    ('t test', (10.0,), 'some score')
    >>> hdr.get_intent('code')
    (3, (10.0,), 'some score')
    '''
    hdr = self._structarr
    recoder = self._field_recoders['intent_code']
    code = int(hdr['intent_code'])
    if code_repr == 'code':
        label = code
    elif code_repr == 'label':
        label = recoder.label[code]
    else:
        raise TypeError('repr can be "label" or "code"')
    # Number of parameters is fixed per intent code; read intent_p1..pN.
    n_params = len(recoder.parameters[code])
    params = (float(hdr['intent_p%d' % (i + 1)]) for i in range(n_params))
    # .item() replaces np.asscalar, which was removed in NumPy 1.23.
    name = asstr(hdr['intent_name'].item())
    return label, tuple(params), name
示例15: _chk_magic_offset
# 需要导入模块: import numpy [as 别名]
# 或者: from numpy import asscalar [as 别名]
def _chk_magic_offset(hdr, fix=False):
    """Check (and optionally fix) the magic string / vox_offset pair of *hdr*.

    Returns the (possibly fixed) header and a ``Report`` describing any
    problem found; problem_level encodes severity (0 = none).
    """
    rep = Report(HeaderDataError)
    # for ease of later string formatting, use scalar of byte string
    # (.item() replaces np.asscalar, which was removed in NumPy 1.23)
    magic = hdr['magic'].item()
    offset = hdr['vox_offset']
    if magic == asbytes('n+1'):  # one file
        if offset >= 352:
            if not offset % 16:
                return hdr, rep
            else:
                # SPM uses memory mapping to read the data, and
                # apparently this has to start on 16 byte boundaries
                rep.problem_msg = ('vox offset (=%s) not divisible '
                                   'by 16, not SPM compatible' % offset)
                rep.problem_level = 30
                if fix:
                    rep.fix_msg = 'leaving at current value'
            return hdr, rep
        rep.problem_level = 40
        rep.problem_msg = ('vox offset %d too low for '
                           'single file nifti1' % offset)
        if fix:
            hdr['vox_offset'] = 352
            rep.fix_msg = 'setting to minimum value of 352'
    elif magic != asbytes('ni1'):  # two files
        # unrecognized nii magic string, oh dear
        rep.problem_msg = ('magic string "%s" is not valid' %
                           asstr(magic))
        rep.problem_level = 45
        if fix:
            rep.fix_msg = 'leaving as is, but future errors are likely'
    return hdr, rep