This article collects typical usage examples of the numpy.put method in Python. If you are wondering exactly what numpy.put does or how to use it, the curated code examples below may help. You can also explore further usage examples of the numpy module to which this method belongs.
The following presents 15 code examples of numpy.put, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: _transform_gradients
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def _transform_gradients(self, g):
"""
        Transform the gradients by multiplying each gradient entry by the
        gradient factor of its constraint.
"""
#py3 fix
#[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
[np.put(g, i, c.gradfactor(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
if self._has_fixes(): return g[self._fixes_]
return g
#def _transform_gradients_non_natural(self, g):
# """
# Transform the gradients by multiplying the gradient factor for each
# constraint to it, using the theta transformed natural gradient.
# """
# #py3 fix
# #[np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.iteritems() if c != __fixed__]
# [np.put(g, i, c.gradfactor_non_natural(self.param_array[i], g[i])) for c, i in self.constraints.items() if c != __fixed__]
# if self._has_fixes(): return g[self._fixes_]
# return g
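The comprehension above uses np.put purely for its side effect: it overwrites, in place, the gradient entries belonging to each constraint. A minimal sketch of that pattern in plain NumPy; the indices and the gradient factor below are made up for illustration and are not GPy's actual transformations:
import numpy as np

g = np.array([0.5, -1.0, 2.0, 0.25])      # gradient vector
params = np.array([1.0, 2.0, 3.0, 4.0])   # untransformed parameters

# hypothetical constraint covering indices 1 and 3, whose gradient
# factor is simply the parameter value itself
idx = [1, 3]
np.put(g, idx, params[idx] * g[idx])      # same effect as g[idx] = params[idx] * g[idx]
print(g)                                  # [ 0.5 -2.   2.   1. ]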
Example 2: put
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def put(a, ind, v, mode='wrap'):
"""Replaces specified elements of an array with given values.
Args:
a (cupy.ndarray): Target array.
ind (array-like): Target indices, interpreted as integers.
v (array-like): Values to place in `a` at target indices.
If `v` is shorter than `ind` it will be repeated as necessary.
mode (str): How out-of-bounds indices will behave. Its value must be
either `'raise'`, `'wrap'` or `'clip'`. Otherwise,
:class:`TypeError` is raised.
.. note::
Default `mode` is set to `'wrap'` to avoid unintended performance drop.
If you need NumPy's behavior, please pass `mode='raise'` manually.
.. seealso:: :func:`numpy.put`
"""
a.put(ind, v, mode=mode)
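For comparison, plain numpy.put defaults to mode='raise'; the short snippet below (standard NumPy, not CuPy) shows how the three modes treat an out-of-bounds index:
import numpy as np

a = np.arange(5)
np.put(a, 22, -5, mode='clip')   # index clipped to the last position
print(a)                         # [ 0  1  2  3 -5]

b = np.arange(5)
np.put(b, 22, -5, mode='wrap')   # 22 % 5 == 2
print(b)                         # [ 0  1 -5  3  4]

c = np.arange(5)
try:
    np.put(c, 22, -5)            # NumPy's default mode='raise'
except IndexError as err:
    print("out of bounds:", err)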
Example 3: get_onehot_arr
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def get_onehot_arr(place, dim, put_value=1.):
"""
get a `dim` dimensional one-hot vector, with `place`-th entry being `put_value` and dtype being np.float32
e.g.:
>>> get_onehot_arr(3, 5, 1.3)
    array([0. , 0. , 0. , 1.3, 0. ], dtype=float32)
:param place: the place to put a non-zero value
:param dim: the length of the vector
:param put_value: the value to be put
:return: a `dim` dimensional one-hot vector, with `place`-th entry being `put_value` and dtype being np.float32
"""
    if place >= dim or place < 0:
        print("Invalid input: place = {}, dim = {}".format(place, dim))
        return np.zeros(dim, dtype=np.float32)  # fall back to an all-zero vector instead of raising
    ans = np.zeros(dim, dtype=np.float32)
    np.put(ans, place, put_value)
return ans
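As a quick sanity check (not part of the original project), the same one-hot vector can be built with plain indexing; np.put(ans, place, value) is equivalent to ans[place] = value for a single in-bounds index:
import numpy as np

dim, place, put_value = 5, 3, 1.3
ans = np.zeros(dim, dtype=np.float32)
ans[place] = put_value
print(ans)   # [0.  0.  0.  1.3 0. ]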
Example 4: readCuts
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def readCuts(myfile):
    # note: m (row length) and psvPositions are globals defined elsewhere in this script
    cuts = open(myfile).readlines()
    haps = np.zeros((len(cuts), m))
    totalPSVs = 0
    for idx, psvs in enumerate(cuts):
        psvs = psvs.strip().split()
        psvs = list(map(int, psvs))  # materialize: a bare map object would be exhausted before np.put (Python 3)
        for psv in psvs:
            psvPositions.append(psv)
        np.put(haps[idx], psvs, [1])
        totalPSVs += len(psvs)
    # get rid of positions where there are two PSVs at one spot
    # not sure why this happens, will have to fix with mark
    # confirmed as a bug, a fix is in the works
    # hack to skip it for now
    toRm = list(np.where(haps.sum(axis=0) > 1)[0])
    for pos in toRm:
        print("multiple PSVs in one spot!")
        psvPositions.remove(pos)
    return haps
Example 5: __init__
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def __init__(self, action_set, x = None, **kwargs):
self.built = False
#setup the optimizer here:
self._optimizer = SolverFactory('cbc')
self._results = None
#todo: Alex fill out these functions
## todo: check what each of these does in CPLEX.
self._set_mip_time_limit = lambda mip, time_limit: True #_set_mip_time_limit(self, mip, time_limit)
self._set_mip_node_limit = lambda mip, node_limit: True #_set_mip_node_limit(self, mip, node_limit)
## todo: not sure what to put for this. let's talk about what the cplex display flag does.
self._set_mip_display = lambda mip, display_flag: True #_set_mip_display(self, mip, display)
self._apriori_infeasible = False
super().__init__(action_set = action_set, x = x, **kwargs)
#### building MIP ####
Example 6: merge
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def merge( self, inmodel=None ):
"""
Rescue profiles and info records from input model into result model.
"""
r = self.result.clone()
m = inmodel or self.model
i1, i2 = r.compareAtoms( m )
mask1 = N.zeros( len( r ), N.int )
N.put( mask1, i1, 1 )
r.atoms.update( m.atoms, mask=mask1 )
r.fileName = m.fileName
r.source = m.source
return r
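The N.put call above is the usual "index list to 0/1 mask" idiom. A minimal sketch of it in plain NumPy, with made-up indices standing in for the result of compareAtoms:
import numpy as np

n_atoms = 8
matching = [0, 2, 3, 7]        # hypothetical indices of atoms present in both models
mask = np.zeros(n_atoms, int)
np.put(mask, matching, 1)      # same as mask[matching] = 1
print(mask)                    # [1 0 1 1 0 0 0 1]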
Example 7: train
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def train():
    # learning rate
lr = 0.01
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
y_ = inference(x)
    # loss function
loss = tf.square(y_ - y)
    # stochastic gradient descent
opt = tf.train.GradientDescentOptimizer(lr)
train_op = opt.minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
print("start training")
for i in range(1000000):
train_x, train_y = get_train_data()
sess.run(train_op, feed_dict={x: train_x, y: train_y})
if i % 10000 == 0:
times = int(i / 10000)
test_x_ndarray = np.arange(0, 2 * np.pi, 0.01)
test_y_ndarray = np.zeros([len(test_x_ndarray)])
ind = 0
for test_x in test_x_ndarray:
test_y = sess.run(y_, feed_dict={x: test_x, y: 1})
np.put(test_y_ndarray, ind, test_y)
ind += 1
draw_correct_line()
pylab.plot(test_x_ndarray, test_y_ndarray, '--', label= str(times)+'times')
pylab.show()
Example 8: row_wise_unique
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def row_wise_unique(arr, fill_value=0):
"""Return row-wise unique values of an array.
The trick is to add a unique imaginary number to each row. That way, when
calling np.unique, the floats in the original array will be recognized as
different values if they occur in different rows, but be treated as the
same value if they occur in the same row.
PARAMETERS
----------
    arr : ndarray
        Array in which to find unique values.
    fill_value : scalar, optional
        Value assigned to duplicate entries within a row (default 0).
RETURNS
----------
u_arr : ndarray
        Array in which unique values are retained whereas all non-unique
        elements are set to fill_value.
Acknowledgements
----------------
From https://stackoverflow.com/questions/26958233/numpy-row-wise-unique-elements
(by user "unutbu").
"""
weight = 1j*np.linspace(0, arr.shape[1], arr.shape[0],
endpoint=False) # row "weights"
u_arr = arr + weight[:,np.newaxis] # add weights to rows
u, ind = np.unique(u_arr, return_index=True) # now get unique values
    u_arr = np.ones_like(arr)*fill_value # initialize array with fill_value
    np.put(u_arr, ind, arr.flat[ind]) # fill in unique values; remaining
                                      # entries keep fill_value
return u_arr
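A small usage example (values chosen for illustration): duplicates within a row are replaced by fill_value, while values repeated across different rows are kept:
import numpy as np

arr = np.array([[1, 2, 2, 3],
                [2, 3, 3, 4]])
print(row_wise_unique(arr))
# [[1 2 0 3]
#  [2 3 0 4]]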
Example 9: diag
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def diag(s, leg, dtype=None, labels=None):
"""Returns a square, diagonal matrix of entries `s`.
The resulting matrix has legs ``(leg, leg.conj())`` and charge 0.
Parameters
----------
s : scalar | 1D array
The entries to put on the diagonal. If scalar, all diagonal entries are the same.
leg : :class:`LegCharge`
The first leg of the resulting matrix.
dtype : None | type
The data type to be used for the result. By default, use dtype of `s`.
labels : list of {str | None}
Labels associated to each leg, ``None`` for non-named labels.
Returns
-------
diagonal : :class:`Array`
A square matrix with diagonal entries `s`.
See also
--------
    Array.scale_axis : similar to ``tensordot(diag(s), ...)``, but faster.
"""
s = np.asarray(s, dtype)
scalar = (s.ndim == 0)
if not scalar and len(s) != leg.ind_len:
raise ValueError("len(s)={0:d} not equal to leg.ind_len={1:d}".format(len(s), leg.ind_len))
res = Array((leg, leg.conj()), s.dtype, labels=labels) # default charge is 0
# qdata = [[0, 0], [1, 1], ....]
res._qdata = np.arange(leg.block_number, dtype=np.intp)[:, np.newaxis] * np.ones(2, np.intp)
# ``res._qdata_sorted = True`` was already set
if scalar:
res._data = [np.diag(s * np.ones(size, dtype=s.dtype)) for size in leg.get_block_sizes()]
else:
res._data = [np.diag(s[leg.get_slice(qi)]) for qi in range(leg.block_number)]
return res
Example 10: optimizer_array
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def optimizer_array(self):
"""
Array for the optimizer to work on.
This array always lives in the space for the optimizer.
Thus, it is untransformed, going from Transformations.
        Setting this array will make sure the transformed parameters for this model
will be set accordingly. It has to be set with an array, retrieved from
this method, as e.g. fixing will resize the array.
The optimizer should only interfere with this array, such that transformations
are secured.
"""
if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:
self._optimizer_copy_ = np.empty(self.size)
if not self._optimizer_copy_transformed:
self._optimizer_copy_.flat = self.param_array.flat
#py3 fix
#[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.iteritems() if c != __fixed__]
[np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
self._optimizer_copy_transformed = True
if self._has_fixes():# or self._has_ties()):
self._ensure_fixes()
return self._optimizer_copy_[self._fixes_]
return self._optimizer_copy_
Example 11: __sf
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def __sf(self, X):
"""Internal function to calculate for Smoothing Factors of data points
Repeated n_iter_ of times in randomized mode.
"""
dis_ = np.zeros(shape=(X.shape[0],))
card_ = np.zeros(shape=(X.shape[0],))
# perform one process with the original input order
itr_res = self.__dis(X)
np.put(card_, X.shape[0] - sum([i > 0. for i in itr_res]),
np.where(itr_res > 0.))
# create a copy of random state to preserve original state for
# future fits (if any)
random_state = np.random.RandomState(
seed=self.random_state_.get_state()[1][0])
indices = np.arange(X.shape[0])
for _ in range(self.n_iter_):
ind_ = indices
random_state.shuffle(ind_)
_x = X[indices]
# get dissimilarity of this iteration and restore original order
itr_res = self.__dis(_x)[np.argsort(ind_)]
current_card = X.shape[0] - sum([i > 0. for i in itr_res])
# compare with previous iteration to get the maximal dissimilarity
for i, j in enumerate(itr_res):
if j > dis_[i]:
dis_[i] = j
card_[i] = current_card
# Increase random state seed by one to reorder input next iteration
random_state.seed(random_state.get_state()[1][0] + 1)
return np.multiply(dis_, card_)
Example 12: _inverse_permutation
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
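A quick check of the helper (indices chosen arbitrarily): composing a permutation with its inverse restores the identity order:
import numpy as np

p = np.array([2, 0, 3, 1], dtype=np.int32)
s = _inverse_permutation(p)
print(s)      # [1 3 0 2]
print(p[s])   # [0 1 2 3]
print(s[p])   # [0 1 2 3]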
Example 13: _put_model
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def _put_model(self, key, model):
"""
        Wrapper to put a (key, model) pair into the models dict.
"""
model_dict = {'model': {'stepwise': model.export_model()}}
self.models[key] = model_dict
Example 14: predict
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def predict(data_instances, model):
if data_instances is None:
return
d_header = data_instances.schema.get("header")
best_feature = [d_header.index(x) for x in model.header]
best_mask = np.zeros(len(d_header), dtype=bool)
np.put(best_mask, best_feature, 1)
new_data = data_instances.mapValues(lambda v: Step.slice_data_instance(v, best_mask))
pred_result = model.predict(new_data)
return pred_result
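A minimal sketch of the masking step above, with a made-up header and feature list standing in for the model's real schema:
import numpy as np

d_header = ['x0', 'x1', 'x2', 'x3', 'x4']
model_header = ['x1', 'x4']                                # features kept by the model
best_feature = [d_header.index(x) for x in model_header]   # [1, 4]
best_mask = np.zeros(len(d_header), dtype=bool)
np.put(best_mask, best_feature, 1)                         # True at the selected columns
row = np.array([0.1, 0.2, 0.3, 0.4, 0.5])
print(row[best_mask])                                      # [0.2 0.5]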
Example 15: __init__
# Required import: import numpy [as alias]
# Or: from numpy import put [as alias]
def __init__(self, imdb_name, resize=True):
imdb = get_imdb(imdb_name)
        # Ignore the background class!!! So every entry of ['gt_classes'] must be decremented by 1.
self.classes = [ np.zeros(imdb.num_classes - 1) for i in range(imdb.num_images) ]
for i, anno in enumerate(imdb.gt_roidb()):
            # a list comprehension is needed on Python 3: np.put cannot consume a bare map object
            np.put(self.classes[i], [c - 1 for c in anno['gt_classes']], 1)
            # np.put(self.classes[i], random.choice([c - 1 for c in anno['gt_classes']]), 1)
self.images = [ imdb.image_path_at(i) for i in range(imdb.num_images) ]
assert len(self.classes) == len(self.images)
self._perm = np.random.permutation(np.arange(len(self.images)))
self._cur = 0
self.resize = resize