

Python utils.flatten Method Code Examples

This article collects typical usage examples of the Python utils.flatten method. If you are unsure what utils.flatten does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the enclosing utils module.


The following presents 11 code examples of the utils.flatten method, sorted by popularity by default.
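Note that utils here is each project's own helper module, not anything in the standard library, so flatten means different things in different examples: in the TensorFlow snippets it is a flatten(tensor, keep) that collapses all but the last keep dimensions, while in the plain-Python snippets (scan_P__, combine_lut_dicts, kasiki) it flattens one level of a nested iterable. A minimal sketch of the latter, assuming the usual one-level semantics:

from itertools import chain

def flatten(list_of_lists):
    # flatten one level of nesting: [[1, 2], [3]] -> [1, 2, 3]
    return list(chain.from_iterable(list_of_lists))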

Example 1: softmax

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def softmax(logits, scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits, 1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out, logits, 1)
		return out

# softmax selection:
# returns target * softmax(logits), summed over J
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim]; the [N, M] weights are the attention over M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 18, Source: model_v2.py
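The comment block in this example describes the companion "softmax selection" step that consumes this softmax but is not shown on this page. A minimal sketch of what such a softsel might look like, reusing the softmax above (the function name and the reduction axis are assumptions, not confirmed project code):

def softsel(target, logits, scope=None):
	# target: [..., J, d], logits: [..., J] -> output: [..., d]
	with tf.name_scope(scope or "softsel"):
		a = softmax(logits)  # [..., J]
		target_rank = len(target.get_shape().as_list())
		# broadcast the weights over d, then sum out the J axis
		return tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)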

Example 2: softmax

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def softmax(logits, scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits, 1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out, logits, 1)
		return out

# softmax selection:
# returns target * softmax(logits), summed over J
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim]; the [N, M] weights are the attention over M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 18, Source: model_mcb.py

Example 3: softmax

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def softmax(logits, scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits, 1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out, logits, 1)
		return out

# softmax selection:
# returns target * softmax(logits), summed over J
# target: [..., J, d]
# logits: [..., J]

# e.g. [N, M, dim] * [N, M] -> [N, dim]; the [N, M] weights are the attention over M

# return: [..., d] # the target vectors are weighted by the softmax of the logits
# [N, M, JX, JQ, 2d] * [N, M, JX, JQ] (each context-to-query mapping) -> [N, M, JX, 2d] # attends over the JQ dimension
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 18, Source: model.py

Example 4: scan_P__

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def scan_P__(P__):
    """Detect up_forks and down_forks per P."""

    for _P_, P_ in pairwise(P__):  # Iterate through pairs of lines.
        _iter_P_, iter_P_ = iter(_P_), iter(P_)  # Convert to iterators.
        try:
            _P, P = next(_iter_P_), next(iter_P_)  # First pair to check.
        except StopIteration:  # No more up_fork-down_fork pair.
            continue  # To next pair of _P_, P_.
        while True:
            isleft, olp = comp_edge(_P, P)  # Check for 4 different cases.
            if olp and _P['sign'] == P['sign']:
                _P['down_fork_'].append(P)
                P['up_fork_'].append(_P)
            try:  # Check for stopping:
                _P, P = (next(_iter_P_), P) if isleft else (_P, next(iter_P_))
            except StopIteration:  # No more up_fork - down_fork pair.
                break  # To next pair of _P_, P_.

    return [*flatten(P__)]  # Flatten P__ before return. 
Developer: boris-kz, Project: CogAlg, Lines: 22, Source: intra_blob_a.py
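scan_P__ walks consecutive rows of P__ with a pairwise helper that is not shown here. A stand-in sketch under that assumption; this is the classic itertools recipe, available as itertools.pairwise since Python 3.10:

from itertools import tee

def pairwise(iterable):
    # s -> (s0, s1), (s1, s2), (s2, s3), ...
    a, b = tee(iterable)
    next(b, None)
    return zip(a, b)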

Example 5: combine_lut_dicts

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def combine_lut_dicts(*args):
	ks = utils.flatten([x.keys() for x in args])
	d = {}
	for k in set(ks):
		d[k] = utils.flatten([x[k] for x in args if k in x])
	return d
Developer: balint256, Project: ice, Lines: 8, Source: lut.py
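A hypothetical usage, assuming utils.flatten returns a flat list: merging lookup-table dicts concatenates the value lists of shared keys and keeps the rest unchanged.

a = {'freq': [100], 'gain': [1, 2]}
b = {'freq': [200], 'phase': [0]}
combine_lut_dicts(a, b)  # -> {'freq': [100, 200], 'gain': [1, 2], 'phase': [0]}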

Example 6: forward

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def forward(self, ftr_1):
        '''
        Input:
          ftr_1 : features at 1x1 layer
        Output:
          lgt_glb_mlp: class logits from the global MLP head
          lgt_glb_lin: class logits from the global linear head
        '''
        # collect features to feed into classifiers
        # - always detach() -- send no grad into encoder!
        h_top_cls = flatten(ftr_1).detach()
        # compute predictions
        lgt_glb_mlp = self.block_glb_mlp(h_top_cls)
        lgt_glb_lin = self.block_glb_lin(h_top_cls)
        return lgt_glb_mlp, lgt_glb_lin 
Developer: Philip-Bachman, Project: amdim-public, Lines: 17, Source: model.py
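Here flatten collapses the encoder's 1x1 feature map into one vector per sample before the two classifier heads. A minimal sketch of such a helper in PyTorch (the actual implementation in amdim-public may differ):

def flatten(x):
    # collapse everything after the batch dimension: [N, C, 1, 1] -> [N, C]
    return x.reshape(x.size(0), -1)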

Example 7: kasiki

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def kasiki(text):
    if args.verbose:
        print("Finding sequence duplicates and spacings...")
    utils.args = args
    min_length = 2 if (args.exhaustive or len(clean_text) < TEST_2_MAX_TEXT_LENGTH) else 3
    seqSpacings = utils.find_sequence_duplicates(clean_text, min_length)
    if args.verbose:
        if args.all:
            print(seqSpacings)
        print("Extracting spacing divisors...")
    divisors = useful_divisors(flatten(list(seqSpacings.values())))
    divisorsCount = utils.repetitions(divisors)
    if args.exhaustive:
        return [x[0] for x in divisorsCount]
    return [x[0] for x in divisorsCount if x[0] <= KEY_LENGTH_THRESHOLD] 
Developer: Carleslc, Project: CryptTools, Lines: 17, Source: vigenere.py
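Two notes on this snippet: it reads the module-level clean_text and args rather than its text parameter, and it depends on helpers not shown here. In a Kasiski examination the key length tends to divide the spacings between repeated ciphertext sequences, so a hypothetical sketch of useful_divisors (with utils.repetitions then counting how often each divisor occurs) might be:

def useful_divisors(spacings):
    # hypothetical helper: collect every divisor > 1 of each spacing
    divisors = []
    for n in spacings:
        divisors.extend(d for d in range(2, n + 1) if n % d == 0)
    return divisors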

Example 8: linear

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def linear(x, output_size, scope, add_tanh=False, wd=None):
	with tf.variable_scope(scope):
		# the input is not rank 2, so flatten it while keeping the last dim
		keep = 1
		flat_x = flatten(x, keep) # keep the last dim: [N, M, JX, JQ, 2d] => [N*M*JX*JQ, 2d]
		bias_start = 0.0
		if not isinstance(output_size, int): # may be a tf.Dimension from get_shape()
			output_size = output_size.value

		W = tf.get_variable("W", dtype="float", initializer=tf.truncated_normal([flat_x.get_shape()[-1].value, output_size], stddev=0.1))
		bias = tf.get_variable("b", dtype="float", initializer=tf.constant(bias_start, shape=[output_size]))
		flat_out = tf.matmul(flat_x, W) + bias

		if add_tanh:
			flat_out = tf.tanh(flat_out, name="tanh")

		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out, x, keep)
		return out
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 28, Source: model_v2.py
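A hypothetical call site with illustrative shapes: projecting [N, M, JX, 2d] features down to one logit per position keeps the leading dimensions, since reconstruct (defined alongside flatten in the project's utils) restores them after the matmul.

h = tf.zeros([32, 4, 20, 200])  # [N, M, JX, 2d], illustrative only
logits = linear(h, 1, scope="logit", add_tanh=False, wd=None)  # [N, M, JX, 1]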

Example 9: linear

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def linear(x, output_size, scope, add_tanh=False, wd=None):
	with tf.variable_scope(scope):
		# the input is not rank 2, so flatten it while keeping the last dim
		keep = 1
		flat_x = flatten(x, keep) # keep the last dim: [N, M, JX, JQ, 2d] => [N*M*JX*JQ, 2d]
		bias_start = 0.0
		if not isinstance(output_size, int): # may be a tf.Dimension from get_shape()
			output_size = output_size.value

		W = tf.get_variable("W", dtype="float", initializer=tf.truncated_normal([flat_x.get_shape()[-1].value, output_size], stddev=0.1))
		bias = tf.get_variable("b", dtype="float", initializer=tf.constant(bias_start, shape=[output_size]))
		flat_out = tf.matmul(flat_x, W) + bias

		if add_tanh:
			flat_out = tf.tanh(flat_out, name="tanh")

		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out, x, keep)
		return out
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 28, Source: model_mcb.py

Example 10: softmax

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def softmax(logits, scope=None):
	with tf.name_scope(scope or "softmax"): # note: name_scope, not variable_scope
		flat_logits = flatten(logits, 1)
		flat_out = tf.nn.softmax(flat_logits)
		out = reconstruct(flat_out, logits, 1)
		return out

# adds the current scope's variables' L2 loss to the loss collection
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 12, Source: model_dmnplus.py

Example 11: linear

# Required import: import utils [as alias]
# Or: from utils import flatten [as alias]
def linear(x, output_size, scope, add_tanh=False, wd=None):
	with tf.variable_scope(scope):
		# the input is not rank 2, so flatten it while keeping the last dim
		keep = 1
		flat_x = flatten(x, keep) # keep the last dim: [N, M, JX, JQ, 2d] => [N*M*JX*JQ, 2d]
		bias_start = 0.0
		if not isinstance(output_size, int): # may be a tf.Dimension from get_shape()
			output_size = output_size.value

		W = tf.get_variable("W", dtype="float", initializer=tf.truncated_normal([flat_x.get_shape()[-1].value, output_size], stddev=0.1))
		bias = tf.get_variable("b", dtype="float", initializer=tf.constant(bias_start, shape=[output_size]))
		flat_out = tf.matmul(flat_x, W) + bias

		if add_tanh:
			flat_out = tf.tanh(flat_out, name="tanh")

		if wd is not None:
			add_wd(wd)

		out = reconstruct(flat_out, x, keep)
		return out

# from https://github.com/barronalex/Dynamic-Memory-Networks-in-TensorFlow 
Developer: JunweiLiang, Project: FVTA_MemexQA, Lines: 30, Source: model_dmnplus.py


Note: the utils.flatten method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code, and do not republish without permission.