本文整理汇总了Python中tensorflow_fold.GetItem方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow_fold.GetItem方法的具体用法?Python tensorflow_fold.GetItem怎么用?Python tensorflow_fold.GetItem使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow_fold
的用法示例。
在下文中一共展示了tensorflow_fold.GetItem方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: dynamic_pooling_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import GetItem [as 别名]
def dynamic_pooling_blk():
    """Input: root node dic
    Output: pooled, TensorType([hyper.conv_dim, ])

    Recursively max-pools the feature-detector output over the whole
    subtree rooted at the input node.
    """
    # Leaves have no children to pool over: the detector output is the result.
    leaf_case = feature_detector_blk()

    # Forward declaration so the composition can recurse into its children.
    pool_fwd = td.ForwardDeclaration(td.PyObjectType(),
                                     td.TensorType([hyper.conv_dim, ]))

    pool = td.Composition()
    with pool.scope():
        node_fea = feature_detector_blk().reads(pool.input)
        child_nodes = td.GetItem('children').reads(pool.input)
        pooled_children = td.Map(pool_fwd()).reads(child_nodes)
        # Element-wise max over the children, then against this node's feature.
        maxed = td.Reduce(td.Function(tf.maximum)).reads(pooled_children)
        maxed = td.Function(tf.maximum).reads(maxed, node_fea)
        pool.output.reads(maxed)

    pool = td.OneOf(lambda x: x['clen'] == 0,
                    {True: leaf_case, False: pool})
    pool_fwd.resolve_to(pool)
    return pool
示例2: continous_weighted_add_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import GetItem [as 别名]
def continous_weighted_add_blk():
    """Fold step block: accumulate a weighted child feature into the state.

    Input is a 2-tuple (state, element) where
      state   = (accumulated feature, child index)
      element = (child feature, child clen, parent clen)
    Output is the updated (accumulated feature, child index + 1).
    The per-child weight comes from linear_combine_blk — presumably a
    continuous interpolation coefficient; confirm against its definition.
    """
    block = td.Composition(name='continous_weighted_add')
    with block.scope():
        state = td.GetItem(0).reads(block.input)
        elem = td.GetItem(1).reads(block.input)

        acc_fea = td.GetItem(0).reads(state)
        child_idx = td.GetItem(1).reads(state)

        child_fea = td.GetItem(0).reads(elem)
        child_clen = td.GetItem(1).reads(elem)
        parent_clen = td.GetItem(2).reads(elem)

        weight = linear_combine_blk().reads(child_clen, parent_clen, child_idx)
        scaled_fea = td.Function(batch_mul).reads(child_fea, weight)

        block.output.reads(
            td.Function(tf.add, name='add_last_weighted_fea').reads(acc_fea, scaled_fea),
            # XXX: rewrite using tf.range
            td.Function(tf.add, name='add_idx_1').reads(child_idx, td.FromTensor(tf.constant(1.)))
        )
    return block
示例3: direct_embed_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import GetItem [as 别名]
def direct_embed_blk():
    """Embed a node directly: look up its 'name' id in We, then clip the norm."""
    name_id = td.GetItem('name') >> td.Scalar('int32')
    lookup = td.Function(lambda x: tf.nn.embedding_lookup(param.get('We'), x))
    return name_id >> lookup >> clip_by_norm_blk()
示例4: composed_embed_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import GetItem [as 别名]
def composed_embed_blk():
    """Embed a node from its children.

    Leaf nodes (clen == 0) use the direct embedding lookup; non-leaf nodes
    compute a weighted sum of their children's direct embeddings, add a
    bias, clip the norm, and apply the activation (relu or tanh depending
    on hyper.use_relu).
    """
    leaf_case = direct_embed_blk()
    nonleaf_case = td.Composition(name='composed_embed_nonleaf')
    with nonleaf_case.scope():
        children = td.GetItem('children').reads(nonleaf_case.input)
        # This node's child count, as a scalar tensor.
        clen = td.Scalar().reads(td.GetItem('clen').reads(nonleaf_case.input))
        # Each child's own child count.
        cclens = td.Map(td.GetItem('clen') >> td.Scalar()).reads(children)
        # Direct embedding of each child.
        fchildren = td.Map(direct_embed_blk()).reads(children)

        # Fold initial state: (zero feature vector, index 0).
        initial_state = td.Composition()
        with initial_state.scope():
            initial_state.output.reads(
                td.FromTensor(tf.zeros(hyper.word_dim)),
                td.FromTensor(tf.zeros([])),
            )
        # Pair each child embedding with its clen and the parent's clen
        # (Broadcast repeats clen once per child), then fold the weighted-add
        # step over the sequence; [0] keeps the accumulated feature and drops
        # the fold's index counter.
        summed = td.Zip().reads(fchildren, cclens, td.Broadcast().reads(clen))
        summed = td.Fold(continous_weighted_add_blk(), initial_state).reads(summed)[0]
        added = td.Function(tf.add, name='add_bias').reads(summed, td.FromTensor(param.get('B')))
        normed = clip_by_norm_blk().reads(added)
        act_fn = tf.nn.relu if hyper.use_relu else tf.nn.tanh
        relu = td.Function(act_fn).reads(normed)
        nonleaf_case.output.reads(relu)
    # Dispatch on whether the node is a leaf.
    return td.OneOf(lambda node: node['clen'] == 0,
                    {True: leaf_case, False: nonleaf_case})
示例5: tree_sum_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import GetItem [as 别名]
def tree_sum_blk(loss_blk):
    """Build a block that sums loss_blk over every node of a tree.

    Args:
        loss_blk: zero-arg factory returning a block that maps a node dict
            to a scalar loss (TensorType([])).

    Returns:
        A block mapping a root node dict to the total loss of the subtree.
    """
    # Forward declaration so the composition can recurse into children.
    fwd = td.ForwardDeclaration(td.PyObjectType(), td.TensorType([]))

    tree_sum = td.Composition()
    with tree_sum.scope():
        node_loss = loss_blk().reads(tree_sum.input)
        child_nodes = td.GetItem('children').reads(tree_sum.input)
        child_losses = td.Map(fwd()).reads(child_nodes)
        total = td.Reduce(td.Function(tf.add)).reads(child_losses)
        total = td.Function(tf.add).reads(total, node_loss)
        tree_sum.output.reads(total)

    fwd.resolve_to(tree_sum)
    return tree_sum