本文整理汇总了Python中tensorflow_fold.Scalar方法的典型用法代码示例。如果您正苦于以下问题：Python tensorflow_fold.Scalar方法的具体用法？Python tensorflow_fold.Scalar怎么用？Python tensorflow_fold.Scalar使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tensorflow_fold的用法示例。
在下文中一共展示了tensorflow_fold.Scalar方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_weighted_feature
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def test_weighted_feature(self):
    """weighted_feature_blk must equal feature · tri_combined weight matrix (numpy reference)."""
    root, _ = self._load_test_data()
    # Fetch the learned convolution weight matrices from the session.
    Wconvl, Wconvr, Wconvt = (self.sess.run(tbcnn.param.get(name))
                              for name in ('Wconvl', 'Wconvr', 'Wconvt'))
    idx, pclen, depth, max_depth = 1., 1., 0., 2.
    feature = tbcnn.coding_blk().eval(root, session=self.sess)
    # Wire scalars + feature vector into the block under test.
    blk = (td.Vector(feature.size), td.Scalar(), td.Scalar(),
           td.Scalar(), td.Scalar()) >> tbcnn.weighted_feature_blk()
    actual = blk.eval((feature, idx, pclen, depth, max_depth), session=self.sess)
    desired = np.matmul(
        feature,
        tri_combined_np(idx, pclen, depth, max_depth, Wconvl, Wconvr, Wconvt))
    nptest.assert_allclose(actual, desired)
示例2: test_linear_combine
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def test_linear_combine(self, clen, pclen, idx):
    """Test linear_combine_blk against the linear_combine_np reference."""
    # Fetch the learned combination matrices from the session.
    Wl, Wr = (self.sess.run(embedding.param.get(name)) for name in ('Wl', 'Wr'))
    blk = (td.Scalar(), td.Scalar(), td.Scalar()) >> embedding.linear_combine_blk()
    actual = blk.eval((clen, pclen, idx), session=self.sess)
    desired = linear_combine_np(clen, pclen, idx, Wl, Wr)
    nptest.assert_allclose(actual, desired)
示例3: test_tri_combined
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def test_tri_combined(self, idx, pclen, depth, max_depth):
    """Test tri_combined_blk against the tri_combined_np reference."""
    # NOTE: the original docstring said "linear_combine_blk"; this test
    # actually exercises tri_combined_blk.
    Wconvl, Wconvr, Wconvt = (self.sess.run(tbcnn.param.get(name))
                              for name in ('Wconvl', 'Wconvr', 'Wconvt'))
    blk = (td.Scalar(), td.Scalar(), td.Scalar(),
           td.Scalar()) >> tbcnn.tri_combined_blk()
    actual = blk.eval((idx, pclen, depth, max_depth), session=self.sess)
    desired = tri_combined_np(idx, pclen, depth, max_depth, Wconvl, Wconvr, Wconvt)
    nptest.assert_allclose(actual, desired)
示例4: weighted_feature_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def weighted_feature_blk():
    """Multiply a node feature by its position-dependent convolution weight.

    Input: (feature, idx, pclen, depth, max_depth)
        (TensorType([hyper.word_dim, ]), Scalar, Scalar, Scalar, Scalar)
    Output: weighted_feature
        TensorType([hyper.conv_dim, ])
    """
    blk = td.Composition()
    with blk.scope():
        feature = blk.input[0]
        # Position-dependent weight matrix built from the four scalars.
        weight = tri_combined_blk().reads(
            blk.input[1], blk.input[2], blk.input[3], blk.input[4])
        blk.output.reads(td.Function(embedding.batch_mul).reads(feature, weight))
    return blk
示例5: feature_detector_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def feature_detector_blk(max_depth=2):
    """Single convolution patch over a subtree of depth ``max_depth``.

    Input: node dict
    Output: TensorType([hyper.conv_dim, ])
    """
    blk = td.Composition()
    with blk.scope():
        patch = collect_node_for_conv_patch_blk(max_depth=max_depth).reads(blk.input)
        # Map each python node to (feature, idx, pclen, depth, max_depth) tensors.
        as_tensors = td.Map(td.Record((coding_blk(), td.Scalar(), td.Scalar(),
                                       td.Scalar(), td.Scalar()))).reads(patch)
        # Weighted feature per node, then summed across the patch.
        per_node = td.Map(weighted_feature_blk()).reads(as_tensors)
        pooled = td.Reduce(td.Function(tf.add)).reads(per_node)
        # Add bias and apply tanh; result is TensorType([hyper.conv_dim, ]).
        biased = td.Function(tf.add).reads(pooled, td.FromTensor(param.get('Bconv')))
        blk.output.reads(td.Function(tf.nn.tanh).reads(biased))
    return blk
# generalize to tree_fold, accepts one block that takes two node, returns a value
示例6: direct_embed_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def direct_embed_blk():
    """Embed a node by looking up its 'name' id in the We table, then clip the norm."""
    lookup = td.Function(lambda ids: tf.nn.embedding_lookup(param.get('We'), ids))
    return td.GetItem('name') >> td.Scalar('int32') >> lookup >> clip_by_norm_blk()
示例7: composed_embed_blk
# 需要导入模块: import tensorflow_fold [as 别名]
# 或者: from tensorflow_fold import Scalar [as 别名]
def composed_embed_blk():
    """Embed a node: leaves via direct lookup, internal nodes from their children.

    Dispatches on node['clen'] == 0 (leaf) with td.OneOf.
    """
    leaf_case = direct_embed_blk()
    nonleaf_case = td.Composition(name='composed_embed_nonleaf')
    with nonleaf_case.scope():
        kids = td.GetItem('children').reads(nonleaf_case.input)
        child_count = td.Scalar().reads(td.GetItem('clen').reads(nonleaf_case.input))
        kid_clens = td.Map(td.GetItem('clen') >> td.Scalar()).reads(kids)
        kid_embeds = td.Map(direct_embed_blk()).reads(kids)
        # Fold seed: zero embedding vector plus a zero scalar accumulator.
        seed = td.Composition()
        with seed.scope():
            seed.output.reads(td.FromTensor(tf.zeros(hyper.word_dim)),
                              td.FromTensor(tf.zeros([])))
        triples = td.Zip().reads(kid_embeds, kid_clens,
                                 td.Broadcast().reads(child_count))
        # Weighted running sum over children; [0] keeps the embedding component.
        weighted_sum = td.Fold(continous_weighted_add_blk(), seed).reads(triples)[0]
        biased = td.Function(tf.add, name='add_bias').reads(
            weighted_sum, td.FromTensor(param.get('B')))
        clipped = clip_by_norm_blk().reads(biased)
        activation = tf.nn.relu if hyper.use_relu else tf.nn.tanh
        nonleaf_case.output.reads(td.Function(activation).reads(clipped))
    return td.OneOf(lambda node: node['clen'] == 0,
                    {True: leaf_case, False: nonleaf_case})