本文整理汇总了Python中node.Node.children方法的典型用法代码示例。如果您正苦于以下问题:Python Node.children方法的具体用法?Python Node.children怎么用?Python Node.children使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类node.Node
的用法示例。
在下文中一共展示了Node.children方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _convert_remaining_string_to_chain
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def _convert_remaining_string_to_chain(self, string, string_pos, next_id):
    """Create new node representing any unmatched characters.

    Builds a subtree for string[string_pos:]: the first remaining
    character becomes the subtree root, interior characters are stored
    as a compressed "chain", and the final character becomes an unnamed
    end node whose pointer is next_id.

    NOTE(review): assumes string_pos < len(string); if nothing remains,
    new_node is never bound and the while loop raises — confirm callers
    guarantee at least one unmatched character.
    """
    # new_node is the root of the new subtree. The end (matching) node is never named.
    first_char = string[string_pos]
    if len(string) - string_pos > 2:
        # Three or more characters remain: middle characters become a chain,
        # the last character becomes the end node.
        new_chain = list(string[string_pos+1:-1])
        new_node = Node(True, first_char, len(new_chain), chain=new_chain)
        new_node.children = {string[-1]: Node(False, string[-1], 0, pointer=next_id)}
    elif len(string) - string_pos == 2:
        # Exactly two characters: no chain needed, just root plus end node.
        new_node = Node(False, first_char, 1)
        new_node.children = {string[-1]: Node(False, string[-1], 0, pointer=next_id)}
    elif len(string) - string_pos == 1:
        # Single character: the root is itself the end node.
        new_node = Node(False, first_char, 0, pointer=next_id)
    # A chain holds at most 63 items: peel oversized chains into a series of
    # parent nodes, working from the tail backwards.
    # NOTE(review): the worked example below suggests the parent should keep
    # chain[0:-64] (the element at chain[-64] becomes the child's in_edge),
    # yet the code keeps chain[0:-63], duplicating that element — possible
    # off-by-one; confirm against the Node chain representation before fixing.
    while new_node.num_items > 63:
        # If we pretend it's 7 rather than 63...
        # (a)bcdefghijklmnopqr-a -> (a)bcdefghij-(k)lmnopqr-a -> (a)b-(c)defghij-(k)lmnopqr-a
        new_in_edge = new_node.in_edge
        new_children = new_node.chain[0:-63]
        new_node.in_edge = new_node.chain[-64]
        new_node.chain = new_node.chain[-63:]
        new_node.num_items = 63
        if len(new_children) > 1:
            # Enough leftover characters to form another chain node above.
            new_node_parent = Node(True, new_in_edge,
                                   len(new_children), chain=new_children,
                                   children={new_node.in_edge: new_node})
        else:
            new_node_parent = Node(False, new_in_edge, 0, children={new_node.in_edge: new_node})
        new_node = new_node_parent
    return new_node
示例2: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
    '''
    Recursively learn a decision tree over data_set.

    Input:  data_set, attribute_metadata, numerical_splits_count (number of
            splits still allowed per numeric attribute), and the remaining
            search depth (depth = 0 forces this node to output a label).
    Output: the Node at the root of the learned decision tree.
    '''
    node = Node()

    # Base case: every example already carries the same label.
    uniform_label = check_homogenous(data_set)
    if uniform_label is not None:
        node.label = uniform_label
        return node

    # Base case: out of depth, out of data, or out of attributes.
    if depth == 0 or not data_set or len(attribute_metadata) <= 1:
        node.label = mode(data_set)
        return node

    best_att, best_split = pick_best_attribute(data_set, attribute_metadata, numerical_splits_count)

    # No numeric splits remain for the chosen attribute -> majority leaf.
    if numerical_splits_count[best_att] == 0:
        node.label = mode(data_set)
        return node

    # pick_best_attribute signals "nothing useful to split on" with False.
    if best_att == False:
        node.label = mode(data_set)
        return node

    node.decision_attribute = best_att
    node.splitting_value = best_split
    node.name = attribute_metadata[best_att]['name']
    node.is_nominal = attribute_metadata[best_att]['is_nominal']

    if node.is_nominal:
        # One child per observed nominal value, keyed by that value.
        branches = {}
        for value, subset in split_on_nominal(data_set, best_att).items():
            if is_missing(subset, best_att):
                subset = replace_missing(subset, best_att)
            branches[value] = ID3(subset, attribute_metadata, numerical_splits_count, depth - 1)
        node.children = branches
    else:
        # Numeric attribute: binary split around best_split.
        node.children = []
        below, above = split_on_numerical(data_set, best_att, best_split)
        if is_missing(below, best_att):
            below = replace_missing(below, best_att)
        if is_missing(above, best_att):
            above = replace_missing(above, best_att)
        numerical_splits_count[best_att] -= 1
        node.children.append(ID3(below, attribute_metadata, numerical_splits_count, depth - 1))
        node.children.append(ID3(above, attribute_metadata, numerical_splits_count, depth - 1))
    return node
示例3: more_tests
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def more_tests():
    """Smoke-test tree printing, BFS, ID3 training, accuracy and pruning."""
    # Hand-build a two-level nominal tree:
    #   n (attrib) -> {1: n0, 2: n1}; n0 -> {1: n2, 2: n3}; n1 -> {1: n4, 2: n5}
    n2 = Node()
    n2.name = "attrib2"
    n2.label = 1
    n3 = Node()
    n3.name = "attrib3"
    n3.label = 0
    n0 = Node()
    n0.name = "attrib0"
    n0.is_nominal = True
    n0.children = {1: n2, 2: n3}
    n4 = Node()
    n4.name = "attrib4"
    n4.label = 2
    n5 = Node()
    n5.name = "attrib5"
    n5.label = 3
    n1 = Node()
    n1.name = "attrib1"
    n1.is_nominal = True
    n1.children = {1: n4, 2: n5}
    n = Node()
    n.label = None
    n.decision_attribute = 1
    n.is_nominal = True
    n.name = "attrib"
    n.children = {1: n0, 2: n1}
    print n.print_dnf_tree()
    print n.print_tree()
    print breadth_first_search(n, [n])
    # Train ID3 on a tiny 2-column data set (label, numeric attribute) and
    # report training-set accuracy under several depth / split-budget settings.
    attribute_metadata = [{'name': "winner",'is_nominal': True},{'name': "opprundifferential",'is_nominal': False}]
    data_set = [[1, 0.27], [0, 0.42], [0, 0.86], [0, 0.68], [0, 0.04], [1, 0.01], [1, 0.33], [1, 0.42], [1, 0.42], [0, 0.51], [1, 0.4]]
    numerical_splits_count = [5, 5]
    n = ID3(data_set, attribute_metadata, numerical_splits_count, 0)  # depth 0: single leaf
    print validation_accuracy(n,data_set)
    numerical_splits_count = [1, 1]
    n = ID3(data_set, attribute_metadata, numerical_splits_count, 5)  # tight split budget
    print validation_accuracy(n,data_set)
    numerical_splits_count = [5, 5]
    n = ID3(data_set, attribute_metadata, numerical_splits_count, 5)
    print validation_accuracy(n,data_set)
    print n.print_tree()
    # Prune against a held-out validation set and show the tree again.
    n = reduced_error_pruning(n,data_set,[[1, 0.11], [0, 0.42], [0, 0.86], [0, 0.55], [0, 0.66], [1, 0.01], [1, 0.11], [1, 0.84], [1, 0.42], [0, 0.51], [1, 0.4]])
    print n.print_tree()
    return n
示例4: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
'''
See Textbook for algorithm.
Make sure to handle unknown values, some suggested approaches were
given in lecture.
========================================================================================================
Input: A data_set, attribute_metadata, maximum number of splits to consider for numerical attributes,
maximum depth to search to (depth = 0 indicates that this node should output a label)
========================================================================================================
Output: The node representing the decision tree learned over the given data set
========================================================================================================
'''
# Your code here
print depth
Dtree = Node()
if len(data_set) == 0:
return Dtree
c = check_homogenous([[element[0]] for element in data_set])
if isinstance(c,int):
Dtree.label = c
return Dtree
elif len(data_set[0]) == 1 or depth <= 0 or [0]*(len(numerical_splits_count)-1) == numerical_splits_count[1:]:
Dtree.label = mode(data_set)
return Dtree
else:
data_set = missingValues(data_set)
best_attribute,threshold = pick_best_attribute(data_set,attribute_metadata,numerical_splits_count)
if not(best_attribute):
Dtree.label = mode(data_set)
return Dtree
Dtree.decision_attribute = best_attribute
Dtree.modeVal = mode([[element[Dtree.decision_attribute]] for element in data_set])
Dtree.name = attribute_metadata[best_attribute]['name']
if threshold:
Dtree.is_nominal = False
Dtree.splitting_value = threshold
less,greater = split_on_numerical(data_set,best_attribute,threshold)
new_nsc = numerical_splits_count
new_nsc[best_attribute] -= 1
Dtree.children = [ID3(less,attribute_metadata,new_nsc,depth-1),ID3(greater,attribute_metadata,new_nsc,depth-1)]
else:
Dtree.is_nominal = True
n_dict = split_on_nominal(data_set,best_attribute)
new_attribute_metadata = attribute_metadata
new_attribute_metadata.pop(best_attribute)
#try:
Dtree.children = [ID3(removeAttribute(value,best_attribute),new_attribute_metadata,numerical_splits_count,depth-1) for key,value in n_dict.iteritems()]
#except AttributeError:
# print n_dict
# print best_attribute
# print threshold
#raise Exception("wut")
return Dtree
pass
示例5: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
    '''
    Recursively build a decision tree for data_set.

    Input:  data_set, attribute_metadata, numerical_splits_count (remaining
            numeric splits per attribute), maximum search depth (depth = 0
            forces this node to output a label).
    Output: the root Node of the learned tree.
    '''
    preprocessing(data_set, attribute_metadata)
    node = Node()

    # All examples agree on one label -> leaf.
    uniform = check_homogenous(data_set)
    if uniform is not None:
        node.label = uniform
        return node

    # Depth exhausted -> majority-label leaf.
    if depth == 0:
        node.label = mode(data_set)
        return node

    best = pick_best_attribute(data_set, attribute_metadata,
                               numerical_splits_count)
    if best[0] == False:
        # Nothing worth splitting on -> majority-label leaf.
        node.label = mode(data_set)
        return node

    node.decision_attribute = best[0]
    node.name = attribute_metadata[best[0]]['name']
    child_depth = depth - 1

    # best[1] is the literal False for nominal attributes, otherwise a numeric
    # threshold (compared via str() so a 0.0 threshold stays numeric).
    if str(best[1]) == 'False':
        node.is_nominal = True
        node.children = {}
        for value, subset in split_on_nominal(data_set, best[0]).items():
            node.children[value] = ID3(subset, attribute_metadata,
                                       numerical_splits_count, child_depth)
    else:
        node.is_nominal = False
        node.children = []
        node.splitting_value = best[1]
        halves = split_on_numerical(data_set, best[0], best[1])
        node.children.append(ID3(halves[0], attribute_metadata,
                                 numerical_splits_count, child_depth))
        node.children.append(ID3(halves[1], attribute_metadata,
                                 numerical_splits_count, child_depth))
    return node
示例6: ID3_helper
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3_helper(data_set, attribute_metadata, numerical_splits_count, depth, nominal_keys):
    """Recursive worker for ID3.

    nominal_keys[i] is iterated to create one branch per value of nominal
    attribute i (presumably the full value set, so branches exist even for
    values absent from this subset — confirm against the caller).
    """
    att = pick_best_attribute(data_set, attribute_metadata, numerical_splits_count)
    # Debug output left in by the author.
    print "before"
    # print attribute_metadata
    # print numerical_splits_count
    print att
    print "after"
    if depth == 0 or att[0] == False: #depth or gain ratio is 0
        # Leaf: majority label of the remaining examples.
        d = Node()
        default = mode(data_set)
        d.label = default
        return d
    elif check_homogenous(data_set) is not None:
        # Leaf: every example already has the same label.
        d = Node()
        d.label = check_homogenous(data_set)
        return d
    else: #how to recursion
        root = Node()
        # att = pick_best_attribute(data_set, attribute_metadata, numerical_splits_count)
        # if att[0] != False:
        root.label = None
        root.decision_attribute = att[0]
        root.name = attribute_metadata[att[0]].get('name')
        root.is_nominal = attribute_metadata[att[0]].get('is_nominal')
        if root.is_nominal == False:
            # Numeric attribute: consume one split and branch on the threshold.
            numerical_splits_count[att[0]] -= 1
            root.splitting_value = att[1]
            root.children = []
            left_dataset = []
            right_dataset = []
            for i in xrange(len(data_set)):
                if data_set[i][att[0]] < att[1]:
                    left_dataset.append(data_set[i])
                else:
                    right_dataset.append(data_set[i])
            depth = depth - 1
            root.children.append(ID3_helper(left_dataset, attribute_metadata, numerical_splits_count, depth, nominal_keys))
            root.children.append(ID3_helper(right_dataset, attribute_metadata, numerical_splits_count, depth, nominal_keys))
        else:
            # Nominal attribute: one child per possible attribute value.
            # NOTE(review): depth is NOT decremented on this branch, unlike the
            # numeric branch — confirm whether that is intentional.
            root.children = {}
            for key in nominal_keys[att[0]]:
                chile_dataset = []
                for i in xrange(len(data_set)):
                    if data_set[i][att[0]] == key:
                        chile_dataset.append(data_set[i])
                child = ID3_helper(chile_dataset, attribute_metadata, numerical_splits_count, depth, nominal_keys)
                root.children.update({key: child})
        return root
示例7: parseExtern
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def parseExtern(self):
    """Parse an 'extern' declaration and register it as a typedef.

    Consumes "extern somelib.somelib.ClassName [= Constructor(...)] ;",
    records the class name in self.typedefDict and self.externList, and
    returns the NodeExtern AST node.
    """
    self.accept('EXTERN')
    #has the form "extern somelib.somelib.ClassName"
    identnode = self.parseIdentifierComplex()
    n = Node('NodeExtern', [], identnode)
    #parse out the actual class name
    # Descend through single-child Identifier nodes to the innermost one,
    # whose leaf holds the bare class name.
    classnamenode = identnode
    while len(classnamenode.children) == 1 and \
            classnamenode.children[0].type == 'Identifier':
        classnamenode = classnamenode.children[0]
    ident = classnamenode.leaf
    #do we have constructor parameters?
    if self.currentToken.type == 'EQUALS':
        # "extern X = SomeCall(...)": the call node becomes this node's child.
        self.accept('EQUALS')
        constructor_call_expr = self.parseExpression()
        assert len(constructor_call_expr.children) == 1
        assert constructor_call_expr.children[0].type == 'FunctionCall'
        constructor_call = constructor_call_expr.children[0]
        n.children = [constructor_call]
    self.typedefDict[ident] = n
    self.externList += [ident]
    self.accept('SEMI')
    return n
示例8: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
    '''
    See Textbook for algorithm.
    Make sure to handle unknown values, some suggested approaches were
    given in lecture.
    ========================================================================================================
    Input: A data_set, attribute_metadata, maximum number of splits to consider for numerical attributes,
    maximum depth to search to (depth = 0 indicates that this node should output a label)
    ========================================================================================================
    Output: The node representing the decision tree learned over the given data set
    ========================================================================================================
    '''
    # Your code here
    n = Node()
    # Remember the majority label of this subset on the node itself.
    n.mode = mode(data_set)
    label = check_homogenous(data_set)
    if label is not None:
        # All examples share one label -> leaf.
        n.label = label
        return n
    elif depth == 0:
        # Depth exhausted -> majority-label leaf.
        n.label = mode(data_set)
        return n
    else:
        best, sv = pick_best_attribute(data_set, attribute_metadata, numerical_splits_count)
        if not best:
            n.label = mode(data_set)
            return n
        n.decision_attribute = best
        n.splitting_value = sv
        n.name = attribute_metadata[best]['name']
        #numeric
        # NOTE(review): sv is False for nominal attributes and a threshold for
        # numeric ones; a threshold of exactly 0/0.0 would wrongly fall through
        # to the nominal branch — confirm pick_best_attribute's contract.
        if n.splitting_value:
            m = split_on_numerical(data_set, best, n.splitting_value)
            numerical_splits_count[best] = numerical_splits_count[best] - 1
            if not m[0] or not m[1]:
                # Degenerate split (one side empty) -> majority-label leaf.
                n.label = mode(data_set)
            else:
                n_small = ID3(m[0], attribute_metadata, numerical_splits_count, depth-1)
                n_big = ID3(m[1], attribute_metadata, numerical_splits_count, depth-1)
                n.children = [n_small, n_big]
        #nominal
        else:
            n.is_nominal = True
            m = split_on_nominal(data_set, best)
            # NOTE(review): n.children is indexed without being initialized
            # here — relies on Node() defaulting children to a dict; confirm.
            for k,v in m.items():
                if m[k]:
                    n_curr = ID3(m[k], attribute_metadata, numerical_splits_count, depth-1)
                    if n_curr.decision_attribute != n.decision_attribute:
                        # Skip children that would re-split on the same attribute.
                        n.children[k] = n_curr
        return n
示例9: reduced_error_pruning
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def reduced_error_pruning(temp_root, temp_originroot, root, originroot,
                          training_set, validation_set, attribute_metadata):
    """Reduced-error pruning over parallel copies of the decision tree.

    temp_root / temp_originroot are a scratch copy used to trial each prune;
    root / originroot are the real tree, which is only modified when a trial
    prune does not reduce validation accuracy. Walks nominal children by key
    and numeric children by index (0, 1).
    """
    if temp_root.is_nominal == True:
        # Partition the training data the same way this node splits it.
        subset = split_on_nominal(training_set, temp_root.decision_attribute)
        for div in temp_root.children.keys():
            if temp_root.children[div].label == None:
                # Trial: replace this internal child with a majority-label leaf.
                new_Node = Node()
                new_Node.label = mode(subset[div])
                new_Node.children = {}
                temp = copy.deepcopy(temp_root.children[div])
                temp_root.children[div] = new_Node
                prune_acc = validation_accuracy(temp_originroot, validation_set,
                                                attribute_metadata)
                acc = validation_accuracy(originroot, validation_set,
                                          attribute_metadata)
                if prune_acc >= acc:
                    # Pruning helped (or tied): commit it to the real tree.
                    print prune_acc
                    root.children[div] = new_Node
                else:
                    # Revert the trial and recurse into the child instead.
                    temp_root.children[div] = temp
                    reduced_error_pruning(temp_root.children[div],
                        temp_originroot, root.children[div], originroot,
                        subset[div], validation_set, attribute_metadata)
    if temp_root.is_nominal == False:
        subset = split_on_numerical(training_set, root.decision_attribute,
                                    root.splitting_value)
        for i in xrange(0, 2):
            if temp_root.children[i].label == None:
                # Trial: replace this internal child with a majority-label leaf.
                new_Node = Node()
                new_Node.label = mode(subset[i])
                new_Node.children = {}
                temp = copy.deepcopy(temp_root.children[i])
                temp_root.children[i] = new_Node
                prune_acc = validation_accuracy(temp_originroot, validation_set,
                                                attribute_metadata)
                acc = validation_accuracy(originroot, validation_set,
                                          attribute_metadata)
                if prune_acc >= acc:
                    print prune_acc
                    root.children[i] = new_Node
                else:
                    temp_root.children[i] = temp
                    reduced_error_pruning(temp_root.children[i], temp_originroot,
                        root.children[i], originroot, subset[i], validation_set,
                        attribute_metadata)
示例10: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
    '''
    ========================================================================================================
    Input: A data_set, attribute_metadata, maximum number of splits to consider for numerical attributes,
    maximum depth to search to (depth = 0 indicates that this node should output a label)
    ========================================================================================================
    Output: The node representing the decision tree learned over the given data set
    ========================================================================================================
    '''
    preprocessing(data_set, attribute_metadata)
    if check_homogenous(data_set) != None:
        # All labels identical -> leaf.
        root = Node()
        root.label = check_homogenous(data_set)
    else:
        if depth == 0:
            # Depth exhausted -> majority-label leaf.
            root = Node()
            root.label = mode(data_set)
        else:
            best = pick_best_attribute(data_set, attribute_metadata, numerical_splits_count)
            if best[0] == False:
                # Nothing worth splitting on -> majority-label leaf.
                root = Node()
                root.label = mode(data_set)
            else:
                root = Node()
                root.decision_attribute = best[0]
                root.name = attribute_metadata[best[0]]['name']
                depth -= 1
                # best[1] is the literal False for nominal attributes,
                # otherwise a numeric threshold.
                if str(best[1]) == 'False':
                    root.is_nominal = True
                    root.children = {}
                    subsets = split_on_nominal(data_set, best[0])
                    for splitval in subsets.keys():
                        root.children[splitval] = ID3(subsets[splitval], attribute_metadata, numerical_splits_count, depth)
                else:
                    root.is_nominal = False
                    root.children = []
                    root.splitting_value = best[1]
                    subsets = split_on_numerical(data_set, best[0], best[1])
                    # NOTE(review): the split-budget decrement is commented out,
                    # so numerical_splits_count is never consumed here — confirm
                    # whether that was intentional.
                    #numerical_splits_count[best[0]] -= 1
                    print numerical_splits_count
                    print depth
                    root.children.append(ID3(subsets[0], attribute_metadata, numerical_splits_count, depth))
                    root.children.append(ID3(subsets[1], attribute_metadata, numerical_splits_count, depth))
    return root
示例11: p_MethodDeclarator
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def p_MethodDeclarator(p):
    '''MethodDeclarator : IDENTIFIER LPAREN RPAREN
    | IDENTIFIER LPAREN FormalParameterList RPAREN
    '''
    # The docstring above is the PLY grammar rule; p[1] is the method name.
    declarator = Node('MethodDeclarator', name=p[1])
    if len(p) == 5:
        # 4-symbol production: attach the parsed FormalParameterList.
        declarator.children = [p[3]]
    p[0] = declarator
示例12: copy_node
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def copy_node(node):
    """Return a deep copy of *node* and its entire subtree."""
    clone = Node()
    clone.label = node.label
    clone.decision_attribute = node.decision_attribute
    clone.is_nominal = node.is_nominal
    clone.value = node.value
    clone.splitting_value = node.splitting_value
    if node.is_nominal:
        # Nominal nodes keep children in a dict keyed by attribute value.
        clone.children = {key: copy_node(child) for key, child in node.children.items()}
    else:
        # Numerical nodes keep children in an ordered list.
        clone.children = [copy_node(child) for child in node.children]
    clone.name = node.name
    return clone
示例13: test_breadth_first_search
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def test_breadth_first_search():
    """Build a minimal one-split tree, print it, and run BFS over it."""
    # Two leaves with opposite labels.
    n0 = Node()
    n0.label = 1
    n1 = Node()
    n1.label = 0
    # Root: nominal split on attribute 1 with the two leaves as children.
    n = Node()
    n.label = None
    n.decision_attribute = 1
    n.is_nominal = True
    n.name = "whatever"
    n.children = {1: n0, 2: n1}
    print n.print_tree()
    print breadth_first_search(n)
    return n
示例14: pruning_helper
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def pruning_helper(tempnode,temp_originroot,root,originroot,training_set,validation_set,attribute_metadata,div,subset):
    """Trial-prune one child (tempnode.children[div]) of the scratch tree.

    Commits the prune to the real tree (root) when validation accuracy does
    not drop; otherwise restores the child and recurses deeper via
    reduced_error_pruning.
    """
    if tempnode.children[div].label == None:
        # Trial: swap the internal child for a majority-label leaf.
        newnode = Node()
        newnode.label = mode(subset[div])
        newnode.children = {}
        tempchild = copy.deepcopy(tempnode.children[div])
        tempnode.children[div] = newnode
        prune_acc = validation_accuracy(temp_originroot, validation_set, attribute_metadata)
        acc = validation_accuracy(originroot, validation_set, attribute_metadata)
        if prune_acc >= acc:
            # Pruning helped (or tied): commit it to the real tree.
            print prune_acc
            root.children[div] = newnode
        else:
            # Undo the trial and keep pruning further down this branch.
            tempnode.children[div] = tempchild
            reduced_error_pruning(tempnode.children[div], temp_originroot, root.children[div], originroot, subset[div], validation_set, attribute_metadata)
示例15: ID3
# 需要导入模块: from node import Node [as 别名]
# 或者: from node.Node import children [as 别名]
def ID3(data_set, attribute_metadata, numerical_splits_count, depth):
    '''
    See Textbook for algorithm.
    Make sure to handle unknown values, some suggested approaches were
    given in lecture.
    ========================================================================================================
    Input: A data_set, attribute_metadata, maximum number of splits to consider for numerical attributes,
    maximum depth to search to (depth = 0 indicates that this node should output a label)
    ========================================================================================================
    Output: The node representing the decision tree learned over the given data set
    ========================================================================================================
    ID3 (Examples, Target_Attribute, Attributes)
    Create a root node for the tree
    If all examples are positive, Return the single-node tree Root, with label = +.
    If all examples are negative, Return the single-node tree Root, with label = -.
    If number of predicting attributes is empty, then Return the single node tree Root,
    with label = most common value of the target attribute in the examples.
    Otherwise Begin
        A - The Attribute that best classifies examples.
        Decision Tree attribute for Root = A.
        For each possible value, vi, of A,
            Add a new tree branch below Root, corresponding to the test A = vi.
            Let Examples(vi) be the subset of examples that have the value vi for A
            If Examples(vi) is empty
                Then below this new branch add a leaf node with label = most common target value in the examples
            Else below this new branch add the subtree ID3 (Examples(vi), Target_Attribute, Attributes - {A})
    End
    Return Root
    '''
    # NOTE(review): assumed size of the full training set; only used by the
    # PRUNING heuristic below — confirm it matches the real data.
    total_data_set_size = 50000
    splits_count = 0  # NOTE(review): never read in this function
    # create the root node
    root = Node()
    # if the training set is hemogenous, set the root node's label to this value
    root.label = check_homogenous(data_set)
    if root.label != None:
        return root
    # if we've reached the maximum search depth, label with the most common classification
    if depth == 0:
        root.label = _most_common_classification(data_set)
        return root
    # Early-stopping heuristic: stop growing once the subset is under 5% of
    # the (assumed) full data set size.
    if PRUNING:
        if len(data_set) < .05*total_data_set_size:
            print depth
            root.label = _most_common_classification(data_set)
            return root
    if not root.label:
        # find the best attribute to split on
        attribute = pick_best_attribute(data_set,attribute_metadata,numerical_splits_count)
        # if there is no attribute, return the root node with label = mode value
        if not attribute[0]:
            root.label = mode(data_set)
            return root
        # get the index of the decision attribute
        root.decision_attribute = attribute[0]
        root.is_nominal = attribute_metadata[root.decision_attribute]['is_nominal']
        root.splitting_value = attribute[1]
        root.name = attribute_metadata[attribute[0]]['name']
        # if the attribute is nominal...
        if root.is_nominal:
            # split the dataset into parts on the best attribute
            split_dataset = split_on_nominal(data_set,attribute[0])
            '''if PRUNING:
                if len(split_dataset) < .05*total_data_set_size:
                    print depth
                    root.label = _most_common_classification(data_set)
                    return root'''
            # NOTE(review): root.children is indexed without being initialized
            # here — relies on Node() defaulting children to a dict; confirm.
            for example in split_dataset:
                # for each subset of the data create a new dictionary in children {attribute_index:node}
                root.children[example] = ID3(split_dataset[example],attribute_metadata,numerical_splits_count,depth-1)
        # if the attribute is numerical...
        else:
            if numerical_splits_count[attribute[0]] == 0:
                # split budget exhausted for this attribute -> leaf.
                root.label = _most_common_classification(data_set)
            else:
                numerical_splits_count[attribute[0]] -= 1
                root.children = []
                # split the dataset into 2 parts on the best attribute
                split_dataset = split_on_numerical(data_set,attribute[0],attribute[1])
                ''''if PRUNING:
                    if len(split_dataset) < .05*total_data_set_size:
                        print depth
                        root.label = _most_common_classification(data_set)
                        return root'''
                for example in split_dataset:
                    # for each subset of the data create a new list in children [node,node]
                    root.children.append(ID3(example,attribute_metadata,numerical_splits_count,depth-1))
    return root