本文整理汇总了Python中xml.dom.minidom.parseString函数的典型用法代码示例。如果您正苦于以下问题:Python parseString函数的具体用法?Python parseString怎么用?Python parseString使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了parseString函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: render
def render(self, encoding=None, fragment=False, pretty=False, nsmap=None):
    """Serialize this model instance to XML.

    A unicode string is returned unless 'encoding' is given, in which case
    a bytestring in that encoding is produced.  Set 'fragment' to True to
    suppress the leading "<?xml ...?>" declaration; set 'pretty' to get
    indented output via minidom.
    """
    namespace_map = nsmap if nsmap is not None else {}
    chunks = []
    if not fragment:
        # Emit the XML declaration, carrying the encoding when one was given.
        if encoding:
            chunks.append('<?xml version="1.0" encoding="%s" ?>' % (encoding,))
        else:
            chunks.append('<?xml version="1.0" ?>')
    chunks.extend(self._render(namespace_map))
    xml = "".join(chunks)
    if pretty and encoding:
        # minidom does pretty-printing and byte-encoding in a single call.
        xml = minidom.parseString(xml).toprettyxml(encoding=encoding)
    else:
        if pretty:
            xml = minidom.parseString(xml).toprettyxml()
        if encoding:
            xml = xml.encode(encoding)
    return xml
示例2: handle_create_space
def handle_create_space(self):
    """Create a new SharePoint document workspace via the CreateDws SOAP call.

    Prompts for the space name, posts the SOAP request, and prints the URL
    of the newly created space.  Raises Exception on a non-200 response.
    """
    def unescape(s):
        # The CreateDwsResult payload is XML that has been entity-escaped
        # inside the outer SOAP envelope; undo that escaping.  (The scraped
        # original had the entities rendered away, making this a no-op.)
        s = s.replace("&lt;", "<")
        s = s.replace("&gt;", ">")
        # this has to be last, so already-unescaped text is not double-decoded:
        s = s.replace("&amp;", "&")
        return s
    headers = self.soapheaders('http://schemas.microsoft.com/sharepoint/soap/dws/CreateDws')
    space = self.ask('name', '')
    soapbody = """<?xml version="1.0"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
<s:Body>
<CreateDws xmlns="http://schemas.microsoft.com/sharepoint/soap/dws/">
<name/>
<users/>
<title>%s</title>
<documents/>
</CreateDws>
</s:Body>
</s:Envelope>""" % space
    # no space in the url, we're creating a new one!
    response = self.urlopen("%s/_vti_bin/dws.asmx" % self.path, soapbody, headers)
    if response.code != 200:
        raise Exception("failed to create space, http error %s" % response.code)
    ret = response.read()
    try:
        xml = minidom.parseString(ret)
        inner = unescape(xml.getElementsByTagName('CreateDwsResult')[0].firstChild.toxml())
        xml = minidom.parseString(inner)
        url = xml.getElementsByTagName('Url')[0].firstChild.toxml()
        print('created space at %s' % url)
    except Exception:
        print("response is invalid xml: '%s'" % ret)
示例3: extract_project_deps
def extract_project_deps(project_filepath, log):
    """Collect the set of files referenced by a project file.

    Parses the XML project file and gathers every 'filename' parameter
    (both the flat <parameter> form and the grouped <parameters> form),
    resolving each value relative to the project file's directory.

    Returns (True, deps) on success, or (False, empty set) when the
    project file cannot be read.  'log' is currently unused.
    """
    try:
        with open(project_filepath, 'r') as project_file:
            contents = project_file.read()
    except (IOError, OSError):
        # log.warning("failed to acquire {0}.".format(project_filepath))
        return False, set()
    deps = set()
    directory = os.path.split(project_filepath)[0]
    # Parse once and reuse the DOM for both queries (original parsed twice).
    dom = xml.parseString(contents)
    for node in dom.getElementsByTagName('parameter'):
        if node.getAttribute('name') == 'filename':
            filepath = node.getAttribute('value')
            filepath = convert_path_to_local(filepath)
            filepath = os.path.join(directory, filepath)
            deps.add(filepath)
    for node in dom.getElementsByTagName('parameters'):
        if node.getAttribute('name') == 'filename':
            for child in node.childNodes:
                if child.nodeType == xml.Node.ELEMENT_NODE:
                    filepath = child.getAttribute('value')
                    filepath = convert_path_to_local(filepath)
                    filepath = os.path.join(directory, filepath)
                    deps.add(filepath)
    return True, deps
示例4: test_geofeed_rss
def test_geofeed_rss(self):
    "Tests geographic feeds using GeoRSS over RSSv2."
    # Parse both feed flavours: one built from a `GEOSGeometry` in
    # `item_geometry`, one from a 2-tuple.
    doc_geometry = minidom.parseString(self.client.get('/feeds/rss1/').content)
    doc_tuple = minidom.parseString(self.client.get('/feeds/rss2/').content)
    feed1, feed2 = doc_geometry.firstChild, doc_tuple.firstChild
    # Making sure the box got added to the second GeoRSS feed.
    self.assertChildNodes(
        feed2.getElementsByTagName('channel')[0],
        ['title', 'link', 'description', 'language',
         'lastBuildDate', 'item', 'georss:box', 'atom:link'])
    for feed in (feed1, feed2):
        # The georss namespace must be declared on the <rss> element.
        self.assertEqual(feed.getAttribute(u'xmlns:georss'),
                         u'http://www.georss.org/georss')
        channel = feed.getElementsByTagName('channel')[0]
        items = channel.getElementsByTagName('item')
        self.assertEqual(len(items), City.objects.count())
        # Every item must carry its own georss element.
        for item in items:
            self.assertChildNodes(
                item,
                ['title', 'link', 'description', 'guid', 'georss:point'])
示例5: test_generate_report_dictionary_from_dom
def test_generate_report_dictionary_from_dom(self):
    """Test generate_report_dictionary_from_dom function."""
    self.mock_the_dialog(test_entire_mode=False)
    self.impact_merge_dialog.prepare_input()
    self.impact_merge_dialog.validate_all_layers()
    # Wrap each post-processing report in <body> so it parses as a document,
    # then collect the <table> nodes from both reports in order.
    reports = (
        self.impact_merge_dialog.first_impact['postprocessing_report'],
        self.impact_merge_dialog.second_impact['postprocessing_report'],
    )
    tables = []
    for report in reports:
        document = minidom.parseString(
            get_string('<body>' + report + '</body>'))
        tables += document.getElementsByTagName('table')
    report_dict = (
        self.impact_merge_dialog.generate_report_dictionary_from_dom(tables))
    # There should be 4 keys in that dict
    # (3 for each aggregation unit and 1 for total in aggregation unit)
    expected_number_of_keys = 4
    self.assertEqual(len(report_dict), expected_number_of_keys)
示例6: _compareDOM
def _compareDOM( self, found_text, expected_text, debug=False ):
found_lines = [ x.strip() for x in found_text.splitlines() ]
found_text = '\n'.join( filter( None, found_lines ) )
expected_lines = [ x.strip() for x in expected_text.splitlines() ]
expected_text = '\n'.join( filter( None, expected_lines ) )
from xml.dom.minidom import parseString
found = parseString( found_text )
expected = parseString( expected_text )
fxml = found.toxml()
exml = expected.toxml()
if fxml != exml:
if debug:
zipped = zip( fxml, exml )
diff = [ ( i, zipped[i][0], zipped[i][1] )
for i in range( len( zipped ) )
if zipped[i][0] != zipped[i][1]
]
import pdb; pdb.set_trace()
print 'Found:'
print fxml
print
print 'Expected:'
print exml
print
self.assertEqual( found.toxml(), expected.toxml() )
示例7: pprint
def pprint(self):
    """Pretty-print the domain's <cputune> XML section, then the resource info.

    NOTE(review): the original parsed the full self.xml document into a
    variable that was immediately overwritten by the cputune-only parse;
    that dead work is removed here.  The original (whitespace-mangled)
    source did not show whether self.res.pprint() sat inside the guard —
    kept inside to match the narrowest reading; confirm against callers.
    """
    if self.xml is not None:
        cputune_doc = minidom.parseString(
            ElementTree.tostring(self.xml.find('./cputune')))
        print(cputune_doc.toprettyxml())
        self.res.pprint()
示例8: scan_browse_result
def scan_browse_result(self, result, level, output_format='plain'):
    """Render a DIDL-Lite browse result as text.

    'plain' format emits one line per container ("C <path> * <title>"),
    recursing into each container while level > 0, followed by one line
    per item ("+ <path> * <title>").  Any other format returns a JSON
    array of the extracted DIDL dicts.

    (Fixes: the original shadowed the builtin `dict` and assembled both
    outputs via quadratic `+=` string concatenation.)
    """
    if output_format == 'plain':
        lines = []
        dom = minidom.parseString(result['Result'].encode('utf-8'))
        for container in dom.getElementsByTagName("container"):
            info = DidlInfo.extract_from_node(container, True)
            child_path = info["idPath"]
            lines.append("C " + child_path + " * " + info["title"] + "\n")
            if int(level) > 0:
                self.browse_recursive_children(
                    child_path, int(level) - 1, output_format)
        for item in dom.getElementsByTagName("item"):
            info = DidlInfo.extract_from_node(item, True)
            lines.append("+ " + info["idPath"] + " * " + info["title"] + "\n")
        return "".join(lines)
    else:
        # JSON is assembled by hand to preserve the original output exactly
        # (each entry is json.dumps of one DIDL dict, comma-joined).
        dom = minidom.parseString(result['Result'])
        entries = []
        for container in dom.getElementsByTagName("container"):
            entries.append(
                json.dumps(DidlInfo.extract_from_node(container, True)))
        for item in dom.getElementsByTagName("item"):
            entries.append(
                json.dumps(DidlInfo.extract_from_node(item, True)))
        return "[" + ",".join(entries) + "]"
示例9: main
def main(argv):
    """Command-line entry point: run one UPnP operation against a host.

    Expects [prog, host, operation, [extra-arg]] and prints the result of
    the requested operation.  Exits with status 2 on missing arguments or
    prints usage on an unknown operation.
    """
    if len(sys.argv) < 3:
        usage(sys.argv)
        sys.exit(2)
    host = sys.argv[1]
    uc = UpnpCommand(host)
    operation = sys.argv[2]
    result = None
    # Zero-argument operations dispatch through a name table (resolved
    # lazily via getattr); argument-taking and composite operations are
    # handled explicitly below.
    simple_ops = {
        'play': 'play',
        'stop': 'stop',
        'getv': 'get_volume',
        'getfilter': 'get_filter',
        'prev': 'previous',
        'next': 'next',
        'position': 'get_position_info',
        'transport': 'get_transport_setting',
        'getstatevar': 'get_state_var',
        'cap': 'get_browse_capabilites',
    }
    if operation in simple_ops:
        result = getattr(uc, simple_ops[operation])()
    elif operation == 'setv':
        result = uc.set_volume(sys.argv[3])
    elif operation == 'seek':
        result = uc.seek(sys.argv[3])
    elif operation == 'getsetting':
        result = uc.get_setting(sys.argv[3])
    elif operation == 'media':
        result = uc.get_media_info()
        result += uc.get_position_info()
    elif operation == 'allinfo':
        result = uc.get_volume()
        result += uc.get_position_info()
        result += uc.get_transport_setting()
        result += uc.get_media_info()
    elif operation == 'browse':
        result = uc.browse(argv[3])
        xml_root = minidom.parseString(result['Result'])
        print(xml_root.toprettyxml(indent="\t"))
    elif operation == 'browsechildren':
        # Trailing "/*" requests a recursive listing; plain paths get a
        # single-level, pretty-printed browse.  Returns without the final
        # print(result) either way.
        if argv[3].endswith('/*'):
            print(uc.browse_recursive_children(argv[3][:-2]))
        else:
            result = uc.browsechildren(argv[3])
            xml_root = minidom.parseString(result['Result'])
            print(xml_root.toprettyxml(indent="\t"))
        return
    else:
        usage(sys.argv)
    print(result)
示例10: compare_xml
def compare_xml(want, got):
    """
    Try to do a 'xml-comparison' of want and got.  Plain string comparison
    doesn't always work because, for example, attribute ordering should
    not be important.  Comment nodes and leading/trailing whitespace are
    ignored.
    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
    """
    _whitespace_run = re.compile(r'[ \t\n][ \t\n]+')

    def squeeze(text):
        # Collapse any run of 2+ whitespace characters into a single space.
        return _whitespace_run.sub(' ', text)

    def text_of(element):
        return squeeze(''.join(
            child.data for child in element.childNodes
            if child.nodeType == Node.TEXT_NODE))

    def element_children(element):
        return [child for child in element.childNodes
                if child.nodeType == Node.ELEMENT_NODE]

    def same_element(want_el, got_el):
        if want_el.tagName != got_el.tagName:
            return False
        if text_of(want_el) != text_of(got_el):
            return False
        # Compare attributes as dicts so their ordering is irrelevant.
        if dict(want_el.attributes.items()) != dict(got_el.attributes.items()):
            return False
        want_kids = element_children(want_el)
        got_kids = element_children(got_el)
        if len(want_kids) != len(got_kids):
            return False
        return all(same_element(w, g) for w, g in zip(want_kids, got_kids))

    def root_of(document):
        # The first non-comment child node is the document root.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')
    # If the string is not a complete xml document, add a synthetic root so
    # fragments like "<foo/><bar/>" can be parsed and compared.
    if not want.startswith('<?xml'):
        want = '<root>%s</root>' % want
        got = '<root>%s</root>' % got
    return same_element(root_of(parseString(want)), root_of(parseString(got)))
示例11: xml2terms
def xml2terms(xml):
    """Yield (raw_term, generalized_term, depth) for each non-leaf subtree.

    Walks the XML DOM depth-first with an explicit stack.  For every
    element whose first child is itself an element, yields the compressed
    subtree markup, a generalized form with inter-tag text blanked out,
    and the current stack depth.  Input that does not parse as a complete
    document is retried wrapped in <mrow>.
    """
    try:
        root = parseString(xml).documentElement
    except Exception:
        # Not a well-formed document on its own; wrap and retry.
        # (str.join with xml as separator == '<mrow>' + xml + '</mrow>')
        xml = xml.join(['<mrow>', '</mrow>'])
        root = parseString(xml).documentElement
    stack = [root, ]
    while stack:
        if stack[-1].firstChild and \
           stack[-1].firstChild.nodeType != Node.TEXT_NODE:
            term_raw = stack[-1].toxml()
            # Generalize: drop text between tags, keeping only structure.
            term_gen = re.sub('>[^<]+?<', '><', term_raw)
            term_raw = term_compress(term_raw)
            term_gen = term_compress(term_gen)
            yield term_raw, term_gen, len(stack)
        if stack[-1].firstChild and \
           stack[-1].firstChild.nodeType != Node.TEXT_NODE:
            # Descend into the first child.
            stack.append(stack[-1].firstChild)
        elif stack[-1].nextSibling:
            stack[-1] = stack[-1].nextSibling
        else:
            # Backtrack to the nearest ancestor with an unvisited sibling.
            stack.pop()
            while stack and not stack[-1].nextSibling:
                stack.pop()
            if stack:
                stack[-1] = stack[-1].nextSibling
示例12: test_product_delete_byname
def test_product_delete_byname(self):
    """Delete a product by name"""
    delete_url = reverse('api.views.product_delete')
    payload = {'product': self.products[0].name}
    # First delete must succeed and remove exactly one product.
    response = self.c.post(delete_url, payload)
    xmldoc = minidom.parseString(response.content)
    successes = xmldoc.getElementsByTagName('success')
    self.assertEqual(len(successes), 1, 'Delete successful')
    remaining = Product.objects.all()
    self.assertEquals(len(remaining), len(self.products) - 1,
                      'product was deleted')
    # Deleting the same product again must report "not found" (error 102).
    response = self.c.post(delete_url, payload)
    xmldoc = minidom.parseString(response.content)
    errors = xmldoc.getElementsByTagName('error')
    errno = errors[0].getAttribute('number')
    self.assertEqual(len(errors), 1, 'Delete must only be successful once')
    self.assertEqual(int(errno), 102,
                     'must return product not found error')
    remaining = Product.objects.all()
    self.assertEquals(len(remaining), len(self.products) - 1,
                      'product was deleted only once')
示例13: test_sortable_columns
def test_sortable_columns(self):
    """Make columns sortable:
    - All columns
    - Only specific columns"""
    generator = component.getUtility(ITableGenerator, 'ftw.tablegenerator')
    employees = [
        {'name': 'some name', 'date': 'somedate'},
    ]
    columns = ('name', 'date')

    def th_class(dom, index):
        # Class attribute of the index-th <th> in the generated table.
        return dom.getElementsByTagName('th')[index]._attrs['class'].nodeValue

    # sortable=True adds the 'sortable' class to every table header.
    parsed = parseString(
        generator.generate(employees, columns, sortable=True))
    self.assertEqual(th_class(parsed, 0), 'sortable')
    self.assertEqual(th_class(parsed, 1), 'sortable')

    # Passing a tuple marks only the listed columns as sortable;
    # all other headers get the 'nosort' class.
    columns = ('name', 'date')
    sortable = ('name', )
    parsed = parseString(
        generator.generate(employees, columns, sortable=sortable))
    self.assertEqual(th_class(parsed, 0), 'sortable')
    self.assertEqual(th_class(parsed, 1), u'nosort')
示例14: testT04ProcessesLengthDescribeProcess
def testT04ProcessesLengthDescribeProcess(self):
    """Test, if any processes are listed in the DescribeProcess document
    """
    self._setFromEnv()

    # GET request for selected identifiers.
    getpywps = pywps.Pywps(pywps.METHOD_GET)
    getpywps.parseRequest(self.getdescribeprocessrequest)
    getpywps.performRequest()
    xmldom = minidom.parseString(getpywps.response)
    self.assertTrue(len(xmldom.getElementsByTagName("ProcessDescription")) > 0)
    self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
                      len(getpywps.inputs["identifier"]))

    # GET request for all processes.
    getpywps = pywps.Pywps(pywps.METHOD_GET)
    getpywps.parseRequest(self.getdescribeprocessrequestall)
    getpywps.performRequest()
    xmldom = minidom.parseString(getpywps.response)
    self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
                      len(getpywps.request.processes))

    # POST request for a single process.  The request file is closed as
    # soon as parseRequest has consumed it (the original leaked both
    # handles); performRequest works from the parsed inputs, not the file.
    postpywps = pywps.Pywps(pywps.METHOD_POST)
    with open(os.path.join(pywpsPath, "tests", "requests",
              "wps_describeprocess_request_dummyprocess.xml")) as describeRequestFile:
        postinputs = postpywps.parseRequest(describeRequestFile)
    postpywps.performRequest(postinputs)
    xmldom = minidom.parseString(postpywps.response)
    self.assertTrue(len(xmldom.getElementsByTagName("ProcessDescription")) > 0)
    self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
                      len(postpywps.inputs["identifier"]))

    # POST request for all processes.
    postpywps = pywps.Pywps(pywps.METHOD_POST)
    with open(os.path.join(pywpsPath, "tests", "requests",
              "wps_describeprocess_request_all.xml")) as describeRequestFile:
        postinputs = postpywps.parseRequest(describeRequestFile)
    postpywps.performRequest(postinputs)
    xmldom = minidom.parseString(postpywps.response)
    self.assertEquals(len(xmldom.getElementsByTagName("ProcessDescription")),
                      len(postpywps.request.processes))
示例15: get_story_url_from_epub_html
def get_story_url_from_epub_html(inputio, _is_good_url=None):
    """Scan an EPUB's XHTML content for the story's original URL.

    Walks the OPF manifest, reads each XHTML file, and returns the first
    http(s) link found — optionally filtered by the _is_good_url predicate
    — or None when no suitable link exists.

    (Fixes: '== None'/'!= None' comparisons and a leaked ZipFile handle.)
    """
    # works equally well with inputio as a path or a blob
    epub = ZipFile(inputio, 'r')
    try:
        ## Find the .opf file.
        container = epub.read("META-INF/container.xml")
        containerdom = parseString(container)
        rootfilenodelist = containerdom.getElementsByTagName("rootfile")
        rootfilename = rootfilenodelist[0].getAttribute("full-path")
        contentdom = parseString(epub.read(rootfilename))
        ## Save the path to the .opf file--hrefs inside it are relative to it.
        relpath = get_path_part(rootfilename)
        # spin through the manifest--only place there are item tags.
        for item in contentdom.getElementsByTagName("item"):
            if item.getAttribute("media-type") == "application/xhtml+xml":
                filehref = relpath + item.getAttribute("href")
                soup = make_soup(epub.read(filehref).decode("utf-8"))
                for link in soup.findAll('a', href=re.compile(r'^http.*')):
                    ahref = link['href']
                    # hack for bad ficsaver ffnet URLs.
                    m = re.match(r"^http://www.fanfiction.net/s(?P<id>\d+)//$",
                                 ahref)
                    if m is not None:
                        ahref = "http://www.fanfiction.net/s/%s/1/" % m.group('id')
                    if _is_good_url is None or _is_good_url(ahref):
                        return ahref
        return None
    finally:
        # The original never closed the archive; always release it.
        epub.close()