from __future__ import absolute_import, division, unicode_literals

from six import PY2, text_type, unichr

import io

from . import support  # noqa

from html5lib.constants import namespaces, tokenTypes
from html5lib import parse, parseFragment, HTMLParser


# tests that aren't autogenerated from text files
def test_assertDoctypeCloneable():
    doc = parse('<!DOCTYPE HTML>', treebuilder="dom")
    assert doc.cloneNode(True) is not None


def test_line_counter():
    # http://groups.google.com/group/html5lib-discuss/browse_frm/thread/f4f00e4a2f26d5c0
    assert parse("<pre>\nx\n&gt;\n</pre>") is not None
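

# namespaceHTMLElements controls whether the treebuilder puts HTML elements in
# the XHTML namespace, "http://www.w3.org/1999/xhtml", or leaves them
# unnamespaced; the four tests below cover both settings for dom and etree.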
") is not None def test_namespace_html_elements_0_dom(): doc = parse("", treebuilder="dom", namespaceHTMLElements=True) assert doc.childNodes[0].namespaceURI == namespaces["html"] def test_namespace_html_elements_1_dom(): doc = parse("", treebuilder="dom", namespaceHTMLElements=False) assert doc.childNodes[0].namespaceURI is None def test_namespace_html_elements_0_etree(): doc = parse("", treebuilder="etree", namespaceHTMLElements=True) assert doc.tag == "{%s}html" % (namespaces["html"],) def test_namespace_html_elements_1_etree(): doc = parse("", treebuilder="etree", namespaceHTMLElements=False) assert doc.tag == "html" def test_unicode_file(): assert parse(io.StringIO("a")) is not None def test_maintain_attribute_order(): # This is here because we impl it in parser and not tokenizer p = HTMLParser() # generate loads to maximize the chance a hash-based mutation will occur attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))] token = {'name': 'html', 'selfClosing': False, 'selfClosingAcknowledged': False, 'type': tokenTypes["StartTag"], 'data': attrs} out = p.normalizeToken(token) attr_order = list(out["data"].keys()) assert attr_order == [x for x, i in attrs] def test_duplicate_attribute(): # This is here because we impl it in parser and not tokenizer doc = parse('
    el = doc[1][0]
    assert el.get("class") == "a"


def test_maintain_duplicate_attribute_order():
    # This is here because we impl it in parser and not tokenizer
    p = HTMLParser()
    attrs = [(unichr(x), i) for i, x in enumerate(range(ord('a'), ord('z')))]
    token = {'name': 'html',
             'selfClosing': False,
             'selfClosingAcknowledged': False,
             'type': tokenTypes["StartTag"],
             'data': attrs + [('a', len(attrs))]}
    out = p.normalizeToken(token)
    attr_order = list(out["data"].keys())
    assert attr_order == [x for x, i in attrs]


def test_debug_log():
    parser = HTMLParser(debug=True)
    parser.parse("<!doctype html><title>a</title><p>b<script>c</script>d</p>e")
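
    # Each log entry records (tokenizer state, phase the parser was in, phase
    # that actually handled the token, handler method, token summary); note
    # how the <script> start tag is delegated from InBodyPhase to InHeadPhase.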
e") expected = [('dataState', 'InitialPhase', 'InitialPhase', 'processDoctype', {'type': 'Doctype'}), ('dataState', 'BeforeHtmlPhase', 'BeforeHtmlPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}), ('dataState', 'BeforeHeadPhase', 'BeforeHeadPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}), ('dataState', 'InHeadPhase', 'InHeadPhase', 'processStartTag', {'name': 'title', 'type': 'StartTag'}), ('rcdataState', 'TextPhase', 'TextPhase', 'processCharacters', {'type': 'Characters'}), ('dataState', 'TextPhase', 'TextPhase', 'processEndTag', {'name': 'title', 'type': 'EndTag'}), ('dataState', 'InHeadPhase', 'InHeadPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}), ('dataState', 'AfterHeadPhase', 'AfterHeadPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processStartTag', {'name': 'p', 'type': 'StartTag'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processStartTag', {'name': 'script', 'type': 'StartTag'}), ('dataState', 'InBodyPhase', 'InHeadPhase', 'processStartTag', {'name': 'script', 'type': 'StartTag'}), ('scriptDataState', 'TextPhase', 'TextPhase', 'processCharacters', {'type': 'Characters'}), ('dataState', 'TextPhase', 'TextPhase', 'processEndTag', {'name': 'script', 'type': 'EndTag'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processEndTag', {'name': 'p', 'type': 'EndTag'}), ('dataState', 'InBodyPhase', 'InBodyPhase', 'processCharacters', {'type': 'Characters'})] if PY2: for i, log in enumerate(expected): log = [x.encode("ascii") if isinstance(x, text_type) else x for x in log] expected[i] = tuple(log) assert parser.log == expected def test_no_duplicate_clone(): frag = parseFragment("