diff --git a/CHANGES.rst b/CHANGES.rst
index a3dbfac..5deb91e 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -6,6 +6,21 @@ lxml_html_clean changelog
Unreleased
==========
+0.4.4 (2026-02-26)
+==================
+
+Bugs fixed
+----------
+
+* Fixed a bug where Unicode escapes in CSS were not properly decoded
+ before security checks. This prevents attackers from bypassing filters
+ using escape sequences.
+* Fixed a security issue where ``<base>`` tags could be used for URL
+  hijacking attacks. The ``<base>`` tag is now automatically removed
+  whenever the ``<head>`` tag is removed (via ``page_structure=True``
+  or manual configuration), as ``<base>`` must be inside ``<head>``
+  according to HTML specifications.
+
0.4.3 (2025-10-02)
==================
diff --git a/lxml_html_clean/clean.py b/lxml_html_clean/clean.py
index 3eeda47..71f2c75 100644
--- a/lxml_html_clean/clean.py
+++ b/lxml_html_clean/clean.py
@@ -422,6 +422,12 @@ def __call__(self, doc):
if self.annoying_tags:
remove_tags.update(('blink', 'marquee'))
+        # Remove <base> tags whenever <head> is being removed.
+        # According to HTML spec, <base> must be in <head>, but browsers
+        # may interpret it even when misplaced, allowing URL hijacking attacks.
+ if 'head' in kill_tags or 'head' in remove_tags:
+ kill_tags.add('base')
+
_remove = deque()
_kill = deque()
for el in doc.iter():
@@ -578,6 +584,26 @@ def _remove_javascript_link(self, link):
_comments_re = re.compile(r'/\*.*?\*/', re.S)
_find_comments = _comments_re.finditer
_substitute_comments = _comments_re.sub
+ _css_unicode_escape_re = re.compile(r'\\([0-9a-fA-F]{1,6})\s?')
+
+ def _decode_css_unicode_escapes(self, style):
+ """
+ Decode CSS Unicode escape sequences like \\69 or \\000069 to their
+ actual character values. This prevents bypassing security checks
+ using CSS escape sequences.
+
+ CSS escape syntax: backslash followed by 1-6 hex digits,
+ optionally followed by a whitespace character.
+ """
+ def replace_escape(match):
+ hex_value = match.group(1)
+ try:
+ return chr(int(hex_value, 16))
+ except (ValueError, OverflowError):
+ # Invalid unicode codepoint, keep original
+ return match.group(0)
+
+ return self._css_unicode_escape_re.sub(replace_escape, style)
def _has_sneaky_javascript(self, style):
"""
@@ -591,6 +617,7 @@ def _has_sneaky_javascript(self, style):
more sneaky attempts.
"""
style = self._substitute_comments('', style)
+ style = self._decode_css_unicode_escapes(style)
style = style.replace('\\', '')
style = _substitute_whitespace('', style)
style = style.lower()
diff --git a/setup.cfg b/setup.cfg
index fffc43a..b8281ec 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = lxml_html_clean
-version = 0.4.3
+version = 0.4.4
description = HTML cleaner from lxml project
long_description = file:README.md
long_description_content_type = text/markdown
@@ -19,6 +19,7 @@ classifiers =
Programming Language :: Python :: 3.11
Programming Language :: Python :: 3.12
Programming Language :: Python :: 3.13
+ Programming Language :: Python :: 3.14
[options]
packages =
diff --git a/tests/test_clean.py b/tests/test_clean.py
index 64ad52d..547ede8 100644
--- a/tests/test_clean.py
+++ b/tests/test_clean.py
@@ -393,3 +393,195 @@ def test_possibly_invalid_url_without_whitelist(self):
self.assertEqual(len(w), 0)
self.assertNotIn("google.com", result)
self.assertNotIn("example.com", result)
+
+    def test_base_tag_removed_with_page_structure(self):
+        # Test that <base> tags are removed when page_structure=True (default)
+        # This prevents URL hijacking attacks where <base> redirects all relative URLs
+
+        test_cases = [
+            # <base> in proper location (inside <head>)
+            '<html><head><base href="http://evil.com/"></head><body><a href="page.html">link</a></body></html>',
+            # <base> outside <head>
+            '<html><body><base href="http://evil.com/"></body></html>',
+            # Multiple <base> tags
+            '<html><head><base href="http://evil.com/"><base href="http://evil2.com/"></head><body></body></html>',
+            # <base> with target attribute
+            '<html><head><base href="http://evil.com/" target="_blank"></head><body>content</body></html>',
+            # <base> at various positions
+            '<html><body><base href="http://evil.com/">test</body></html>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ # Verify tag is completely removed
+ self.assertNotIn('base', cleaned.lower())
+ self.assertNotIn('evil.com', cleaned)
+ self.assertNotIn('evil2.com', cleaned)
+
+    def test_base_tag_kept_when_page_structure_false(self):
+        # When page_structure=False and head is not removed, <base> should be kept
+        cleaner = Cleaner(page_structure=False)
+        html = '<html><head><base href="http://example.com/"></head><body>test</body></html>'
+        cleaned = cleaner.clean_html(html)
+        self.assertIn('<base href="http://example.com/">', cleaned)
+
+    def test_base_tag_removed_when_head_in_remove_tags(self):
+        # Even with page_structure=False, <base> should be removed if head is manually removed
+        cleaner = Cleaner(page_structure=False, remove_tags=['head'])
+        html = '<html><head><base href="http://evil.com/"></head><body>test</body></html>'
+        cleaned = cleaner.clean_html(html)
+        self.assertNotIn('base', cleaned.lower())
+        self.assertNotIn('evil.com', cleaned)
+
+    def test_base_tag_removed_when_head_in_kill_tags(self):
+        # Even with page_structure=False, <base> should be removed if head is in kill_tags
+        cleaner = Cleaner(page_structure=False, kill_tags=['head'])
+        html = '<html><head><base href="http://evil.com/"></head><body>test</body></html>'
+        cleaned = cleaner.clean_html(html)
+        self.assertNotIn('base', cleaned.lower())
+        self.assertNotIn('evil.com', cleaned)
+
+ def test_unicode_escape_in_style(self):
+ # Test that CSS Unicode escapes are properly decoded before security checks
+ # This prevents attackers from bypassing filters using escape sequences
+ # CSS escape syntax: \HHHHHH where H is a hex digit (1-6 digits)
+
+ # Test inline style attributes (requires safe_attrs_only=False)
+ cleaner = Cleaner(safe_attrs_only=False)
+        inline_style_cases = [
+            # \6a\61\76\61\73\63\72\69\70\74 = "javascript"
+            ('<div style="background: url(\\6a\\61\\76\\61\\73\\63\\72\\69\\70\\74:alert(1))">test</div>',
+             '<div>test</div>'),
+            # \69 = 'i', so \69mport = "import"
+            ('<div style="@\\69mport \'evil.css\'">test</div>',
+             '<div>test</div>'),
+            # \69 with space after = 'i', space consumed as part of escape
+            ('<div style="@\\69 mport \'evil.css\'">test</div>',
+             '<div>test</div>'),
+            # \65\78\70\72\65\73\73\69\6f\6e = "expression"
+            ('<div style="width: \\65\\78\\70\\72\\65\\73\\73\\69\\6f\\6e(alert(1))">test</div>',
+             '<div>test</div>'),
+        ]
+
+ for html, expected in inline_style_cases:
+ with self.subTest(html=html):
+ cleaned = cleaner.clean_html(html)
+ self.assertEqual(expected, cleaned)
+
+ # Test ',
+ # Unicode-escaped "javascript:" without url()
+ '',
+ # Unicode-escaped "expression"
+ '',
+ # Unicode-escaped @import with 'i'
+ '',
+ # Unicode-escaped "data:" scheme
+ '',
+ # Space after escape is consumed: \69 mport = "import"
+ '',
+ # 6-digit escape: \000069 = 'i'
+ '',
+ # 6-digit escape with space
+ '',
+ ]
+
+ for html in style_tag_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)
+
+ def test_unicode_escape_mixed_with_comments(self):
+ # Unicode escapes mixed with CSS comments should still be caught
+        test_cases = [
+            # \69 = 'i' with comment before
+            '<style>/* comment */@\\69mport "evil.css";</style>',
+            # \69 = 'i' with comment after
+            '<style>@\\69mport/* comment */ "evil.css";</style>',
+            # Multiple escapes with comments
+            '<style>@\\69\\6dport/* comment */ "evil.css";</style>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)
+
+ def test_unicode_escape_case_insensitive(self):
+ # CSS hex escapes should work with both uppercase and lowercase hex digits
+ # \69 = 'i', \6D = 'm', etc.
+        test_cases = [
+            # @import with uppercase hex digits: \69\6D\70\6F\72\74
+            '<style>@\\69\\6D\\70\\6F\\72\\74 "evil.css";</style>',
+            # @import with some uppercase
+            '<style>@\\69\\6d\\70\\6F\\72\\74 "evil.css";</style>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)
+
+ def test_unicode_escape_various_schemes(self):
+ # Test Unicode escapes for various malicious schemes
+        test_cases = [
+            # \76\62\73\63\72\69\70\74 = "vbscript"
+            '<style>a { background: url(\\76\\62\\73\\63\\72\\69\\70\\74:alert(1)) }</style>',
+            # \6a\73\63\72\69\70\74 = "jscript"
+            '<style>a { background: url(\\6a\\73\\63\\72\\69\\70\\74:alert(1)) }</style>',
+            # \6c\69\76\65\73\63\72\69\70\74 = "livescript"
+            '<style>a { background: url(\\6c\\69\\76\\65\\73\\63\\72\\69\\70\\74:alert(1)) }</style>',
+            # \6d\6f\63\68\61 = "mocha"
+            '<style>a { background: url(\\6d\\6f\\63\\68\\61:alert(1)) }</style>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)
+
+ def test_unicode_escape_with_whitespace_variations(self):
+ # Test different whitespace characters after Unicode escapes
+ cleaner = Cleaner(safe_attrs_only=False)
+        test_cases = [
+            # Tab after escape
+            ('<div style="@\\69\tmport \'evil.css\'">test</div>',
+             '<div>test</div>'),
+            # Newline after escape (note: actual newline, not \n)
+            ('<div style="@\\69\nmport \'evil.css\'">test</div>',
+             '<div>test</div>'),
+            # Form feed after escape
+            ('<div style="@\\69\fmport \'evil.css\'">test</div>',
+             '<div>test</div>'),
+        ]
+
+ for html, expected in test_cases:
+ with self.subTest(html=html):
+ cleaned = cleaner.clean_html(html)
+ self.assertEqual(expected, cleaned)
+
+ def test_backslash_removal_after_unicode_decode(self):
+ # After decoding Unicode escapes, remaining backslashes are removed
+ # This ensures double-obfuscation (unicode + backslashes) is caught
+        test_cases = [
+            # Step 1: \69 → 'i', Step 2: remove \, Result: @import
+            '<style>@\\69\\m\\p\\o\\r\\t "evil.css";</style>',
+            # Multiple unicode escapes with backslashes mixed in
+            '<style>@\\69\\6d\\p\\o\\r\\t "evil.css";</style>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)
+
+ def test_backslash_obfuscation_without_unicode(self):
+ # Test that patterns using ONLY backslash obfuscation (no unicode) are caught
+ # Step 1: No unicode escapes, Step 2: remove \, Result: malicious pattern
+        test_cases = [
+            # @\i\m\p\o\r\t → @import (caught by '@import' check)
+            '<style>@\\i\\m\\p\\o\\r\\t "evil.css";</style>',
+            # Can also test combinations that create javascript schemes
+            '<style>a { background: url(java\\scri\\pt:alert(1)) }</style>',
+        ]
+
+ for html in test_cases:
+ with self.subTest(html=html):
+ cleaned = clean_html(html)
+ self.assertEqual('', cleaned)