mirror of
https://github.com/xonsh/xonsh.git
synced 2025-03-04 16:34:47 +01:00
Moved ``tokenize_ansi`` into ``ptk_shell.shell`` and added unit tests for ANSI tokenization.
This commit is contained in:
parent
bd9b3d6a9f
commit
a4b61f5fc1
3 changed files with 71 additions and 36 deletions
|
@ -8,6 +8,7 @@ from xonsh.platform import minimum_required_ptk_version
|
|||
|
||||
# verify error if ptk not installed or below min
|
||||
|
||||
from xonsh.ptk_shell.shell import tokenize_ansi
|
||||
from xonsh.shell import Shell
|
||||
|
||||
|
||||
|
@ -26,7 +27,15 @@ from xonsh.shell import Shell
|
|||
((4, 0, 0), "prompt_toolkit", "prompt_toolkit", None, False),
|
||||
],
|
||||
)
|
||||
def test_prompt_toolkit_version_checks(ptk_ver, ini_shell_type, exp_shell_type, warn_snip, using_vended_ptk, monkeypatch, xonsh_builtins):
|
||||
def test_prompt_toolkit_version_checks(
|
||||
ptk_ver,
|
||||
ini_shell_type,
|
||||
exp_shell_type,
|
||||
warn_snip,
|
||||
using_vended_ptk,
|
||||
monkeypatch,
|
||||
xonsh_builtins,
|
||||
):
|
||||
|
||||
mocked_warn = ""
|
||||
|
||||
|
@ -43,9 +52,15 @@ def test_prompt_toolkit_version_checks(ptk_ver, ini_shell_type, exp_shell_type,
|
|||
nonlocal ptk_ver
|
||||
return ptk_ver is not None
|
||||
|
||||
monkeypatch.setattr("xonsh.shell.warnings.warn", mock_warning) # hardwon: patch the caller!
|
||||
monkeypatch.setattr("xonsh.shell.ptk_above_min_supported", mock_ptk_above_min_supported) # have to patch both callers
|
||||
monkeypatch.setattr("xonsh.platform.ptk_above_min_supported", mock_ptk_above_min_supported)
|
||||
monkeypatch.setattr(
|
||||
"xonsh.shell.warnings.warn", mock_warning
|
||||
) # hardwon: patch the caller!
|
||||
monkeypatch.setattr(
|
||||
"xonsh.shell.ptk_above_min_supported", mock_ptk_above_min_supported
|
||||
) # have to patch both callers
|
||||
monkeypatch.setattr(
|
||||
"xonsh.platform.ptk_above_min_supported", mock_ptk_above_min_supported
|
||||
)
|
||||
monkeypatch.setattr("xonsh.platform.has_prompt_toolkit", mock_has_prompt_toolkit)
|
||||
|
||||
old_syspath = sys.path.copy()
|
||||
|
@ -60,7 +75,6 @@ def test_prompt_toolkit_version_checks(ptk_ver, ini_shell_type, exp_shell_type,
|
|||
|
||||
sys.path = old_syspath
|
||||
|
||||
|
||||
assert act_shell_type == exp_shell_type
|
||||
|
||||
if warn_snip:
|
||||
|
@ -68,4 +82,27 @@ def test_prompt_toolkit_version_checks(ptk_ver, ini_shell_type, exp_shell_type,
|
|||
|
||||
pass
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"prompt_tokens, ansi_string_parts",
|
||||
[
|
||||
# no ansi, single token
|
||||
([("fake style", "no ansi here")], ["no ansi here"]),
|
||||
# no ansi, multiple tokens
|
||||
([("s1", "no"), ("s2", "ansi here")], ["no", "ansi here"]),
|
||||
# ansi only, multiple
|
||||
([("s1", "\x1b[33mansi \x1b[1monly")], ["", "ansi ", "only"]),
|
||||
# mixed
|
||||
(
|
||||
[("s1", "no ansi"), ("s2", "mixed \x1b[33mansi")],
|
||||
["no ansi", "mixed ", "ansi"],
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_tokenize_ansi(prompt_tokens, ansi_string_parts):
    """Verify tokenize_ansi splits ANSI-bearing tokens into the expected text parts.

    ``prompt_tokens`` is a list of (style, text) tuples fed to ``tokenize_ansi``;
    ``ansi_string_parts`` is the expected text of each resulting token, in order.
    """
    ansi_tokens = tokenize_ansi(prompt_tokens)
    # zip() alone would silently ignore surplus or missing tokens, so pin the
    # token count explicitly before comparing the texts pairwise.
    assert len(ansi_tokens) == len(ansi_string_parts)
    for token, expected_text in zip(ansi_tokens, ansi_string_parts):
        assert token[1] == expected_text
|
||||
|
||||
|
||||
# someday: initialize PromptToolkitShell and have it actually do something.
|
||||
|
|
|
@ -16,8 +16,8 @@ from xonsh.pygments_cache import get_all_styles
|
|||
from xonsh.ptk_shell.history import PromptToolkitHistory, _cust_history_matches
|
||||
from xonsh.ptk_shell.completer import PromptToolkitCompleter
|
||||
from xonsh.ptk_shell.key_bindings import load_xonsh_bindings
|
||||
from xonsh.ptk_shell.tokenize_ansi import tokenize_ansi
|
||||
|
||||
from prompt_toolkit import ANSI
|
||||
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
||||
from prompt_toolkit.lexers import PygmentsLexer
|
||||
from prompt_toolkit.enums import EditingMode
|
||||
|
@ -26,7 +26,7 @@ from prompt_toolkit.history import ThreadedHistory
|
|||
from prompt_toolkit.shortcuts import print_formatted_text as ptk_print
|
||||
from prompt_toolkit.shortcuts import CompleteStyle
|
||||
from prompt_toolkit.shortcuts.prompt import PromptSession
|
||||
from prompt_toolkit.formatted_text import PygmentsTokens
|
||||
from prompt_toolkit.formatted_text import PygmentsTokens, to_formatted_text
|
||||
from prompt_toolkit.styles import merge_styles, Style
|
||||
from prompt_toolkit.styles.pygments import (
|
||||
style_from_pygments_cls,
|
||||
|
@ -47,6 +47,32 @@ Fired after prompt toolkit has been initialized
|
|||
)
|
||||
|
||||
|
||||
def tokenize_ansi(tokens):
    """Checks a list of (token, str) tuples for ANSI escape sequences and
    extends the token list with the new formatted entries.
    During processing tokens are converted to ``prompt_toolkit.FormattedText``.
    Returns a list of similar (token, str) tuples.
    """
    result = []
    for style, text in to_formatted_text(tokens):
        if "\x1b" not in text:
            # No escape sequence present: pass the token through untouched.
            result.append((style, text))
            continue
        # Re-parse the text as ANSI and coalesce consecutive runs that share
        # the same style into a single token; the original token's style is
        # used as a fallback for unstyled runs.
        run_style, run_text = "", ""
        for part_style, part_text in to_formatted_text(ANSI(text)):
            if part_style == run_style:
                run_text += part_text
            else:
                result.append((run_style or style, run_text))
                run_style, run_text = part_style, part_text
        result.append((run_style or style, run_text))
    return result
|
||||
|
||||
|
||||
class PromptToolkitShell(BaseShell):
|
||||
"""The xonsh shell for prompt_toolkit v2 and later."""
|
||||
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
from prompt_toolkit import ANSI
|
||||
from prompt_toolkit.formatted_text import to_formatted_text
|
||||
|
||||
|
||||
def tokenize_ansi(tokens):
    """Checks a list of (token, str) tuples for ANSI escape sequences and
    extends the token list with the new formatted entries.
    During processing tokens are converted to ``prompt_toolkit.FormattedText``.
    Returns a list of similar (token, str) tuples.
    """
    out = []
    for base_style, text in to_formatted_text(tokens):
        if "\x1b" in text:
            # The token embeds ANSI escape codes: let prompt_toolkit decode
            # them, then merge adjacent pieces whose style is identical.
            pieces = to_formatted_text(ANSI(text))
            current_style = ""
            current_text = ""
            for piece_style, piece_text in pieces:
                if current_style == piece_style:
                    current_text += piece_text
                else:
                    # Style changed: flush the accumulated run, falling back
                    # to the token's own style for unstyled segments.
                    out.append((current_style or base_style, current_text))
                    current_style = piece_style
                    current_text = piece_text
            out.append((current_style or base_style, current_text))
        else:
            out.append((base_style, text))
    return out
|
Loading…
Add table
Reference in a new issue