refactoring: moving tokenize to parsers (#5596)

* moving tokenize to parsers

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

---------

Co-authored-by: a <1@1.1>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Andy Kipp 2024-07-12 11:11:52 +02:00 committed by GitHub
parent d08248f6aa
commit bdfa67c637
6 changed files with 6 additions and 6 deletions
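
The whole change is a module rename: everything previously imported from xonsh.lib.tokenize now lives at xonsh.parsers.tokenize, and the public names touched below (detect_encoding, tokopen, SearchPath, StringPrefix, tokenize, TokenError, ...) are unchanged. A minimal compatibility sketch for downstream code, assuming only the import path moved; the try/except fallback is an illustration, not part of this commit:

# Prefer the new location introduced by this commit; fall back to the
# pre-move path on older xonsh releases where the module has not moved yet.
try:
    from xonsh.parsers.tokenize import detect_encoding, tokopen
except ImportError:
    from xonsh.lib.tokenize import detect_encoding, tokopen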

@@ -226,7 +226,7 @@ convention = "numpy"
 "xonsh/history.py" = ["F821"]
 "xonsh/parsers/lexer.py" = ["E741"]
 "xonsh/parsers/completion_context.py" = ["B018"]
-"xonsh/lib/tokenize.py" = [
+"xonsh/parsers/tokenize.py" = [
     "F821",
     "F841",
     "B904" # Within an `except` clause, raise exceptions with `raise ... from err`

@@ -18,7 +18,7 @@ import types
 from xonsh.lib.lazyasd import LazyObject
 from xonsh.lib.lazyimps import pyghooks, pygments
 from xonsh.lib.openpy import read_py_file
-from xonsh.lib.tokenize import detect_encoding
+from xonsh.parsers.tokenize import detect_encoding
 from xonsh.platform import HAS_PYGMENTS
 from xonsh.style_tools import partial_color_tokenize
 from xonsh.tools import cast_unicode, format_color, indent, print_color, safe_hasattr

@@ -16,7 +16,7 @@ import io
 import re
 
 from xonsh.lib.lazyasd import LazyObject
-from xonsh.lib.tokenize import detect_encoding, tokopen
+from xonsh.parsers.tokenize import detect_encoding, tokopen
 
 cookie_comment_re = LazyObject(
     lambda: re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE),

@@ -11,13 +11,13 @@ from collections.abc import Iterable, Mapping, Sequence
 from threading import Thread
 
 from xonsh.lib.lazyasd import LazyObject
-from xonsh.lib.tokenize import SearchPath, StringPrefix
 from xonsh.parsers import ast
 from xonsh.parsers.ast import has_elts, load_attribute_chain, xonsh_call
 from xonsh.parsers.context_check import check_contexts
 from xonsh.parsers.fstring_adaptor import FStringAdaptor
 from xonsh.parsers.lexer import Lexer, LexToken
 from xonsh.parsers.ply import yacc
+from xonsh.parsers.tokenize import SearchPath, StringPrefix
 from xonsh.platform import PYTHON_VERSION_INFO
 
 RE_SEARCHPATH = LazyObject(lambda: re.compile(SearchPath), globals(), "RE_SEARCHPATH")

@@ -11,7 +11,8 @@ import re
 import typing as tp
 
 from xonsh.lib.lazyasd import lazyobject
-from xonsh.lib.tokenize import (
+from xonsh.parsers.ply.lex import LexToken
+from xonsh.parsers.tokenize import (
     CASE,
     COMMENT,
     DEDENT,
@@ -36,7 +37,6 @@ from xonsh.lib.tokenize import (
     TokenError,
     tokenize,
 )
-from xonsh.parsers.ply.lex import LexToken
 from xonsh.platform import PYTHON_VERSION_INFO