Merge pull request #2930 from xonsh/andash

More and/or subprocess token fixes.
This commit is contained in:
Gil Forsyth 2018-11-08 15:01:19 -05:00 committed by GitHub
commit d53fc4f40a
Failed to generate hash of commit
5 changed files with 76 additions and 9 deletions

28
news/andash.rst Normal file
View file

@ -0,0 +1,28 @@
**Added:**
* <news item>
**Changed:**
* <news item>
**Deprecated:**
* <news item>
**Removed:**
* <news item>
**Fixed:**
* Fixed issue with ``and`` & ``or`` being incorrectly tokenized in implicit
subprocesses. Auto-wrapping of certain subprocesses will now correctly work.
For example::
$ echo x-and-y
x-and-y
**Security:**
* <news item>

View file

@ -165,7 +165,10 @@ def test_atdollar_expression():
def test_and():
    """A bare ``and`` with no preceding whitespace or other tokens resolves
    to NAME rather than the AND keyword, since it doesn't make sense for
    Python code to start with ``and``."""
    assert check_token("and", ["NAME", "and", 0])
def test_ampersand():

View file

@ -374,6 +374,29 @@ def test_subproc_toks_pyeval_nested():
assert exp == obs
@pytest.mark.parametrize('phrase', [
    'xandy',
    'xory',
    'xand',
    'andy',
    'xor',
    'ory',
    'x-and',
    'x-or',
    'and-y',
    'or-y',
    'x-and-y',
    'x-or-y',
    'in/and/path',
    'in/or/path',
])
def test_subproc_toks_and_or(phrase):
    """Words merely containing 'and'/'or' must not be split into boolean
    operators: the whole echo command wraps as a single subprocess."""
    line = "echo " + phrase
    result = subproc_toks(line, lexer=LEXER, returnline=True)
    assert result == "![{0}]".format(line)
def test_subproc_toks_pyeval_nested_parens():
s = "echo @(min(1, 42))"
inp = "({0})".format(s)

View file

@ -4,6 +4,7 @@
Written using a hybrid of ``tokenize`` and PLY.
"""
import io
import re
# 'keyword' interferes with ast.keyword
import keyword as kwmod
@ -116,21 +117,33 @@ def token_map():
return tm
# Keywords that only act as Python keywords when set off by whitespace;
# when glued to other characters (e.g. "x-and-y") they are part of a
# subprocess word instead.
NEED_WHITESPACE = frozenset(["and", "or"])


@lazyobject
def RE_NEED_WHITESPACE():
    # Optional single leading whitespace char, one of the NEED_WHITESPACE
    # words, then either a whitespace char or a line-continuation
    # backslash at end-of-line.
    pattern = r"\s?(" + "|".join(NEED_WHITESPACE) + r")(\s|[\\]$)"
    return re.compile(pattern)
def handle_name(state, token):
    """Function for handling name tokens.

    Yields a single token whose type depends on the lexer mode:

    * In Python mode, ``and``/``or`` are only promoted to keyword tokens
      when properly surrounded by whitespace (per RE_NEED_WHITESPACE);
      otherwise keywords map to their uppercased type and everything else
      is a NAME.
    * In subprocess mode, whitespace-delimited ``and``/``or`` become the
      AND/OR operator tokens; anything else stays a NAME so that words
      like ``x-and-y`` remain part of one subprocess command.
    """
    typ = "NAME"
    state["last"] = token
    needs_whitespace = token.string in NEED_WHITESPACE
    # Look at the line starting one char before the token so the regex can
    # see the preceding whitespace (if any) as well as what follows.
    has_whitespace = needs_whitespace and RE_NEED_WHITESPACE.match(
        token.line[max(0, token.start[1] - 1) :]
    )
    if state["pymode"][-1][0]:
        if needs_whitespace and not has_whitespace:
            # "and"/"or" without surrounding whitespace: leave as NAME.
            pass
        elif token.string in kwmod.kwlist:
            typ = token.string.upper()
        yield _new_token(typ, token.string, token.start)
    else:
        if has_whitespace and token.string == "and":
            yield _new_token("AND", token.string, token.start)
        elif has_whitespace and token.string == "or":
            yield _new_token("OR", token.string, token.start)
        else:
            yield _new_token("NAME", token.string, token.start)