mirror of https://github.com/xonsh/xonsh.git
synced 2025-03-05 17:00:58 +01:00

Merge pull request #2930 from xonsh/andash

More and/or subprocess token fixes.

Commit d53fc4f40a
5 changed files with 76 additions and 9 deletions
news/andash.rst (new file, 28 lines)

@@ -0,0 +1,28 @@
+**Added:**
+
+* <news item>
+
+**Changed:**
+
+* <news item>
+
+**Deprecated:**
+
+* <news item>
+
+**Removed:**
+
+* <news item>
+
+**Fixed:**
+
+* Fixed issue with ``and`` & ``or`` being incorrectly tokenized in implicit
+  subprocesses. Auto-wrapping of certain subprocesses will now correctly work.
+  For example::
+
+    $ echo x-and-y
+    x-and-y
+
+**Security:**
+
+* <news item>
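To see why the hyphenated example above stays a single word, here is a minimal standalone sketch of the whitespace rule this commit adds to the lexer (only Python's ``re`` module is used; the ``looks_like_operator`` helper is hypothetical, not part of xonsh)::

    import re

    # Same pattern as the RE_NEED_WHITESPACE object added further down in
    # this diff: "and"/"or" only count as operators when they are delimited
    # by whitespace (or end the line before a backslash continuation).
    NEED_WHITESPACE = frozenset(["and", "or"])
    RE_NEED_WHITESPACE = re.compile(
        r"\s?(" + "|".join(NEED_WHITESPACE) + r")(\s|[\\]$)"
    )

    def looks_like_operator(line, start_col):
        """True if the word starting at start_col is whitespace-delimited."""
        # Look one character back so the regex can see a preceding space.
        return bool(RE_NEED_WHITESPACE.match(line[max(0, start_col - 1):]))

    # "and" embedded in a hyphenated argument stays part of the word ...
    assert not looks_like_operator("echo x-and-y", 7)
    # ... while a free-standing "and" between words is a real operator.
    assert looks_like_operator("echo x and y", 7)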
@@ -2,4 +2,4 @@
 aliases['echo'] = lambda args, stdin=None: print(' '.join(args))
 $WAKKA = "jawaka"
 x = $(echo "hello mom" $WAKKA)
@@ -165,7 +165,10 @@ def test_atdollar_expression():
 
 
 def test_and():
-    assert check_token("and", ["AND", "and", 0])
+    # no preceding whitespace or other tokens, so this
+    # resolves to NAME, since it doesn't make sense for
+    # Python code to start with "and"
+    assert check_token("and", ["NAME", "and", 0])
 
 
 def test_ampersand():
@@ -374,6 +374,29 @@ def test_subproc_toks_pyeval_nested():
     assert exp == obs
 
 
+@pytest.mark.parametrize('phrase', [
+    'xandy',
+    'xory',
+    'xand',
+    'andy',
+    'xor',
+    'ory',
+    'x-and',
+    'x-or',
+    'and-y',
+    'or-y',
+    'x-and-y',
+    'x-or-y',
+    'in/and/path',
+    'in/or/path',
+])
+def test_subproc_toks_and_or(phrase):
+    s = "echo " + phrase
+    exp = "![{0}]".format(s)
+    obs = subproc_toks(s, lexer=LEXER, returnline=True)
+    assert exp == obs
+
+
 def test_subproc_toks_pyeval_nested_parens():
     s = "echo @(min(1, 42))"
     inp = "({0})".format(s)
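Roughly, each parametrized case above reduces to the following standalone check (a sketch that assumes the test module's own setup, i.e. ``subproc_toks`` from ``xonsh.tools`` and ``Lexer`` from ``xonsh.lexer``)::

    from xonsh.lexer import Lexer
    from xonsh.tools import subproc_toks

    lexer = Lexer()
    lexer.build()  # same initialization the test module applies to its LEXER

    line = "echo x-and-y"
    wrapped = subproc_toks(line, lexer=lexer, returnline=True)
    # With this fix the embedded "and" no longer splits the command, so the
    # whole line is auto-wrapped as one subprocess expression.
    assert wrapped == "![echo x-and-y]"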
@@ -4,6 +4,7 @@
 Written using a hybrid of ``tokenize`` and PLY.
 """
 import io
+import re
 
 # 'keyword' interferes with ast.keyword
 import keyword as kwmod
@@ -116,21 +117,33 @@ def token_map():
     return tm
 
 
+NEED_WHITESPACE = frozenset(["and", "or"])
+
+
+@lazyobject
+def RE_NEED_WHITESPACE():
+    pattern = r"\s?(" + "|".join(NEED_WHITESPACE) + r")(\s|[\\]$)"
+    return re.compile(pattern)
+
+
 def handle_name(state, token):
     """Function for handling name tokens"""
     typ = "NAME"
+    state["last"] = token
+    needs_whitespace = token.string in NEED_WHITESPACE
+    has_whitespace = needs_whitespace and RE_NEED_WHITESPACE.match(
+        token.line[max(0, token.start[1] - 1) :]
+    )
     if state["pymode"][-1][0]:
-        if token.string in kwmod.kwlist:
+        if needs_whitespace and not has_whitespace:
+            pass
+        elif token.string in kwmod.kwlist:
             typ = token.string.upper()
-        state["last"] = token
         yield _new_token(typ, token.string, token.start)
     else:
-        prev = state["last"]
-        state["last"] = token
-        has_whitespace = prev.end != token.start
-        if token.string == "and" and has_whitespace:
+        if has_whitespace and token.string == "and":
             yield _new_token("AND", token.string, token.start)
-        elif token.string == "or" and has_whitespace:
+        elif has_whitespace and token.string == "or":
             yield _new_token("OR", token.string, token.start)
         else:
             yield _new_token("NAME", token.string, token.start)
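On the Python-mode side, the new ``needs_whitespace``/``has_whitespace`` branch is what makes the updated ``test_and`` above expect a plain NAME. A hedged sketch of that decision in plain Python (``name_or_keyword`` is a hypothetical helper, not xonsh's actual handler)::

    import re

    RE_NEED_WHITESPACE = re.compile(r"\s?(and|or)(\s|[\\]$)")

    def name_or_keyword(line, start_col, word):
        # Mirror of the new pymode branch: an "and"/"or" that is not
        # whitespace-delimited falls through and stays a plain NAME.
        window = line[max(0, start_col - 1):]
        if not RE_NEED_WHITESPACE.match(window):
            return "NAME"
        return word.upper()

    # A bare "and" with nothing around it lexes as NAME (updated test_and).
    assert name_or_keyword("and", 0, "and") == "NAME"
    # Ordinary Python usage with surrounding whitespace still yields AND.
    assert name_or_keyword("x and y", 2, "and") == "AND"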