fixed up dashing

This commit is contained in:
Anthony Scopatz 2018-11-08 13:16:44 -05:00
parent 84a5b05a36
commit 351fa27d7c
3 changed files with 43 additions and 14 deletions

28
news/andash.rst Normal file
View file

@ -0,0 +1,28 @@
**Added:**
* <news item>
**Changed:**
* <news item>
**Deprecated:**
* <news item>
**Removed:**
* <news item>
**Fixed:**
* Fixed issue with ``and`` & ``or`` being incorrectly tokenized in implicit
subprocesses. Auto-wrapping of certain subprocesses will now correctly work.
For example::
$ echo x-and-y
x-and-y
**Security:**
* <news item>

View file

@ -2,4 +2,4 @@
aliases['echo'] = lambda args, stdin=None: print(' '.join(args)) aliases['echo'] = lambda args, stdin=None: print(' '.join(args))
$WAKKA = "jawaka" $WAKKA = "jawaka"
x = $(echo "hello mom" $WAKKA) x = $(echo "hello mom" $WAKKA)

View file

@ -4,6 +4,7 @@
Written using a hybrid of ``tokenize`` and PLY. Written using a hybrid of ``tokenize`` and PLY.
""" """
import io import io
import re
# 'keyword' interferes with ast.keyword # 'keyword' interferes with ast.keyword
import keyword as kwmod import keyword as kwmod
@ -119,34 +120,34 @@ def token_map():
NEED_WHITESPACE = frozenset(["and", "or"]) NEED_WHITESPACE = frozenset(["and", "or"])
@lazyobject
def RE_NEED_WHITESPACE():
pattern = "\s?(" + "|".join(NEED_WHITESPACE) + r")(\s|[\\]$)"
return re.compile(pattern)
def handle_name(state, token): def handle_name(state, token):
"""Function for handling name tokens""" """Function for handling name tokens"""
typ = "NAME" typ = "NAME"
prev = state["last"] prev = state["last"]
state["last"] = token state["last"] = token
next_tok = next(state["stream"]) needs_whitespace = token.string in NEED_WHITESPACE
has_whitespace = (prev is not None and has_whitespace = needs_whitespace and RE_NEED_WHITESPACE.match(
prev.end != token.start token.line[max(0, token.start[1] - 1) :]
and token.end != next_tok.start) )
#)
print(state)
if state["pymode"][-1][0]: if state["pymode"][-1][0]:
if not has_whitespace and token.string in NEED_WHITESPACE: if needs_whitespace and not has_whitespace:
pass pass
elif token.string in kwmod.kwlist: elif token.string in kwmod.kwlist:
typ = token.string.upper() typ = token.string.upper()
print("transformed to ", typ)
yield _new_token(typ, token.string, token.start) yield _new_token(typ, token.string, token.start)
else: else:
if token.string == "and" and has_whitespace: if has_whitespace and token.string == "and":
yield _new_token("AND", token.string, token.start) yield _new_token("AND", token.string, token.start)
elif token.string == "or" and has_whitespace: elif has_whitespace and token.string == "or":
yield _new_token("OR", token.string, token.start) yield _new_token("OR", token.string, token.start)
else: else:
yield _new_token("NAME", token.string, token.start) yield _new_token("NAME", token.string, token.start)
print('\n\n')
#yield from handle_token(state, next_tok)
yield _new_token(next_tok.type, next_tok.string, next_tok.start)
def _end_delimiter(state, token): def _end_delimiter(state, token):