From 0430524ee65a5635101fd3ae7dad811c89a13dbc Mon Sep 17 00:00:00 2001
From: Anthony Scopatz
Date: Sun, 14 Aug 2016 18:34:48 -0400
Subject: [PATCH] raw attempt

---
 xonsh/lexer.py        |  3 ++-
 xonsh/parsers/base.py | 19 +++++++++++++++++--
 xonsh/tokenize.py     |  9 +++++++--
 3 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/xonsh/lexer.py b/xonsh/lexer.py
index 6c491876e..72948d940 100644
--- a/xonsh/lexer.py
+++ b/xonsh/lexer.py
@@ -15,7 +15,7 @@ from xonsh.lazyasd import lazyobject
 from xonsh.platform import PYTHON_VERSION_INFO
 from xonsh.tokenize import (OP, IOREDIRECT, STRING, DOLLARNAME, NUMBER,
                             SEARCHPATH, NEWLINE, INDENT, DEDENT, NL, COMMENT, ENCODING,
-                            ENDMARKER, NAME, ERRORTOKEN, tokenize, TokenError)
+                            ENDMARKER, NAME, ERRORTOKEN, tokenize, TokenError, NOCOMMA)
 
 
 @lazyobject
@@ -62,6 +62,7 @@ def token_map():
         from xonsh.tokenize import ASYNC, AWAIT
         tm[ASYNC] = 'ASYNC'
         tm[AWAIT] = 'AWAIT'
+    tm[NOCOMMA] = 'NOCOMMA'
     return tm
 
 
diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py
index c6f4c60f1..b8a3375ea 100644
--- a/xonsh/parsers/base.py
+++ b/xonsh/parsers/base.py
@@ -233,7 +233,7 @@
                      'op_factor_list', 'trailer_list', 'testlist_comp',
                      'yield_expr_or_testlist_comp', 'dictorsetmaker',
                      'comma_subscript_list', 'test', 'sliceop', 'comp_iter',
-                     'yield_arg', 'test_comma_list']
+                     'yield_arg', 'test_comma_list', 'comma_nocomma_list', 'macroarglist']
         for rule in opt_rules:
             self._opt_rule(rule)
 
@@ -247,7 +247,7 @@
                       'pm_term', 'op_factor', 'trailer', 'comma_subscript',
                       'comma_expr_or_star_expr', 'comma_test', 'comma_argument',
                       'comma_item', 'attr_period_name', 'test_comma',
-                      'equals_yield_expr_or_testlist']
+                      'equals_yield_expr_or_testlist', 'comma_nocomma']
         for rule in list_rules:
             self._list_rule(rule)
 
@@ -1831,6 +1831,21 @@
         """
         p[0] = [p[1]]
 
+    def p_comma_nocomma(self, p):
+        """comma_nocomma : COMMA NOCOMMA"""
+        p[0] = [p[2]]
+
+    def p_macroarglist(self, p):
+        """macroarglist : NOCOMMA comma_nocomma_list_opt comma_opt"""
+        p1, p2 = p[1], p[2]
+        if p2 is None:
+            elts = [p1]
+        else:
+            elts = [p1] + p2
+        p0 = ast.Tuple(elts=elts, ctx=ast.Load(), lineno=p1.lineno,
+                       col_offset=p1.col_offset)
+        p[0] = p0
+
     def p_subscriptlist(self, p):
         """subscriptlist : subscript comma_subscript_list_opt comma_opt"""
         p1, p2 = p[1], p[2]
diff --git a/xonsh/tokenize.py b/xonsh/tokenize.py
index 8ab583c79..e3f50610c 100644
--- a/xonsh/tokenize.py
+++ b/xonsh/tokenize.py
@@ -51,7 +51,7 @@ import token
 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
                            "NL", "untokenize", "ENCODING", "TokenInfo",
                            "TokenError", 'SEARCHPATH', 'ATDOLLAR', 'ATEQUAL',
-                           'DOLLARNAME', 'IOREDIRECT']
+                           'DOLLARNAME', 'IOREDIRECT', 'NOCOMMA']
 PY35 = PYTHON_VERSION_INFO >= (3, 5, 0)
 if PY35:
     ASYNC = token.ASYNC
@@ -85,6 +85,9 @@ N_TOKENS += 1
 ATEQUAL = N_TOKENS
 tok_name[N_TOKENS] = 'ATEQUAL'
 N_TOKENS += 1
+NOCOMMA = N_TOKENS
+tok_name[N_TOKENS] = 'NOCOMMA'
+N_TOKENS += 1
 _xonsh_tokens = {
     '?': 'QUESTION',
     '@=': 'ATEQUAL',
@@ -241,8 +244,10 @@ Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->",
 Bracket = '[][(){}]'
 Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
 Funny = group(Operator, Bracket, Special)
+NoComma = r"('.*'|\".*\"|'''.*'''|\"\"\".*\"\"\"|\(.*\)|\[.*\]|{.*}|[^,]*)*"
 
-PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath)
+PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath,
+                   NoComma)
 Token = Ignore + PlainToken
 
 # First (or only) line of ' or " string.
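
A minimal standalone sketch (not part of the patch) of what the new NoComma
pattern is meant to capture: a run of text up to the next top-level comma,
with quoted strings and bracketed groups consumed as whole units so that
commas nested inside them do not end the token.  Only the NoComma pattern
below comes from the patch; the driver loop is illustrative.

    import re

    # Pattern as added to xonsh/tokenize.py above.
    NoComma = r"('.*'|\".*\"|'''.*'''|\"\"\".*\"\"\"|\(.*\)|\[.*\]|{.*}|[^,]*)*"

    for text in ["x + y", "(a, b)", "[1, 2, 3]", "'a,b'", "a, b"]:
        # re.match anchors at the start; group() is the prefix the
        # pattern consumed before hitting a top-level comma.
        print(repr(text), '->', repr(re.match(NoComma, text).group()))

The first four inputs match in full because their commas sit inside
parentheses, brackets, or quotes; "a, b" stops at the top-level comma and
yields only "a".  Each such match becomes one NOCOMMA token, and the parser's
macroarglist rule then collects the comma-separated NOCOMMA tokens into an
ast.Tuple of macro arguments.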