raw attempt

This commit is contained in:
Anthony Scopatz 2016-08-14 18:34:48 -04:00
parent acce6ed89a
commit 0430524ee6
3 changed files with 26 additions and 5 deletions

View file

@@ -15,7 +15,7 @@ from xonsh.lazyasd import lazyobject
from xonsh.platform import PYTHON_VERSION_INFO from xonsh.platform import PYTHON_VERSION_INFO
from xonsh.tokenize import (OP, IOREDIRECT, STRING, DOLLARNAME, NUMBER, from xonsh.tokenize import (OP, IOREDIRECT, STRING, DOLLARNAME, NUMBER,
SEARCHPATH, NEWLINE, INDENT, DEDENT, NL, COMMENT, ENCODING, SEARCHPATH, NEWLINE, INDENT, DEDENT, NL, COMMENT, ENCODING,
ENDMARKER, NAME, ERRORTOKEN, tokenize, TokenError) ENDMARKER, NAME, ERRORTOKEN, tokenize, TokenError, NOCOMMA)
@lazyobject @lazyobject
@@ -62,6 +62,7 @@ def token_map():
from xonsh.tokenize import ASYNC, AWAIT from xonsh.tokenize import ASYNC, AWAIT
tm[ASYNC] = 'ASYNC' tm[ASYNC] = 'ASYNC'
tm[AWAIT] = 'AWAIT' tm[AWAIT] = 'AWAIT'
tm[NOCOMMA] = 'NOCOMMA'
return tm return tm

View file

@@ -233,7 +233,7 @@ class BaseParser(object):
'op_factor_list', 'trailer_list', 'testlist_comp', 'op_factor_list', 'trailer_list', 'testlist_comp',
'yield_expr_or_testlist_comp', 'dictorsetmaker', 'yield_expr_or_testlist_comp', 'dictorsetmaker',
'comma_subscript_list', 'test', 'sliceop', 'comp_iter', 'comma_subscript_list', 'test', 'sliceop', 'comp_iter',
'yield_arg', 'test_comma_list'] 'yield_arg', 'test_comma_list', 'comma_nocomma_list', 'macroarglist']
for rule in opt_rules: for rule in opt_rules:
self._opt_rule(rule) self._opt_rule(rule)
@@ -247,7 +247,7 @@ class BaseParser(object):
'pm_term', 'op_factor', 'trailer', 'comma_subscript', 'pm_term', 'op_factor', 'trailer', 'comma_subscript',
'comma_expr_or_star_expr', 'comma_test', 'comma_argument', 'comma_expr_or_star_expr', 'comma_test', 'comma_argument',
'comma_item', 'attr_period_name', 'test_comma', 'comma_item', 'attr_period_name', 'test_comma',
'equals_yield_expr_or_testlist'] 'equals_yield_expr_or_testlist', 'comma_nocomma']
for rule in list_rules: for rule in list_rules:
self._list_rule(rule) self._list_rule(rule)
@@ -1831,6 +1831,21 @@ class BaseParser(object):
""" """
p[0] = [p[1]] p[0] = [p[1]]
def p_comma_nocomma(self, p):
    """comma_nocomma : COMMA NOCOMMA"""
    # PLY grammar action: discard the leading COMMA token and wrap the
    # NOCOMMA value in a one-element list so the generated
    # comma_nocomma_list rule can concatenate successive elements.
    nocomma_tok = p[2]
    p[0] = [nocomma_tok]
def p_macroarglist(self, p):
    """macroarglist : NOCOMMA comma_nocomma_list_opt comma_opt"""
    # Build an ast.Tuple of macro arguments: the first NOCOMMA plus any
    # that followed as ", NOCOMMA" pairs (p2 is None when there were none).
    p1, p2 = p[1], p[2]
    elts = [p1] if p2 is None else [p1] + p2
    # BUG FIX: original wrote `ctx.load()`, which raises NameError at
    # runtime -- the Tuple's context must be passed as ctx=ast.Load().
    p[0] = ast.Tuple(elts=elts, ctx=ast.Load(), lineno=p1.lineno,
                     col_offset=p1.col_offset)
def p_subscriptlist(self, p): def p_subscriptlist(self, p):
"""subscriptlist : subscript comma_subscript_list_opt comma_opt""" """subscriptlist : subscript comma_subscript_list_opt comma_opt"""
p1, p2 = p[1], p[2] p1, p2 = p[1], p[2]

View file

@@ -51,7 +51,7 @@ import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo", "NL", "untokenize", "ENCODING", "TokenInfo",
"TokenError", 'SEARCHPATH', 'ATDOLLAR', 'ATEQUAL', "TokenError", 'SEARCHPATH', 'ATDOLLAR', 'ATEQUAL',
'DOLLARNAME', 'IOREDIRECT'] 'DOLLARNAME', 'IOREDIRECT', 'NOCOMMA']
PY35 = PYTHON_VERSION_INFO >= (3, 5, 0) PY35 = PYTHON_VERSION_INFO >= (3, 5, 0)
if PY35: if PY35:
ASYNC = token.ASYNC ASYNC = token.ASYNC
@@ -85,6 +85,9 @@ N_TOKENS += 1
ATEQUAL = N_TOKENS ATEQUAL = N_TOKENS
tok_name[N_TOKENS] = 'ATEQUAL' tok_name[N_TOKENS] = 'ATEQUAL'
N_TOKENS += 1 N_TOKENS += 1
NOCOMMA = N_TOKENS
tok_name[N_TOKENS] = 'NOCOMMA'
N_TOKENS += 1
_xonsh_tokens = { _xonsh_tokens = {
'?': 'QUESTION', '?': 'QUESTION',
'@=': 'ATEQUAL', '@=': 'ATEQUAL',
@@ -241,8 +244,10 @@ Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", r"//=?", r"->",
Bracket = '[][(){}]' Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special) Funny = group(Operator, Bracket, Special)
NoComma = r"('.*'|\".*\"|'''.*'''|\"\"\".*\"\"\"|\(.*\)|\[.*\]|{.*}|[^,]*)*"
PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath) PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath,
NoComma)
Token = Ignore + PlainToken Token = Ignore + PlainToken
# First (or only) line of ' or " string. # First (or only) line of ' or " string.