diff --git a/xonsh/ast.py b/xonsh/ast.py
index 98931ff51..ac147ca90 100644
--- a/xonsh/ast.py
+++ b/xonsh/ast.py
@@ -103,7 +103,6 @@ class CtxAwareTransformer(NodeTransformer):
         maxcol = None# if self.mode == 'eval' else node.col_offset
         spline = subproc_toks(line, mincol=mincol, maxcol=maxcol,
                               returnline=False, lexer=self.parser.lexer)
-        print('spline',spline)
         try:
             newnode = self.parser.parse(spline, mode=self.mode)
             newnode = newnode.body
diff --git a/xonsh/lexer.py b/xonsh/lexer.py
index ad5d8e89f..8d8c5ed8a 100644
--- a/xonsh/lexer.py
+++ b/xonsh/lexer.py
@@ -315,16 +315,13 @@ class Lexer(object):
 
     def input(self, s):
         """Calls the lexer on the string s."""
-        print("input\n", repr(s))
         s = re.sub(r'#.*?\n', '', s)
-        print("mod\n",repr(s))
         self.token_stream = preprocess_tokens(tok(s))
 
     def token(self):
         """Retrieves the next token."""
         try:
             self.last = next(self.token_stream)
-            print(self.last)
             return self.last
         except StopIteration:
             return None
diff --git a/xonsh/tools.py b/xonsh/tools.py
index 4b18c09d7..1c35e8e02 100644
--- a/xonsh/tools.py
+++ b/xonsh/tools.py
@@ -48,7 +48,6 @@ def subproc_toks(line, mincol=-1, maxcol=None, lexer=None, returnline=False):
     for tok in lexer:
         pos = tok.lexpos
         if pos >= maxcol:
-            print('too big',pos,maxcol)
             break
         if len(toks) > 0 and toks[-1].type == 'SEMI':
             toks.clear()
@@ -72,7 +71,6 @@
     if len(toks) == 0:
         return  # handle comment lines
     beg, end = toks[0].lexpos, (toks[-1].lexpos + end_offset)
-    print(repr(line), toks, beg, end)
     rtn = '$[' + line[beg:end] + ']'
    if returnline:
        rtn = line[:beg] + rtn + line[end:]