Mirror of https://github.com/xonsh/xonsh.git (synced 2025-03-05 17:00:58 +01:00)
more formatting

commit 0e15ec0385 (parent 4aeac8fb4e)
4 changed files with 23 additions and 19 deletions
@@ -1,6 +1,6 @@
 """
-Tools to open .py files as Unicode, using the encoding specified within the file,
-as per PEP 263.
+Tools to open .py files as Unicode, using the encoding specified within the
+file, as per PEP 263.
 
 Much of the code is taken from the tokenize module in Python 3.2.
 
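For reference, a minimal sketch of the behaviour the module docstring describes, using the standard library's tokenize.open(), which applies the same PEP 263 rules; the file name below is illustrative only.

import tokenize

# Write a script whose PEP 263 coding cookie names a non-default encoding.
with open('latin1_script.py', 'w', encoding='latin-1') as f:
    f.write('# -*- coding: latin-1 -*-\n')
    f.write('name = "café"\n')

# tokenize.open() sniffs the cookie (or a BOM) and returns a text-mode
# file object already decoded with that encoding.
with tokenize.open('latin1_script.py') as f:
    print(f.read())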
@@ -44,20 +44,21 @@ except ImportError:
 # Copied from Python 3.2 tokenize
 def detect_encoding(readline):
     """
-    The detect_encoding() function is used to detect the encoding that should
-    be used to decode a Python source file. It requires one argment, readline,
-    in the same way as the tokenize() generator.
+    The detect_encoding() function is used to detect the encoding that
+    should be used to decode a Python source file. It requires one
+    argment, readline, in the same way as the tokenize() generator.
 
     It will call readline a maximum of twice, and return the encoding used
     (as a string) and a list of any lines (left as bytes) it has read in.
 
     It detects the encoding from the presence of a utf-8 bom or an encoding
-    cookie as specified in pep-0263. If both a bom and a cookie are present,
-    but disagree, a SyntaxError will be raised. If the encoding cookie is an
-    invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
-    'utf-8-sig' is returned.
+    cookie as specified in pep-0263. If both a bom and a cookie are
+    present, but disagree, a SyntaxError will be raised. If the encoding
+    cookie is an invalid charset, raise a SyntaxError. Note that if a
+    utf-8 bom is found, 'utf-8-sig' is returned.
 
-    If no encoding is specified, then the default of 'utf-8' will be returned.
+    If no encoding is specified, then the default of 'utf-8' will be
+    returned.
     """
     bom_found = False
     encoding = None
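A small usage sketch of the contract spelled out in this docstring; it uses the stdlib tokenize.detect_encoding, which this copied function mirrors, so the same calls should work against either.

import io
import tokenize

# Cookie case: a PEP 263 comment names the encoding.
src = b'# -*- coding: utf-8 -*-\nx = 1\n'
enc, lines_read = tokenize.detect_encoding(io.BytesIO(src).readline)
print(enc)         # 'utf-8'
print(lines_read)  # the raw byte lines consumed while sniffing (at most two)

# BOM case: a UTF-8 byte-order mark yields 'utf-8-sig', as documented above.
enc, _ = tokenize.detect_encoding(io.BytesIO(b'\xef\xbb\xbfx = 1\n').readline)
print(enc)         # 'utf-8-sig'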
@@ -58,7 +58,7 @@ class XonshConsoleLexer(PythonLexer):
     'root': [(r'^(>>>|\.\.\.) ', Generic.Prompt),
              (r'\n(>>>|\.\.\.)', Generic.Prompt),
              (r'\n(?![>.][>.][>.] )([^\n]*)', Generic.Output),
-             (r'\n(?![>.][>.][>.] )(.*?)$', Generic.Output), ] + ROOT_TOKENS,
+             (r'\n(?![>.][>.][>.] )(.*?)$', Generic.Output)] + ROOT_TOKENS,
     'pymode': PYMODE_TOKENS,
     'subproc': SUBPROC_TOKENS,
     }
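To make the rule tuples above easier to read in isolation, here is a self-contained toy Pygments RegexLexer built around the same prompt-versus-output split; the class name and the session string are made up for illustration and are not part of xonsh.

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexer import RegexLexer
from pygments.token import Generic, Text

class ToyConsoleLexer(RegexLexer):
    """Toy lexer: prompts vs. output, mirroring the 'root' rules above."""
    name = 'ToyConsole'
    tokens = {
        'root': [
            (r'^(>>>|\.\.\.) ', Generic.Prompt),            # prompt at line start
            (r'\n(>>>|\.\.\.)', Generic.Prompt),            # prompt after a newline
            (r'\n(?![>.][>.][>.] )(.*?)$', Generic.Output), # non-prompt lines are output
            (r'.', Text),                                   # everything else
        ],
    }

session = ">>> print('hi')\nhi\n"
print(highlight(session, ToyConsoleLexer(), TerminalFormatter()))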
@@ -93,8 +93,11 @@ class Shell(Cmd):
                          stdout=stdout)
         self.execer = Execer()
         env = builtins.__xonsh_env__
-        self.ctx = ctx if ctx is not None else \
-            xonshrc_context(rcfile=env.get('XONSHRC', None), execer=self.execer)
+        if ctx is not None:
+            self.ctx = ctx
+        else:
+            rc = env.get('XONSHRC', None)
+            self.ctx = xonshrc_context(rcfile=rc, execer=self.execer)
         self.ctx['__name__'] = '__main__'
         self.completer = Completer()
         self.buffer = []
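The new branch in Shell.__init__ is the familiar "use the argument if given, otherwise build a default from the environment" fallback. A hedged, self-contained sketch of that shape follows; load_rc_context and resolve_context are stand-ins for illustration, not xonsh's real xonshrc_context.

import os

def load_rc_context(rcfile=None):
    # Stand-in only: the real xonshrc_context would execute the rc file
    # and return the resulting namespace.
    return {'__rcfile__': rcfile}

def resolve_context(ctx=None):
    if ctx is not None:          # an explicit context always wins
        return ctx
    rc = os.environ.get('XONSHRC', None)
    return load_rc_context(rcfile=rc)

print(resolve_context())             # built from $XONSHRC (possibly unset)
print(resolve_context({'x': 1}))     # caller-supplied context passed through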