# pmacs3/mode/python.py
import commands, os.path, string, sys, traceback
import color, completer, context, default, mode, method, regex, tab, method.introspect
from point import Point
from lex import Grammar, PatternRule, RegionRule, OverridePatternRule
from parse import Any, And, Or, Optional, Name, Match, Matchs
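# bike is the Bicycle Repair Man refactoring library; it is optional, and when
# it is missing the refactoring commands below simply report that it is not
# installed.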
try:
    import bike
    has_bike = True
except ImportError:
    has_bike = False
class StringGrammar1(Grammar):
    rules = [
        PatternRule(r'octal', r'\\[0-7]{3}'),
        PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'),
        PatternRule(r'escaped', r'\\.'),
        PatternRule(r'data', r"[^\\']+"),
    ]
class StringGrammar2(Grammar):
    rules = [
        PatternRule(r'octal', r'\\[0-7]{3}'),
        PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'),
        PatternRule(r'escaped', r'\\.'),
        PatternRule(r'data', r'[^\\"]+'),
    ]
class StringGrammar3(Grammar):
    rules = [
        PatternRule(r'octal', r'\\[0-7]{3}'),
        PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'),
        PatternRule(r'escaped', r'\\.'),
        PatternRule(r'data', r"(?:[^\\']|'(?!')|''(?!'))+"),
    ]
class StringGrammar4(Grammar):
    rules = [
        PatternRule(r'octal', r'\\[0-7]{3}'),
        PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'),
        PatternRule(r'escaped', r'\\.'),
        PatternRule(r'data', r'(?:[^\\"]|"(?!")|""(?!"))+'),
    ]
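# StringGrammar1/2 tokenize the contents of single- and double-quoted strings;
# StringGrammar3/4 handle the triple-quoted forms, where the lookaheads in the
# data rule allow one or two quote characters to appear inside the string
# without closing the region.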
class PythonGrammar(Grammar):
    rules = [
        PatternRule(r'functionname', r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'classname', r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'python_reserved', r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'),
        PatternRule(r'python_keyword', r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'),
        PatternRule(r"python_builtin", r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])'),
        PatternRule(r'methodcall', r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
        PatternRule(r'functioncall', r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
        PatternRule(r'system_identifier', r'__[a-zA-Z0-9_]+__'),
        PatternRule(r'private_identifier', r'__[a-zA-Z0-9_]*'),
        PatternRule(r'hidden_identifier', r'_[a-zA-Z0-9_]*'),
        RegionRule(r'rawstring', r'r"""', StringGrammar4, r'"""'),
        RegionRule(r'rawstring', r"r'''", StringGrammar3, r"'''"),
        RegionRule(r'rawstring', r'r"', StringGrammar2, r'"'),
        RegionRule(r'rawstring', r"r'", StringGrammar1, r"'"),
        RegionRule(r'string', r'u?"""', StringGrammar4, r'"""'),
        RegionRule(r'string', r"u?'''", StringGrammar3, r"'''"),
        RegionRule(r'string', r'u?"', StringGrammar2, r'"'),
        RegionRule(r'string', r"u?'", StringGrammar1, r"'"),
        PatternRule(r'identifier', r'[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'delimiter', r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*='),
        PatternRule(r"integer", r"(?<![\.0-9a-zA-Z_])(?:0|-?[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])"),
        PatternRule(r"float", r"(?<![\.0-9a-zA-Z_])(?:-?[0-9]+\.[0-9]*|-?\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|-?\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])"),
        PatternRule(r"imaginary", r"(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])"),
        PatternRule(r"operator", r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
        OverridePatternRule(r'comment', r'#@@:(?P<token>[.a-zA-Z0-9_]+):(?P<mode>[.a-zA-Z0-9_]+) *$'),
        PatternRule(r'comment', r'#.*$'),
        PatternRule(r'continuation', r'\\\n$'),
        PatternRule(r'spaces', r' +'),
        PatternRule(r'eol', r'\n$'),
    ]
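# PythonGrammar note: the OverridePatternRule above recognizes trailing
# comments of the form "#@@:<token>:<mode>"; presumably this lets a buffer ask
# for the named token to be re-lexed with another mode's grammar (the actual
# override behaviour lives in lex.OverridePatternRule).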
class PythonTabber(tab.StackTabber):
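    '''Compute python indentation levels using a stack of markers.

    A sketch of the approach (the bracket and close-token handling comes from
    tab.StackTabber): scanning starts at the nearest line known to be flush
    left (a top-level if/try/class/def/for/while) and walks forward to the
    target line, pushing a marker for each block-opening keyword or open
    bracket and popping markers on dedents, close brackets, and keywords such
    as else/except/finally.
    '''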
    # NOTE: yield might initially seem like an endlevel name, but it's not one.
    # NOTE: return should be an endlevel name, but for now it can't be.
    endlevel_names = ('pass', 'raise', 'break', 'continue')
    startlevel_names = ('if', 'try', 'class', 'def', 'for', 'while')
    def __init__(self, m):
        tab.StackTabber.__init__(self, m)
        self.base_level = 0
    def is_base(self, y):
        if y == 0:
            # we always know that line 0 is indented at the 0 level
            return True
        tokens = self.get_tokens(y)
        t0 = tokens[0]
        if t0.name == 'python_keyword' and t0.string in self.startlevel_names:
            # if a line has no whitespace and begins with something like
            # 'while', 'class', 'def', 'if', etc. then we can start at it
            return True
        else:
            # otherwise, we can't be sure that its level is correct
            return False
    def get_level(self, y):
        self._calc_level(y)
        return self.lines.get(y)
    def _calc_level(self, y):
        # ok, so first remember where we are going, and find our starting point
        target = y
        y = max(0, y - 1)
        while not self.is_base(y) and y > 0:
            y -= 1
        # ok, so clear out our stack and then loop over each line
        self.popped = False
        self.markers = []
        while y <= target:
            self.continued = False
            self.last_popped = self.popped
            self.popped = False
            tokens = self.get_tokens(y)
            currlvl = self.get_curr_level()
            # if we were continuing, let's pop that previous continuation token
            # and note that we're continuing
            if self.markers and self.markers[-1].name == 'cont':
                self.continued = True
                self._pop()
            # if we haven't reached the target line yet, we can detect how many
            # levels of unindentation, if any, the user chose on previous lines
            if y < target and len(tokens) > 2:
                if self.token_is_space(y, 0):
                    l = len(tokens[0].string)
                else:
                    l = 0
                while currlvl > l:
                    self._pop()
                    currlvl = self.get_curr_level()
                    self.popped = True
            # ok, having done all that, we can now process each token on the line
            for i in range(0, len(tokens)):
                currlvl = self._handle_token(currlvl, y, i)
            # so let's store the level for this line, as well as some debugging info
            self.lines[y] = currlvl
            self.record[y] = tuple(self.markers)
            y += 1
    def _handle_close_token(self, currlvl, y, i):
        try:
            return tab.StackTabber._handle_close_token(self, currlvl, y, i)
        except:
            return currlvl
    def _handle_other_token(self, currlvl, y, i):
        w = self.mode.tabwidth
        token = self.get_token(y, i)
        fqname = token.fqname()
        if fqname == 'continuation':
            # a continuation bumps the indentation level over, unless the last
            # line was also a continued line
            if self.continued:
                self._opt_append('cont', currlvl)
            else:
                self._opt_append('cont', currlvl + w)
        elif fqname == 'string.start':
            # while inside of a string, there is no indentation level
            self._opt_append('string', None)
        elif fqname == 'string.end':
            # since we're done with the string, resume our indentation level
            self._opt_pop('string')
        elif fqname == 'delimiter':
            # we only really care about a colon as part of a one-line statement,
            # i.e. "while ok: foo()" or "if True: print 3"
            if token.string == ':':
                if self.markers and self.markers[-1].name in ('[', '{', '('):
                    pass
                elif self.is_rightmost_token(y, i):
                    pass
                else:
                    self._pop()
        elif fqname == 'python_keyword':
            if token.string in self.endlevel_names:
                # we know we'll unindent at least once
                self._pop()
                self.popped = True
            elif token.string in self.startlevel_names and self.is_leftmost_token(y, i):
                # we know we will indent exactly once
                self._append(token.string, currlvl + w)
            elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first if/elif
                if not self.popped and not self.last_popped and self._peek_until('if', 'elif'):
                    self._pop_until('if', 'elif')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + w)
            elif token.string == 'except' and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first try
                if not self.popped and not self.last_popped:
                    self._pop_until('try')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + w)
            elif token.string == 'finally' and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first try/except
                if not self.popped and not self.last_popped:
                    self._pop_until('try', 'except')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + w)
        return currlvl
class PythonCheckSyntax(method.Method):
    '''Check the syntax of the current python file'''
    def _execute(self, w, **vargs):
        pythonlib = w.application.config.get('python.lib')
        if pythonlib:
            sys.path.insert(0, pythonlib)
        source = w.buffer.make_string()
        try:
            code = compile(source, w.buffer.path, 'exec')
            w.set_error("Syntax OK")
        except Exception, e:
            output = traceback.format_exc()
            w.application.data_buffer("*PythonSyntax*", output, switch_to=True,
                                      modename='error')
        if pythonlib:
            del sys.path[0]
class PythonDictCleanup(method.Method):
    '''Align assignment blocks and literal dictionaries'''
    def _execute(self, w, **vargs):
        cursor = w.logical_cursor()
        b = w.buffer
        # so this is where we will store the groups that we find
        groups_by_line = {}
        # the regexes we will try
        regexes = [regex.python_dict_cleanup,
                   regex.python_assign_cleanup]
        # if we aren't in a hash, inform the user and exit
        line = b.lines[cursor.y]
        myregex = None
        for r in regexes:
            if r.match(line):
                myregex = r
        if myregex is None:
            raise Exception, "Not a python dict line"
        groups_by_line[cursor.y] = myregex.match(line).groups()
        # find the beginning of this hash block
        start = 0
        i = cursor.y - 1
        while i >= 0:
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                start = i + 1
                break
            else:
                groups_by_line[i] = m.groups()
                i -= 1
        # find the end of this hash block
        end = len(b.lines) - 1
        i = cursor.y + 1
        while i < len(b.lines):
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                end = i - 1
                break
            else:
                groups_by_line[i] = m.groups()
                i += 1
        # assume that the least indented line is correct
        indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
        # find the longest hash key to base all the other padding on
        key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
        # for each line, format it correctly
        keys = groups_by_line.keys()
        keys.sort()
        data = ''
        for i in keys:
            indent_pad = ' ' * indent_w
            key = groups_by_line[i][1]
            sep = groups_by_line[i][3]
            value = groups_by_line[i][5]
            key_pad = ' ' * (key_w - len(key))
            if sep == '=':
                data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
            else:
                data += indent_pad + key + sep + ' ' + key_pad + value + '\n'
        # remove the old text and add the new
        start_p = Point(0, start)
        if end + 1 < len(w.buffer.lines):
            end_p = Point(0, end + 1)
        else:
            end_p = Point(len(w.buffer.lines[-1]), len(w.buffer.lines) - 1)
        w.delete(start_p, end_p)
        w.insert_string(start_p, data)
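# PythonDictCleanup example: with the cursor inside a block like
#     'foo': 1,
#     'barbaz': 22,
# the command pads every key to the width of the longest one so the values
# line up in a single column (the exact capture groups come from
# regex.python_dict_cleanup and regex.python_assign_cleanup, defined elsewhere).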
class PythonInsertTripleSquotes(method.Method):
    '''Insert a triple-quoted string using single-quotes'''
    _q = "'''"
    def _execute(self, w, **vargs):
        w.insert_string_at_cursor('%s%s' % (self._q, self._q))
        for i in range(0, 3):
            w.backward()
class PythonInsertTripleDquotes(PythonInsertTripleSquotes):
    '''Insert a triple-quoted string using double-quotes'''
    _q = '"""'
class PythonInitNames(method.Method):
    '''Build the maps of names defined in this module'''
    def _execute(self, w, **vargs):
        w.mode.context.build_name_map()
        w.application.set_error("Initialized name maps")
class PythonSemanticComplete(method.introspect.TokenComplete):
    _mini_prompt = 'Semantic Complete'
    def _min_completion(self, w, t):
        a = w.application
        a.methods['ipython-path-start'].execute(w, switch=False)
        name = buffer.IperlBuffer.create_name(w.buffer)
        b = a.get_buffer_by_name(name)
        line = w.buffer.lines[t.y]
        (x1, x2) = (t.x, t.end_x())
        candidates = [t.string + s for s in b.completions(line[x1:x2])]
        minlen = None
        for candidate in candidates:
            if minlen is None:
                minlen = len(candidate)
            else:
                minlen = min(minlen, len(candidate))
        return self._prune_candidates(t, minlen, candidates)
class PythonGotoName(method.Method):
    '''Jump to a class or function defined in this module'''
    args = [method.Argument("name", type(""), "pythonname", "Goto Name: ")]
    title = 'Name'
    def _get_dict(self, w):
        return w.mode.context.get_names()
    def _execute(self, w, **vargs):
        name = vargs['name']
        d = self._get_dict(w)
        if name in d:
            w.goto(Point(0, d[name]))
        else:
            w.application.set_error("%s %r was not found" % (self.title, name))
class PythonGotoFunction(PythonGotoName):
    '''Jump to a function defined in this module'''
    args = [method.Argument("name", type(""), "pythonfunction", "Goto Function: ")]
    title = 'Function'
    def _get_dict(self, w):
        return w.mode.context.get_functions()
class PythonGotoClass(PythonGotoName):
    '''Jump to a class defined in this module'''
    args = [method.Argument("name", type(""), "pythonclass", "Goto Class: ")]
    title = 'Class'
    def _get_dict(self, w):
        return w.mode.context.get_classes()
class PythonListNames(method.Method):
    '''Show the user all names defined in this module'''
    def _execute(self, w, **vargs):
        names = w.mode.context.get_names()
        output = '\n'.join(sorted(names)) + "\n"
        w.application.data_buffer("*Python-List-Names*", output, switch_to=True)
class PythonBrmFindReferences(method.Method):
    '''Find references to the name under the cursor (uses bicycle repairman)'''
    def _execute(self, w, **vargs):
        if w.mode.brm is None:
            w.set_error('bicycle repairman not installed')
            return
        path = w.buffer.path
        cursor = w.logical_cursor()
        y, x = cursor.yx()
        refs = w.mode.brm.findReferencesByCoordinates(path, y, x)
        lines = []
        for r in refs:
            s = '%s:%d: %3d%% confidence' % (r.filename, r.lineno, r.confidence)
            lines.append(s)
        n = len(lines)
        if n == 0:
            w.set_error('no references found')
            return
        data = '\n'.join(lines)
        w.application.data_buffer("*References*", data, switch_to=True)
        if n == 1:
            w.set_error('1 reference found')
        else:
            w.set_error('%d references found' % n)
class PythonNameCompleter(completer.Completer):
    def _get_dict(self, w):
        return w.buffer.method.old_window.mode.context.get_names()
    def get_candidates(self, s, w=None):
        return [n for n in self._get_dict(w) if n.startswith(s)]
class PythonFunctionCompleter(PythonNameCompleter):
    def _get_dict(self, w):
        return w.buffer.method.old_window.mode.context.get_functions()
class PythonClassCompleter(PythonNameCompleter):
    def _get_dict(self, w):
        return w.buffer.method.old_window.mode.context.get_classes()
CLASS_MATCH = And(Optional(Name('spaces')),
                  Matchs('python_keyword', ('public', 'protected', 'private')),
                  Name('spaces'),
                  Match('keyword', 'class'),
                  Name('spaces'),
                  Name('identifier'))
CLASS_OFFSET = 1
class PythonContext(context.Context):
    empty_match = And(Optional(Name('spaces')), Name('eol'))
    class_match = And(Optional(Name('spaces')),
                      Match('python_keyword', 'class'),
                      Name('spaces'),
                      Name('classname'))
    func_match = And(Optional(Name('spaces')),
                     Match('python_keyword', 'def'),
                     Name('spaces'),
                     Name('functionname'))
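    # empty_match matches a blank line (optional spaces followed by eol);
    # class_match and func_match match the token streams PythonGrammar
    # produces for lines like "class Foo:" and "def bar():" respectively.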
    def __init__(self, mode):
        self.mode = mode
        self.names = None
        self.namelines = None
        self.classes = None
        self.functions = None
    # new object methods
    def get_functions(self):
        if self.functions is None:
            self.build_name_map()
        return self.functions
    def get_classes(self):
        if self.classes is None:
            self.build_name_map()
        return self.classes
    def get_function_list(self):
        return self._ordered_dict(self.get_functions())
    def get_class_list(self):
        return self._ordered_dict(self.get_classes())
    # overridden object methods
    def _init_name_map(self):
        self.names = {}
        self.classes = {}
        self.functions = {}
        self.namelines = [(None, None)] * len(self.mode.window.buffer.lines)
    def _del_name(self, y, name):
        if name:
            if name in self.names:
                del self.names[name]
            if name in self.classes:
                del self.classes[name]
            if name in self.functions:
                del self.functions[name]
        self.namelines[y] = (None, None)
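    # _build_name_map records, for every class/def line, the dotted name (e.g.
    # "Outer.inner") in self.names, self.classes and self.functions, and tags
    # each buffer line in self.namelines with the name of the definition that
    # encloses it; that is presumably what get_line_name() reads for the
    # status bar.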
    def _build_name_map(self, y1, y2, last, curr, stack):
        blen = len(self.mode.window.buffer.lines)
        highlights = self.mode.window.get_highlighter()
        i = y1
        while i < y2:
            tokens = highlights.tokens[i]
            g = highlights.tokens[i]
            if self.empty_match.match(tokens):
                #if (len(g) == 1 and g[0].name == 'eol' or
                #    len(g) == 2 and g[0].name == 'spaces' and g[1].name == 'eol'):
                if last is None:
                    last = i
                i += 1
                #if i == y2 and y2 < blen:
                #    y2 += 1
                continue
            if g[0].name == 'spaces':
                j, lvl = 1, len(g[0].string)
            else:
                j, lvl = 0, 0
            while stack and lvl <= stack[-1][0]:
                stack.pop(-1)
            if last is not None:
                curr = '.'.join([x[1] for x in stack])
                if curr:
                    for k in range(last, i):
                        self.namelines[k] = (curr, None)
                last = None
            if len(g[j:]) > 3:
                d, found = None, False
                if g[j].name == 'python_keyword' and g[j].string == 'class':
                    d, found = self.classes, True
                elif g[j].name == 'python_keyword' and g[j].string == 'def':
                    d, found = self.functions, True
                if found:
                    stack.append([lvl, g[j+2].string])
                    curr = '.'.join([x[1] for x in stack])
                    d[curr] = i
                    self.names[curr] = i
                else:
                    curr = '.'.join([x[1] for x in stack])
            if i == y2 - 1 and curr != self.namelines[i][0] and y2 < blen:
                y2 += 1
            if curr:
                self.namelines[i] = (curr, None)
            i += 1
        if last is not None and y2 < len(self.namelines):
            if self.namelines[y2] and self.namelines[y2][0]:
                n = len(self.namelines[y2][0].split('.'))
                curr = '.'.join([x[1] for x in stack[:n]])
                if curr:
                    for k in range(last, y2):
                        self.namelines[k] = (curr, None)
class Python(mode.Fundamental):
    modename = 'Python'
    extensions = ['.py']
    detection = ['python']
    tabbercls = PythonTabber
    grammar = PythonGrammar
    opentokens = ('delimiter',)
    opentags = {'(': ')', '[': ']', '{': '}'}
    closetokens = ('delimiter',)
    closetags = {')': '(', ']': '[', '}': '{'}
    colors = {
        'python_keyword': ('cyan', 'default', 'bold'),
        'python_reserved': ('magenta', 'default', 'bold'),
        'python_builtin': ('cyan', 'default', 'bold'),
        'functionname': ('blue', 'default', 'bold'),
        'classname': ('green', 'default', 'bold'),
        'rawstring.start': ('green', 'default', 'bold'),
        'rawstring.data': ('green', 'default', 'bold'),
        'rawstring.null': ('green', 'default', 'bold'),
        'rawstring.escaped': ('magenta', 'default', 'bold'),
        'rawstring.end': ('green', 'default', 'bold'),
        'system_identifier': ('cyan', 'default', 'bold'),
    }
    config = {
        'python.lib': '.',
    }
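    # 'python.lib' is read by python-check-syntax, which temporarily prepends
    # it to sys.path before compiling the buffer.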
    lconfig = {
        'ignore-suffix': ['.pyc'],
    }
    actions = [PythonInitNames, PythonListNames, PythonGotoName,
               PythonGotoFunction, PythonGotoClass, PythonCheckSyntax,
               PythonDictCleanup, PythonSemanticComplete, PythonBrmFindReferences,
               PythonInsertTripleSquotes, PythonInsertTripleDquotes]
    completers = {
        "pythonname": PythonNameCompleter(None),
        "pythonfunction": PythonFunctionCompleter(None),
        "pythonclass": PythonClassCompleter(None),
    }
    #format = "%(flag)s %(bname)-18s (%(mname)s) %(indent)s %(cursor)s/%(mark)s %(perc)s [%(name)s]"
    format = "%(flag)s %(bname)-18s (%(mname)s) %(indent)s %(cursor)s/%(first)s %(perc)s [%(name)s]"
    def get_status_names(self):
        names = mode.Fundamental.get_status_names(self)
        c = self.window.logical_cursor()
        names['name'] = self.context.get_line_name(c.y)
        names['first'] = self.window.first.xy()
        return names
    def __init__(self, w):
        mode.Fundamental.__init__(self, w)
        self.add_bindings('close-paren', (')',))
        self.add_bindings('close-brace', ('}',))
        self.add_bindings('close-bracket', (']',))
        self.add_bindings('python-goto-name', ('C-c M-g',))
        self.add_bindings('python-goto-function', ('C-c M-f',))
        self.add_bindings('python-goto-class', ('C-c M-c',))
        self.add_bindings('python-check-syntax', ('C-c s',))
        self.add_bindings('python-dict-cleanup', ('C-c h',))
        self.add_bindings('python-insert-triple-squotes', ('C-c M-\'',))
        self.add_bindings('python-insert-triple-dquotes', ('C-c M-"',))
        self.add_bindings('python-semantic-complete', ('C-c TAB',))
        self.context = PythonContext(self)
        # bicycle repairman!
        if has_bike:
            self.brm = bike.init()
        else:
            self.brm = None
install = Python.install