import commands, os.path, sets, string, sys

import color, completer, default, mode2, lex2, method, regex, tab2, util
import ctag_python

from point2 import Point
from lex2 import Grammar, ConstantRule, PatternRule, RegionRule, DualRegionRule

class StringGrammar(Grammar):
    rules = [
        PatternRule(
            name=r'octal',
            pattern=r'\\[0-7]{3}',
        ),
        PatternRule(
            name=r'escaped',
            pattern=r'\\.',
        ),
        #PatternRule(
        #    name=r'format',
        #    pattern=r'%(?:\([a-zA-Z_]+\))?[-# +]*(?:[0-9]+|\*)?\.?(?:[0-9]+|\*)?[hlL]?[a-zA-Z%]',
        #),
    ]
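
# A small illustration (a sketch, assuming lex2 tries rules in order
# inside a string region): in the literal "a\012b\n", the 'octal' rule
# claims the three-digit escape \012 before 'escaped' can see its
# backslash, and 'escaped' then claims \n; remaining characters fall
# through to the region's plain 'string.null' tokens.
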
class PythonGrammar(Grammar):
    rules = [
        PatternRule(
            name=r'functiondef',
            pattern=r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*',
        ),
        PatternRule(
            name=r'classdef',
            pattern=r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*',
        ),
        PatternRule(
            name=r'reserved',
            pattern=r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])',
        ),
        PatternRule(
            name=r'keyword',
            pattern=r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])',
        ),
        PatternRule(
            name=r'builtin',
            pattern=r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])',
        ),

        PatternRule(
            name=r'methodcall',
            pattern=r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()',
        ),
        PatternRule(
            name=r'functioncall',
            pattern=r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()',
        ),
        PatternRule(
            name=r'system_identifier',
            pattern=r'__[a-zA-Z0-9_]+__',
        ),
        PatternRule(
            name=r'private_identifier',
            pattern=r'__[a-zA-Z0-9_]*',
        ),
        PatternRule(
            name=r'hidden_identifier',
            pattern=r'_[a-zA-Z0-9_]*',
        ),
        PatternRule(
            name=r'identifier',
            pattern=r'[a-zA-Z_][a-zA-Z0-9_]*',
        ),
        PatternRule(
            name=r'delimiter',
            pattern=r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*=',
        ),
        PatternRule(
            name=r'operator',
            pattern=r'\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%',
        ),

        PatternRule(
            name=r'integer',
            pattern=r'(?<![\.0-9a-zA-Z_])(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])',
        ),
        PatternRule(
            name=r'float',
            pattern=r'(?<![\.0-9a-zA-Z_])(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])',
        ),
        PatternRule(
            name=r'imaginary',
            pattern=r'(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])',
        ),

        RegionRule(
            name=r'docstring',
            start=r'^ *(?P<tag>"""|\'\'\')',
            grammar=Grammar(),
            end=r'%(tag)s',
        ),
        RegionRule(
            name=r'tq_string',
            start=r'(?P<tag>"""|\'\'\')',
            grammar=Grammar(),
            end=r'%(tag)s',
        ),
        RegionRule(
            name=r'string',
            start=r'(?P<tag>"|\')',
            grammar=StringGrammar(),
            end=r'%(tag)s',
        ),

        PatternRule(
            name=r'comment',
            pattern=r'#.*$',
        ),
        PatternRule(
            name=r'continuation',
            pattern=r'\\$',
        ),
    ]
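
# How the string RegionRules pair their delimiters (a sketch, assuming
# lex2 %-interpolates the start match's named groups into the end
# pattern): the start pattern captures the opening quote in
# (?P<tag>...), so end=r'%(tag)s' becomes a pattern matching that same
# quote. A region opened by ' is only closed by ', and one opened by
# """ only by """.  For example:
#
#   s = 'it said "hi"'    # region runs from the first ' to the last '
#   d = """docstring"""   # tag is """, so a lone " does not end it
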
class PythonTabber(tab2.Tabber):
    start_tags = {'(': ')',
                  '{': '}',
                  '[': ']'}

    close_tags = {')': '(',
                  '}': '{',
                  ']': '['}

    def __init__(self, m):
        tab2.Tabber.__init__(self, m)

    def stack_append(self, item):
        self.tab_stack.append(item)

    def stack_pop(self):
        self.tab_stack.pop(-1)

    def base_indentation_level(self, y):
        return y == 0

    def calculate_tabs(self, start=0, goal=None):
        lines = self.mode.window.buffer.lines
        tokens = self.mode.highlighter.tokens
        buffer = self.mode.window.buffer

        if self.levels is None:
            self.levels = [None] * (len(lines))

        self.index = 0
        self.y = start
        self.base = 0
        self.tab_stack = []

        # process every logical line in the file
        while self.y < len(lines):
            line = lines[self.y]
            start_index = self.index

            start_point = Point(0, self.y)
            start_offset = buffer.get_point_offset(start_point)
            end_point = Point(len(line), self.y)
            end_offset = buffer.get_point_offset(end_point)

            # find all the tokens on the line we are currently processing
            while self.index < len(tokens):
                token = tokens[self.index]
                if token.end > end_offset:
                    break
                self.index += 1

            self.handle_line(line,
                             start_offset, start_index,
                             end_offset, self.index)

            self.levels[self.y] = self.line_depth
            self.y += 1
            if goal is not None and self.y > goal:
                break

    def get_line_depth(self):
        if len(self.tab_stack) > 0:
            return self.tab_stack[-1][1]
        else:
            return self.base
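
    # For example (a hypothetical stack state): after scanning the lines
    #
    #   x = foo(a,
    #           [b,
    #
    # ('(' is followed by 'a' at column 8, and '[' by 'b' at column 9)
    # the stack would be [('(', 8), ('[', 9)], so get_line_depth()
    # returns 9; with an empty stack it falls back to self.base.
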
    def handle_line(self, line, start_offset, start_index, end_offset, end_index):
        self.line_depth = self.get_line_depth()
        tokens = self.mode.highlighter.tokens

        if start_index >= len(tokens):
            return
        if regex.whitespace.match(line):
            return

        if len(self.tab_stack) == 0 and tokens[start_index].start >= start_offset:
            self.base = util.count_leading_whitespace(line)

        for i in range(start_index, end_index):
            token = tokens[i]
            s = token.string
            if s in self.start_tags:
                # the pushed depth is the column of the token following
                # the open tag, or one indent level deeper than the
                # current depth when the tag ends the line; track it
                # separately from the loop index
                if i < end_index - 1:
                    depth = tokens[i+1].start - start_offset
                elif len(self.tab_stack) > 0:
                    depth = self.tab_stack[-1][1] + 4
                else:
                    depth = self.base + 4
                self.stack_append((s, depth))
            elif s in self.close_tags:
                assert len(self.tab_stack), "Unbalanced closing tag"
                assert self.tab_stack[-1][0] == self.close_tags[s], "Unmatched closing tag"
                self.stack_pop()
                # a closing tag that starts the line dedents the line itself
                if i == start_index:
                    self.line_depth = self.get_line_depth()

        # a line whose first token began on an earlier line (e.g. the
        # interior of a multi-line string) has no depth of its own
        if tokens[start_index].start < start_offset:
            self.line_depth = -1

        prebase = self.base
        s = tokens[start_index].string
        e = tokens[end_index-1].string

        if s == "except" or s == "elif" or s == "else":
            if self.y > 0 and self.line_depth == self.levels[self.y - 1]:
                self.line_depth = max(0, self.line_depth - 4)
        elif (s == "return" or s == "raise" or s == "yield" or s == "break" or
              s == "pass" or s == 'continue'):
            self.base = max(0, self.base - 4)

        if e == "\\":
            if len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
                pass
            else:
                self.stack_append(("\\", prebase + 4))
            return
        elif e == ":":
            self.base += 4
        elif len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
            self.stack_pop()
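
    # Worked example (hypothetical input): for the two lines
    #
    #   d = {
    #       'a': 1,
    #
    # the first line ends in '{' with no token after it, so
    # ('{', self.base + 4) is pushed and the next line lands one indent
    # level deeper; a later line whose first token is '}' pops the
    # stack and is itself dedented back to the enclosing depth.
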
    def get_indentation_level(self, y):
        if self.levels is not None and self.levels[y] is not None:
            result = self.levels[y]
        else:
            i = max(0, y - 1)
            while i > 0:
                if self.base_indentation_level(i):
                    break
                i -= 1
            self.calculate_tabs(i, y)
            result = self.levels[y]
        if result == -1:
            return None
        return result
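
    # Note on the -1 sentinel (as set in handle_line): a line whose
    # first token begins before the line itself -- e.g. the interior of
    # a triple-quoted string -- has no meaningful indentation level, so
    # it is recorded as -1 and reported here as None, presumably so the
    # caller leaves such lines alone.
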
class Python(mode2.Fundamental):
    tabbercls = PythonTabber
    grammar = PythonGrammar()
    opentoken = 'delimiter'
    opentags = {'(': ')', '[': ']', '{': '}'}
    closetoken = 'delimiter'
    closetags = {')': '(', ']': '[', '}': '{'}

    def __init__(self, w):
        mode2.Fundamental.__init__(self, w)

        # add python-specific methods
        self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',))
        self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',))
        self.add_action_and_bindings(PythonUpdateTags(), ('C-c t',))
        self.add_action_and_bindings(PythonTagComplete(), ('C-c k',))

        # enable matching for these closing tags
        self.add_bindings('close-paren', (')',))
        self.add_bindings('close-brace', ('}',))
        self.add_bindings('close-bracket', (']',))

        self.colors = {
            'keyword': color.build('cyan', 'default'),
            'reserved': color.build('cyan', 'default'),
            'builtin': color.build('cyan', 'default'),
            'functiondef': color.build('blue', 'default'),
            'classdef': color.build('green', 'default'),

            'string.start': color.build('green', 'default'),
            'string.null': color.build('green', 'default'),
            'string.escaped': color.build('magenta', 'default'),
            'string.octal': color.build('magenta', 'default'),
            'string.format': color.build('yellow', 'default'),
            'string.end': color.build('green', 'default'),

            'integer': color.build('default', 'default'),
            'float': color.build('default', 'default'),
            'imaginary': color.build('default', 'default'),

            'tq_string.start': color.build('green', 'default'),
            'tq_string.null': color.build('green', 'default'),
            'tq_string.end': color.build('green', 'default'),

            'docstring.start': color.build('green', 'default'),
            'docstring.null': color.build('green', 'default'),
            'docstring.end': color.build('green', 'default'),

            'comment': color.build('red', 'default'),
            'continuation': color.build('red', 'default'),
            'system_identifier': color.build('cyan', 'default'),
        }

    def name(self):
        return "Python"

class PythonCheckSyntax(method.Method):
    '''Check the syntax of the current python file'''
    def _args(self):
        return [method.Argument("lib", type=type(""), prompt="Python Path: ",
                                datatype='path',
                                default=default.build_constant("."))]
    def _execute(self, w, **vargs):
        a = vargs['lib']
        mod = os.path.splitext(os.path.basename(w.buffer.path))[0]
        cmd = "PYTHONPATH=%s python -c 'import %s'" % (a, mod)
        (status, output) = commands.getstatusoutput(cmd)
        if status == 0:
            w.application.set_error("Syntax OK")
            w.application.data_buffer("python-syntax", output, switch_to=False)
        else:
            output = output + "\ncommand exit status: %d" % (status)
            w.application.data_buffer("python-syntax", output, switch_to=True)
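
# For a buffer at /home/me/proj/foo.py run with the default lib
# argument, the command built above would be (paths here are
# hypothetical):
#
#   PYTHONPATH=. python -c 'import foo'
#
# so the check passes or fails on whether the module imports cleanly;
# note that importing also executes the module's top-level code, not
# just its parser.
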
class PythonUpdateTags(method.Method):
    '''Update the CTag data associated with a python buffer'''
    def _args(self):
        return [method.Argument("lib", prompt="Module Base: ", datatype='path',
                                default=default.build_constant("."))]
    def _execute(self, w, **vargs):
        w.mode.ctagger = ctag_python.PythonCTagger()
        w.mode.ctagger.process_paths([vargs['lib']])
        w.application.set_error('Tag data updated')

class PythonTagComplete(method.Method):
    '''Complete a symbol using tag data'''
    def _execute(self, w, **vargs):
        if not w.mode.ctagger.packages:
            w.application.methods['python-update-tags'].execute(w)
            return

        cursor = w.logical_cursor()
        b = w.buffer
        line = b.lines[cursor.y]
        end = cursor.x
        start = cursor.x

        word_chars = string.letters + string.digits + '_'
        if start == 0:
            w.application.set_error('walrus 1')
            return

        c = line[start - 1]
        if c == '(':
            w.application.set_error('goldfinch 1')
            return
        elif c not in word_chars:
            w.application.set_error('walrus 2')
            return

        while start > 0 and line[start - 1] in word_chars:
            start -= 1
        if start == end:
            w.application.set_error('walrus 3')
            return
        word = line[start:end]

        candidates = []
        seen = sets.Set()
        for p in w.mode.ctagger.packages.iterkeys():
            if p.startswith(word):
                if p in seen:
                    continue
                candidates.append(p)
                seen.add(p)
        for e in w.mode.ctagger.entries.itervalues():
            if e.symbol.startswith(word):
                if e.symbol in seen:
                    continue
                candidates.append(e.symbol)
                seen.add(e.symbol)
        if len(candidates) == 0:
            w.application.set_error('No match: %r' % word)
            return
        elif len(candidates) == 1:
            newword = candidates[0]
            if word == newword:
                w.application.set_error('Already completed!')
                return
            else:
                w.application.set_error('Unique match!')
        else:
            newword = completer.find_common_string(candidates)
            w.application.set_error('Ambiguous match: %r' % (candidates))
        b.delete_string(Point(start, cursor.y), Point(end, cursor.y))
        b.insert_string(Point(start, cursor.y), newword)
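
# Completion example (hypothetical tag data): with the cursor at the
# end of 'fo' and candidate symbols ['foo', 'foobar'], there is no
# unique match, so the common prefix computed by
# completer.find_common_string -- presumably 'foo' here -- replaces the
# word, and the full candidate list is reported as an ambiguous match.
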
class PythonDictCleanup(method.Method):
    '''Align assignment blocks and literal dictionaries'''
    def _execute(self, w, **vargs):
        cursor = w.logical_cursor()
        b = w.buffer

        # this is where we will store the groups that we find
        groups_by_line = {}

        # the regexes we will try, in order
        regexes = [regex.python_dict_cleanup,
                   regex.python_assign_cleanup]

        # if the current line matches neither regex, this isn't an
        # alignable block, so bail out
        line = b.lines[cursor.y]
        myregex = None
        for r in regexes:
            if r.match(line):
                myregex = r

        if myregex is None:
            raise Exception, "Not a python dict line"

        groups_by_line[cursor.y] = myregex.match(line).groups()

        # find the beginning of this hash block
        start = 0
        i = cursor.y - 1
        while i >= 0:
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                start = i + 1
                break
            else:
                groups_by_line[i] = m.groups()
                i -= 1

        # find the end of this hash block
        end = len(b.lines) - 1
        i = cursor.y + 1
        while i < len(b.lines):
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                end = i - 1
                break
            else:
                groups_by_line[i] = m.groups()
                i += 1

        # assume that the least indented line is correct
        indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])

        # find the longest hash key to base all the other padding on
        key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])

        # format each line, padding keys out to the common width
        keys = groups_by_line.keys()
        keys.sort()
        data = ''
        for i in keys:
            indent_pad = ' ' * indent_w
            key = groups_by_line[i][1]
            sep = groups_by_line[i][3]
            value = groups_by_line[i][5]
            key_pad = ' ' * (key_w - len(key))
            if sep == '=':
                data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
            else:
                data += indent_pad + key + sep + ' ' + key_pad + value + '\n'

        # remove the old text and add the new
        start_p = Point(0, start)
        end_p = Point(0, end + 1)
        w.kill(start_p, end_p)
        w.insert(start_p, data)
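
# Alignment example (hypothetical input): given a contiguous block of
# dict entries such as
#
#   'short': 1,
#   'longerkey': 22,
#
# the method rewrites every matching line so the values start in the
# same column, padding each key out to the width of the longest one:
#
#   'short':     1,
#   'longerkey': 22,
#
# For '=' assignment blocks the padding goes before the operator
# instead, so the '=' signs line up.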