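# Python editing mode: wires up the Python grammar/lexer, tabber, and ctagger,
# defines the token colors, and provides the actions bound in __init__ below
# (syntax check, tag update, tag-based completion, and dict/assignment cleanup).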
import commands, os.path, sets, string, sys

import color, default, mode, lex, lex_python, method, point, regex, tab_python
import ctag_python, completer

class Python(mode.Fundamental):
    def __init__(self, w):
        mode.Fundamental.__init__(self, w)

        self.tag_matching = True
        self.grammar = lex_python.PythonGrammar()
        self.lexer = lex.Lexer(self.grammar)

        # python-specific actions, bound under the C-c prefix
        self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',))
        self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',))
        self.add_action_and_bindings(PythonUpdateTags(), ('C-c t',))
        self.add_action_and_bindings(PythonTagComplete(), ('C-c k',))

        self.add_bindings('close-paren', (')',))
        self.add_bindings('close-brace', ('}',))
        self.add_bindings('close-bracket', (']',))

        # colors for the token types produced by the lexer
        self.default_color = color.build('default', 'default')
        self.colors = {
            'keyword' : color.build('cyan', 'default', 'bold'),
            'pseudo-keyword' : color.build('cyan', 'default', 'bold'),
            'built-in method' : color.build('cyan', 'default', 'bold'),
            'method declaration' : color.build('blue', 'default', 'bold'),
            'class declaration' : color.build('green', 'default'),
            'string4' : color.build('green', 'default'),
            'string3' : color.build('green', 'default'),
            'string2' : color.build('green', 'default'),
            'string1' : color.build('green', 'default'),
            'comment' : color.build('red', 'default'),
            'continuation' : color.build('red', 'default'),
            #'operator' : color.build('yellow', 'default'),
            #'delimiter' : color.build('magenta', 'default'),
            'system_identifier' : color.build('cyan', 'default', 'bold'),
            #'bound method' : color.build('yellow', 'default'),
            'import statement' : color.build('magenta', 'green'),
            'bizzaro' : color.build('magenta', 'green'),
        }

        #self.highlighter.lex_buffer()
        #self.get_regions()
        self.tabber = tab_python.PythonTabber(self)
        self.ctagger = ctag_python.PythonCTagger()

    def name(self):
        return "Python"

class PythonCheckSyntax(method.Method):
    '''Check the syntax of the current python file'''
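    # The check imports the buffer's module in a subprocess with PYTHONPATH
    # pointed at the "lib" argument.  For example (illustrative paths only),
    # checking /home/user/proj/foo.py with lib='/home/user/proj' runs roughly:
    #
    #     PYTHONPATH=/home/user/proj python -c 'import foo'
    #
    # A zero exit status is reported as "Syntax OK"; otherwise the command's
    # output is placed in the "python-syntax" buffer and displayed.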
    def _args(self):
        return [method.Argument("lib", type=type(""), prompt="Python Path: ",
                                datatype='path',
                                default=default.build_constant("."))]
    def _execute(self, w, **vargs):
        a = vargs['lib']
        mod = os.path.splitext(os.path.basename(w.buffer.path))[0]
        cmd = "PYTHONPATH=%s python -c 'import %s'" % (a, mod)
        (status, output) = commands.getstatusoutput(cmd)
        if status == 0:
            w.application.set_error("Syntax OK")
            w.application.data_buffer("python-syntax", output, switch_to=False)
        else:
            output = output + "\ncommand exit status: %d" % (status)
            w.application.data_buffer("python-syntax", output, switch_to=True)

class PythonUpdateTags(method.Method):
    '''Update the CTag data associated with a python buffer'''
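    # Rebuilds the mode's tag data by scanning the given module base path with
    # a fresh PythonCTagger.  PythonTagComplete invokes the method registered
    # as 'python-update-tags' when no tag data has been loaded yet.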
    def _args(self):
        return [method.Argument("lib", prompt="Module Base: ", datatype='path',
                                default=default.build_constant("."))]
    def _execute(self, w, **vargs):
        w.mode.ctagger = ctag_python.PythonCTagger()
        w.mode.ctagger.process_paths([vargs['lib']])
        w.application.set_error('Tag data updated')

class PythonTagComplete(method.Method):
    '''Complete a symbol using tag data'''
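    # Completion takes the identifier characters (letters, digits, '_')
    # immediately to the left of the cursor and matches them against known
    # package names and tag symbols.  For example (hypothetical tag data), if
    # the text before the cursor is "comp" and the tags contain "completer"
    # and "complete_word", the word is replaced by the candidates' common
    # string and the ambiguous candidates are reported.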
    def _execute(self, w, **vargs):
        if not w.mode.ctagger.packages:
            w.application.methods['python-update-tags'].execute(w)
            return

        cursor = w.logical_cursor()
        b = w.buffer
        line = b.lines[cursor.y]
        end = cursor.x
        start = cursor.x

        word_chars = string.letters + string.digits + '_'
        #word_chars = string.letters + string.digits + string.punctuation
        if start == 0:
            w.application.set_error('walrus 1')
            return

        c = line[start - 1]
        if c == '(':
            w.application.set_error('goldfinch 1')
            return
        elif c not in word_chars:
            w.application.set_error('walrus 2')
            return

        # scan backwards over identifier characters to find the word to complete
        while start > 0 and line[start - 1] in word_chars:
            start -= 1
        if start == end:
            w.application.set_error('walrus 3')
            return
        word = line[start:end]

        # collect every package name and tag symbol that starts with the word
        candidates = []
        seen = sets.Set()
        for p in w.mode.ctagger.packages.iterkeys():
            if p.startswith(word):
                if p in seen:
                    continue
                candidates.append(p)
                seen.add(p)
        for e in w.mode.ctagger.entries.itervalues():
            if e.symbol.startswith(word):
                if e.symbol in seen:
                    continue
                candidates.append(e.symbol)
                seen.add(e.symbol)
        if len(candidates) == 0:
            w.application.set_error('No match: %r' % word)
            return
        elif len(candidates) == 1:
            newword = candidates[0]
            if word == newword:
                w.application.set_error('Already completed!')
                return
            else:
                w.application.set_error('Unique match!')
        else:
            # several candidates: use their common string
            newword = completer.find_common_string(candidates)
            w.application.set_error('Ambiguous match: %r' % (candidates))
        b.delete_string(point.Point(start, cursor.y), point.Point(end, cursor.y))
        b.insert_string(point.Point(start, cursor.y), newword)

class PythonDictCleanup(method.Method):
    '''Align assignment blocks and literal dictionaries'''
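    # The cleanup finds the contiguous block of dict-entry or assignment lines
    # around the cursor, re-indents every line to the indentation of the least
    # indented line, and pads each key to the width of the longest key so the
    # separators line up.  For example (illustrative; the exact captures come
    # from regex.python_assign_cleanup), a block like:
    #
    #     foo = 1
    #       barbaz  = 22
    #
    # becomes:
    #
    #     foo    = 1
    #     barbaz = 22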
    def _execute(self, w, **vargs):
        cursor = w.logical_cursor()
        b = w.buffer

        # groups from each matching line, keyed by line number
        groups_by_line = {}

        # the regexes we will try
        regexes = [regex.python_dict_cleanup,
                   regex.python_assign_cleanup]

        # if the current line is neither a dict entry nor an assignment, bail out
        line = b.lines[cursor.y]
        myregex = None
        for r in regexes:
            if r.match(line):
                myregex = r

        if myregex is None:
            raise Exception("Not a python dict line")

        groups_by_line[cursor.y] = myregex.match(line).groups()

        # find the beginning of this hash block
        start = 0
        i = cursor.y - 1
        while i >= 0:
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                start = i + 1
                break
            else:
                groups_by_line[i] = m.groups()
                i -= 1

        # find the end of this hash block
        end = len(b.lines) - 1
        i = cursor.y + 1
        while i < len(b.lines):
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                end = i - 1
                break
            else:
                groups_by_line[i] = m.groups()
                i += 1

        # assume that the least indented line is correct
        indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])

        # find the longest hash key to base all the other padding on
        key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])

        # format each line with the common indent and padded keys
        keys = groups_by_line.keys()
        keys.sort()
        data = ''
        for i in keys:
            indent_pad = ' ' * indent_w
            key = groups_by_line[i][1]
            sep = groups_by_line[i][3]
            value = groups_by_line[i][5]
            key_pad = ' ' * (key_w - len(key))
            if sep == '=':
                data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
            else:
                data += indent_pad + key + sep + ' ' + key_pad + value + '\n'

        # remove the old text and add the new
        start_p = point.Point(0, start)
        end_p = point.Point(0, end + 1)
        w.kill(start_p, end_p)
        w.insert(start_p, data)