2007-06-26 09:47:57 -04:00
|
|
|
import commands, os.path, sets, string
|
2007-07-14 10:21:22 -04:00
|
|
|
import color, completer, default, mode2, method, regex, tab2
|
2007-06-17 22:14:07 -04:00
|
|
|
import ctag_python
|
2007-06-05 00:49:24 -04:00
|
|
|
from point2 import Point
|
2007-07-11 12:20:33 -04:00
|
|
|
from lex2 import Grammar, PatternRule, RegionRule, ConstantRule
|
2007-06-17 22:14:07 -04:00
|
|
|
|
|
|
|
class StringGrammar(Grammar):
    # Sub-grammar applied inside string regions (see the RegionRule
    # 'string' entries in PythonGrammar below).
    #
    # NOTE: rule order matters — 'octal' must precede 'escaped', since
    # r'\\.' would otherwise consume the backslash of an octal escape.
    rules = [
        PatternRule(r'octal', r'\\[0-7]{3}'),
        PatternRule(r'escaped', r'\\.'),
    ]
|
|
|
|
|
|
|
|
class PythonGrammar(Grammar):
    # Lexer rules for Python source. Rules are tried in order, so the
    # ordering below is significant: e.g. 'keyword' must run before the
    # generic 'identifier', and 'system_identifier' (__x__) before
    # 'private_identifier' (__x) before 'hidden_identifier' (_x).
    rules = [
        # a name immediately following 'def ' / 'class '
        PatternRule(r'functionname', r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'classname', r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'reserved', r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'),
        PatternRule(r'keyword', r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'),
        # builtin names, but not attribute accesses like foo.map
        PatternRule(r"builtin", r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])'),
        # NOTE(review): the lookbehind requires a space between '.' and the
        # name, so this only matches calls written as "obj. name()" — confirm
        # this is intentional (possibly an artifact of how tokens are joined).
        PatternRule(r'methodcall', r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
        PatternRule(r'functioncall', r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
        PatternRule(r'system_identifier', r'__[a-zA-Z0-9_]+__'),
        PatternRule(r'private_identifier', r'__[a-zA-Z0-9_]*'),
        PatternRule(r'hidden_identifier', r'_[a-zA-Z0-9_]*'),
        PatternRule(r'identifier', r'[a-zA-Z_][a-zA-Z0-9_]*'),
        PatternRule(r'delimiter', r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*='),
        PatternRule(r"operator", r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
        # numeric literals (Python 2 forms: octal 0NNN, long suffix l/L)
        PatternRule(r"integer", r"(?<![\.0-9a-zA-Z_])(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])"),
        PatternRule(r"float", r"(?<![\.0-9a-zA-Z_])(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])"),
        PatternRule(r"imaginary", r"(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])"),
        # string regions; triple-quoted forms must be tried before the
        # single-character delimiters or '"""' would lex as an empty string
        RegionRule(r'string', r'"""', StringGrammar, r'"""'),
        RegionRule(r'string', r"'''", StringGrammar, r"'''"),
        RegionRule(r'string', r'"', StringGrammar, r'"'),
        RegionRule(r'string', r"'", StringGrammar, r"'"),
        PatternRule(r'comment', r'#.*$'),
        # a backslash-newline continues the logical line; the tabber keys
        # off this token name
        PatternRule(r'continuation', r'\\\n$'),
        PatternRule(r'eol', r'\n$'),
    ]
|
2007-06-05 00:49:24 -04:00
|
|
|
|
2007-06-26 09:47:57 -04:00
|
|
|
class PythonTabber(tab2.StackTabber):
    """Indentation engine for Python buffers.

    Walks back from the requested line to the nearest line whose
    indentation is known for certain (a "base" line), then replays every
    line from there forward, maintaining a stack of indentation markers.
    """

    # keywords which, when seen, mean the next line will unindent
    endlevel_names = ('pass', 'return', 'yield', 'raise', 'break', 'continue')
    # keywords which open a new indentation level when leftmost on a line
    # (fix: 'try' was previously listed twice)
    startlevel_names = ('if', 'try', 'class', 'def', 'for', 'while')

    def __init__(self, m):
        tab2.StackTabber.__init__(self, m)
        self.base_level = 0

    def is_base(self, y):
        """Return True if line y's indentation level is known a priori."""
        if y == 0:
            # we always know that line 0 is indented at the 0 level
            return True
        # NOTE(review): tokens[0] assumes every line yields at least one
        # token; the grammar's 'eol' rule appears to guarantee this (an
        # earlier empty-tokens guard was deliberately commented out).
        tokens = self.get_tokens(y)
        if tokens[0].name in self.startlevel_names:
            # if a line has no whitespace and begins with something like
            # 'while', 'class', 'def', 'if', etc. then we can start at it
            return True
        else:
            # otherwise, we can't be sure that its level is correct
            return False

    def get_level(self, y):
        """Return the computed indentation level for line y."""
        self._calc_level(y)
        return self.lines.get(y)

    def _calc_level(self, y):
        """Recompute indentation levels from the nearest base line up to y."""
        # ok, so first remember where we are going, and find our starting point
        target = y
        while not self.is_base(y) and y > 0:
            y -= 1

        # ok, so clear out our stack and then loop over each line
        self.popped = False
        self.markers = []
        while y <= target:
            self.continued = False
            self.last_popped = self.popped
            self.popped = False
            tokens = self.get_tokens(y)
            currlvl = self.get_curr_level()
            # if we were continuing, let's pop that previous continuation token
            # and note that we're continuing
            if self.markers and self.markers[-1].name == 'cont':
                self.continued = True
                self._pop()
            # if we haven't reached the target-line yet, we can detect how many
            # levels of unindention, if any, the user chose on previous lines
            # NOTE(review): the `len(tokens) > 2` threshold is inherited
            # as-is; presumably it skips near-empty lines — confirm.
            if y < target and len(tokens) > 2:
                if self.token_is_space(y, 0):
                    l = len(tokens[0].string)
                else:
                    l = 0
                while currlvl > l:
                    self._pop()
                    currlvl = self.get_curr_level()
                    self.popped = True
            # ok, having done all that, we can now process each token on the line
            for i in range(0, len(tokens)):
                currlvl = self._handle_token(currlvl, y, i)
            # so let's store the level for this line, as well as some debugging
            self.lines[y] = currlvl
            self.record[y] = tuple(self.markers)
            y += 1

    def _handle_close_token(self, currlvl, y, i):
        """Delegate to the base tabber; an unbalanced closer keeps currlvl."""
        try:
            return tab2.StackTabber._handle_close_token(self, currlvl, y, i)
        except Exception:
            # unmatched close token: keep the current level rather than crash
            return currlvl

    def _handle_other_token(self, currlvl, y, i):
        """Adjust the marker stack for one non-open/close token; return level."""
        token = self.get_token(y, i)
        fqname = token.fqname()
        if fqname == 'continuation':
            # we need to pop the indentation level over, unless last line was
            # also a continued line
            if self.continued:
                self._opt_append('cont', currlvl)
            else:
                self._opt_append('cont', currlvl + 4)
        elif fqname == 'string.start':
            # while inside of a string, there is no indention level
            self._opt_append('string', None)
        elif fqname == 'string.end':
            # since we're done with the string, resume our indentation level
            self._opt_pop('string')
        elif fqname == 'delimiter':
            # we only really care about a colon as part of a one-line statement,
            # i.e. "while ok: foo()" or "if True: print 3"
            if token.string == ':':
                if self.markers and self.markers[-1].name in ('[', '{'):
                    pass
                elif self.is_rightmost_token(y, i):
                    pass
                else:
                    self._pop()
        elif fqname == 'keyword':
            if token.string in self.endlevel_names:
                # we know we'll unindent at least once
                self._pop()
                self.popped = True
            elif token.string in self.startlevel_names and self.is_leftmost_token(y, i):
                # we know we will indent exactly once
                self._append(token.string, currlvl + 4)
            elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first if/elif
                if not self.popped and not self.last_popped:
                    self._pop_until('if', 'elif')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + 4)
            elif token.string == 'except' and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first try
                if not self.popped and not self.last_popped:
                    self._pop_until('try')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + 4)
            elif token.string == 'finally' and self.is_leftmost_token(y, i):
                # we know we'll unindent at least to the first try/except
                if not self.popped and not self.last_popped:
                    self._pop_until('try', 'except')
                    currlvl = self.get_curr_level()
                self._append(token.string, currlvl + 4)
        return currlvl
|
2007-06-21 00:54:33 -04:00
|
|
|
|
2007-06-05 00:49:24 -04:00
|
|
|
class Python(mode2.Fundamental):
    """Major mode for editing Python source code."""

    tabbercls = PythonTabber
    grammar   = PythonGrammar

    # bracket-matching configuration: which token may open/close a tag,
    # and which characters pair with which
    opentokens  = ('delimiter',)
    opentags    = {'(': ')', '[': ']', '{': '}'}
    closetokens = ('delimiter',)
    closetags   = {')': '(', ']': '[', '}': '{'}

    def __init__(self, w):
        mode2.Fundamental.__init__(self, w)

        # tag matching
        for action, key in (('close-paren',   ')'),
                            ('close-brace',   '}'),
                            ('close-bracket', ']')):
            self.add_bindings(action, (key,))

        # add python-specific methods
        self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',))
        self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',))
        #self.add_action_and_bindings(PythonUpdateTags(), ('C-c t',))
        #self.add_action_and_bindings(PythonTagComplete(), ('C-c k',))

        # highlighting: map fully-qualified token names to color attributes
        build = color.build
        self.colors = {
            'keyword':           build('cyan', 'default'),
            'reserved':          build('magenta', 'default'),
            'builtin':           build('cyan', 'default'),
            'functionname':      build('blue', 'default'),
            'classname':         build('green', 'default'),
            'string.start':      build('green', 'default'),
            'string.null':       build('green', 'default'),
            'string.octal':      build('magenta', 'default'),
            'string.escaped':    build('magenta', 'default'),
            'string.format':     build('yellow', 'default'),
            'string.end':        build('green', 'default'),
            'integer':           build('default', 'default'),
            'float':             build('default', 'default'),
            'imaginary':         build('default', 'default'),
            'comment':           build('red', 'default'),
            'continuation':      build('red', 'default'),
            'system_identifier': build('cyan', 'default'),
        }

        # default module search path used by the syntax checker
        self.pythonlib = "."

    def name(self):
        """Return the mode's display name."""
        return "Python"
|
|
|
|
|
2007-06-27 18:53:02 -04:00
|
|
|
class PythonSetLib(method.Method):
    '''Set the path(s) used to find python modules during syntax checks'''
    # prompted argument; stored on the mode and consumed by
    # PythonCheckSyntax as the PYTHONPATH value
    args = [method.Argument("lib", type=type(""), prompt="Python Path: ",
                            default=default.build_constant("."))]
    def _execute(self, w, **vargs):
        w.mode.pythonlib = vargs['lib']
|
|
|
|
|
|
|
|
class PythonCheckSyntax(method.Method):
    '''Check the syntax of the current python file'''

    def _quote(self, s):
        # minimal POSIX shell single-quoting: wrap in single quotes and
        # escape any embedded single quote as '\'' so the value is passed
        # to the shell verbatim
        return "'" + s.replace("'", "'\\''") + "'"

    def _execute(self, w, **vargs):
        # derive the module name from the buffer's filename,
        # e.g. "/path/to/foo.py" -> "foo"
        # NOTE(review): assumes w.buffer.path is set — confirm behavior
        # for unsaved buffers
        mod = os.path.splitext(os.path.basename(w.buffer.path))[0]
        # quote both interpolated values so a pythonlib path (or module
        # name) containing spaces or shell metacharacters cannot break or
        # inject into the command line
        cmd = "PYTHONPATH=%s python -c %s" % (
            self._quote(w.mode.pythonlib), self._quote('import %s' % mod))
        (status, output) = commands.getstatusoutput(cmd)
        if status == 0:
            w.application.set_error("Syntax OK")
            w.application.data_buffer("python-syntax", output, switch_to=False)
        else:
            # include the exit status in the buffer shown to the user
            output = output + "\ncommand exit status: %d" % (status)
            w.application.data_buffer("python-syntax", output, switch_to=True)
|
|
|
|
|
2007-07-08 19:16:53 -04:00
|
|
|
#class PythonUpdateTags(method.Method):
|
|
|
|
# '''Update the CTag data associated with a python buffer'''
|
|
|
|
# args = [method.Argument("lib", prompt="Module Base: ", datatype='path',
|
|
|
|
# default=default.build_constant("."))]
|
|
|
|
# def _execute(self, w, **vargs):
|
|
|
|
# w.mode.ctagger = ctag_python.PythonCTagger()
|
|
|
|
# w.mode.ctagger.process_paths([vargs['lib']])
|
|
|
|
# w.application.set_error('Tag data updated')
|
|
|
|
#
|
|
|
|
#class PythonTagComplete(method.Method):
|
|
|
|
# '''Complete a symbol using tag data'''
|
|
|
|
# def _execute(self, w, **vargs):
|
|
|
|
# if not w.mode.ctagger.packages:
|
|
|
|
# w.application.methods['python-update-tags'].execute(w)
|
|
|
|
# return
|
|
|
|
#
|
|
|
|
# cursor = w.logical_cursor()
|
|
|
|
# b = w.buffer
|
|
|
|
# line = b.lines[cursor.y]
|
|
|
|
# end = cursor.x
|
|
|
|
# start = cursor.x
|
|
|
|
#
|
|
|
|
# word_chars = string.letters + string.digits + '_'
|
|
|
|
# if start == 0:
|
|
|
|
# w.application.set_error('walrus 1')
|
|
|
|
# return
|
|
|
|
#
|
|
|
|
# c = line[start - 1]
|
|
|
|
# if c == '(':
|
|
|
|
# w.application.set_error('goldfinch 1')
|
|
|
|
# return
|
|
|
|
# elif c not in word_chars:
|
|
|
|
# w.application.set_error('walrus 2')
|
|
|
|
# return
|
|
|
|
#
|
|
|
|
# while start > 0 and line[start - 1] in word_chars:
|
|
|
|
# start -= 1
|
|
|
|
# if start == end:
|
|
|
|
# w.application.set_error('walrus 3')
|
|
|
|
# return
|
|
|
|
# word = line[start:end]
|
|
|
|
#
|
|
|
|
# candidates = []
|
|
|
|
# seen = sets.Set()
|
|
|
|
# for p in w.mode.ctagger.packages.iterkeys():
|
|
|
|
# if p.startswith(word):
|
|
|
|
# if p in seen:
|
|
|
|
# continue
|
|
|
|
# candidates.append(p)
|
|
|
|
# seen.add(p)
|
|
|
|
# for e in w.mode.ctagger.entries.itervalues():
|
|
|
|
# if e.symbol.startswith(word):
|
|
|
|
# if e.symbol in seen:
|
|
|
|
# continue
|
|
|
|
# candidates.append(e.symbol)
|
|
|
|
# seen.add(e.symbol)
|
|
|
|
# if len(candidates) == 0:
|
|
|
|
# w.application.set_error('No match: %r' % word)
|
|
|
|
# return
|
|
|
|
# elif len(candidates) == 1:
|
|
|
|
# newword = candidates[0]
|
|
|
|
# if word == newword:
|
|
|
|
# w.application.set_error('Already completed!')
|
|
|
|
# return
|
|
|
|
# else:
|
|
|
|
# w.application.set_error('Unique match!')
|
|
|
|
# else:
|
|
|
|
# newword = completer.find_common_string(candidates)
|
|
|
|
# w.application.set_error('Ambiguous match: %r' % (candidates))
|
|
|
|
# b.delete_string(Point(start, cursor.y), Point(end, cursor.y))
|
|
|
|
# b.insert_string(Point(start, cursor.y), newword)
|
2007-03-06 10:05:38 -05:00
|
|
|
|
|
|
|
class PythonDictCleanup(method.Method):
    '''Align assignment blocks and literal dictionaries'''
    def _execute(self, w, **vargs):
        cursor = w.logical_cursor()
        b = w.buffer

        # so this is where we will store the groups that we find
        groups_by_line = {}

        # the regex we will try
        regexes = [regex.python_dict_cleanup,
                   regex.python_assign_cleanup]

        # if we aren't in a hash, inform the user and exit
        # NOTE(review): no break here, so when a line matches BOTH regexes
        # the LAST match (assign_cleanup) wins — confirm this is intended
        line = b.lines[cursor.y]
        myregex = None
        for r in regexes:
            if r.match(line):
                myregex = r

        if myregex is None:
            raise Exception, "Not a python dict line"

        groups_by_line[cursor.y] = myregex.match(line).groups()

        # find the beginning of this hash block: scan upward until a line
        # stops matching the chosen regex
        start = 0
        i = cursor.y - 1
        while i >= 0:
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                start = i + 1
                break
            else:
                groups_by_line[i] = m.groups()
                i -= 1

        # find the end of this hash block: scan downward the same way
        end = len(b.lines) - 1
        i = cursor.y + 1
        while i < len(b.lines):
            line = b.lines[i]
            m = myregex.match(line)
            if not m:
                end = i - 1
                break
            else:
                groups_by_line[i] = m.groups()
                i += 1

        # assume that the least indented line is correct
        # (group 0 is presumably the leading-whitespace group — confirm
        # against the regex definitions in the regex module)
        indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])

        # find the longest hash key to base all the other padding on
        key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])

        # for each line, format it correctly
        # (group indices: 1 = key, 3 = separator, 5 = value — presumably;
        # verify against regex.python_dict_cleanup / python_assign_cleanup)
        keys = groups_by_line.keys()
        keys.sort()
        data = ''
        for i in keys:
            indent_pad = ' ' * indent_w
            key = groups_by_line[i][1]
            sep = groups_by_line[i][3]
            value = groups_by_line[i][5]
            key_pad = ' ' * (key_w - len(key))
            if sep == '=':
                # assignment style: pad between key and '='
                data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
            else:
                # dict style: pad after the separator (e.g. ':')
                data += indent_pad + key + sep + ' ' + key_pad + value + '\n'

        # remove the old text and add the new
        start_p = Point(0, start)
        end_p = Point(0, end + 1)
        w.kill(start_p, end_p)
        w.insert_string(start_p, data)
|