# pmacs3/mode_perl.py

import re, sets, string, sys
import color, commands, default, lex2, method, mode2, regex, tab2
from point2 import Point
from lex2 import Grammar, ConstantRule, PatternRule, ContextPatternRule, \
RegionRule, DualRegionRule
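# Sub-grammar for POD blocks: captures the arguments of POD directives
# (=head1..=head4, =over, =item, =begin/=end, =encoding) as 'entry' tokens
# so they can be highlighted separately from the surrounding POD text.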
class PodGrammar(Grammar):
rules = [
PatternRule(name=r'entry', pattern=r'(?<=^=head[1-4]) +.*$'),
PatternRule(name=r'entry', pattern=r'(?<=^=over) +.*$'),
PatternRule(name=r'entry', pattern=r'(?<=^=item) +.*$'),
PatternRule(name=r'entry', pattern=r'(?:(?<=^=begin)|(?<=^=end)) +.*$'),
PatternRule(name=r'entry', pattern=r'(?<=^=encoding) +.*$'),
]
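# Sub-grammar used inside interpolating regions (double-quoted strings,
# backticks, heredocs, match/replace regexes): tokenizes backslash escapes,
# arrow dereferences like $x->{key} or $x->[0], $#array lengths, scalars,
# casts and arrays so they get their own colors within the region.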
class StringGrammar(Grammar):
rules = [
PatternRule(name=r'escaped', pattern=r'\\.'),
PatternRule(name=r'deref', pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?:->{\$?(?:[a-zA-Z_][a-zA-Z_0-9]*|'(?:\\.|[^'\\])*'|\"(\\.|[^\\\"])*\")}|->\[\$?[0-9a-zA-Z_]+\])+"),
PatternRule(name=r'length', pattern=r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
ContextPatternRule(name=r'scalar', pattern=r"\$[^A-Za-z0-9 %(delim)s](?![A-Za-z0-9_])", fallback=r"\$[^A-Za-z0-9 ](?![A-Za-z0-9_])"),
PatternRule(name=r'scalar', pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(name=r'cast', pattern=r"[\$\@\%\&]{.*?}"),
PatternRule(name=r'array', pattern=r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
#PatternRule(name=r'hash', pattern=r"%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
]
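# Shared grammar instances reused by the region rules below: g is a bare
# Grammar (no sub-tokens, used for non-interpolating regions such as
# single-quoted strings), pg handles POD bodies, sg handles interpolation.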
g = Grammar()
pg = PodGrammar()
sg = StringGrammar()
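# Main Perl grammar.  Rule order matters: multi-line regions (heredocs, POD,
# strings, quoted constructs, m//, s///, tr///) come first, then keywords,
# variables, builtins and operators, falling through to a generic 'bareword'
# rule at the end.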
class PerlGrammar(Grammar):
rules = [
RegionRule(name=r'heredoc1', start=r"<<(?P<heredoc>[a-zA-Z0-9_]+) *;", grammar=sg, end=r'^%(heredoc)s$'),
RegionRule(name=r'heredoc1', start=r'<< *"(?P<heredoc>[a-zA-Z0-9_]+)" *;', grammar=sg, end=r'^%(heredoc)s$'),
RegionRule(name=r'heredoc2', start=r"<< *'(?P<heredoc>[a-zA-Z0-9_]+)' *;", grammar=g, end=r'^%(heredoc)s$'),
RegionRule(name=r'eval_heredoc', start=r"<< *`(?P<heredoc>[a-zA-Z0-9_]+)` *;", grammar=sg, end=r'^%(heredoc)s$'),
        RegionRule(name=r'endblock', start=r"^(?:__END__|__DATA__) *$", grammar=g, end=r''),
RegionRule(name=r'pod', start=r'^=[a-zA-Z0-9_]+', grammar=pg, end=r'^=cut'),
PatternRule(name=r'comment', pattern=r'#.*$'),
RegionRule(name=r'string1', start=r'"', grammar=sg, end=r'"'),
RegionRule(name=r'string2', start=r"'", grammar=g, end=r"'"),
RegionRule(name=r'evalstring', start=r"`", grammar=sg, end=r"`"),
PatternRule(name=r'number', pattern=r'0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?'),
PatternRule(name=r'keyword', pattern=r"(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z0-9_])"),
PatternRule(name=r'hash_key', pattern=r'(?<={)[A-Za-z0-9_]+(?=})'),
PatternRule(name=r'hash_key', pattern=r'[A-Za-z0-9_]+(?= *=>)'),
PatternRule(name=r'length', pattern=r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(name=r'cast', pattern=r'[\$\@\%\^\&](?= *{)'),
PatternRule(name=r'scalar', pattern=r"\$[][><ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])"),
PatternRule(name=r'array', pattern=r"@_"),
PatternRule(name=r'function', pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?=-> *\()"),
PatternRule(name=r'scalar', pattern=r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(name=r'array', pattern=r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(name=r'hash', pattern=r"%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(name=r'deref', pattern=r"[@%\$&\*](?={)"),
RegionRule(name=r'quoted', start=r'q[rqwx]? *\(', grammar=g, end=r'\)'),
RegionRule(name=r'quoted', start=r'q[rqwx]? *{', grammar=g, end=r'}'),
RegionRule(name=r'quoted', start=r'q[rqwx]? *<', grammar=g, end=r'>'),
RegionRule(name=r'quoted', start=r'q[rqwx]? *\[', grammar=g, end=r'\]'),
RegionRule(name=r'quoted', start=r'q[rqwx]? *(?P<delim>[^ #])', grammar=g, end=r'%(delim)s'),
RegionRule(name=r'quoted', start=r'q[rqwx]?#', grammar=g, end=r'#'),
# match regexes
RegionRule(name=r'match', start=r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)) *(?P<delim>/)', grammar=sg, end=r'/[a-z]*'),
RegionRule(name=r'match', start=r'm *(?P<delim>[^ #a-zA-Z0-9_])', grammar=sg, end=r'%(delim)s[a-z]*'),
RegionRule(name=r'match', start=r'm(?P<delim>#)', grammar=sg, end=r'#[a-z]*'),
# replace regexes
DualRegionRule(name=r'replace', start=r's *(?P<delim>[^ a-zA-Z0-9_])', grammar1=sg, middle=r'%(delim)s', grammar2=sg, end=r'%(delim)s[a-z]*'),
DualRegionRule(name=r'replace', start=r's(?P<delim>#)', grammar1=sg, middle=r'#', grammar2=sg, end=r'#[a-z]*'),
# translate operator
DualRegionRule(name=r'translate', start=r'(?:y|tr) *(?P<delim>[^ a-zA-Z0-9_])', grammar1=g, middle=r'%(delim)s', grammar2=g, end=r'%(delim)s[a-z]*'),
DualRegionRule(name=r'translate', start=r'(?:y|tr)#', grammar1=g, middle=r'#', grammar2=g, end=r'#[a-z]*'),
# some more basic stuff
PatternRule(name=r'package', pattern=r"(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(name=r'sub', pattern=r"(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(name=r'use', pattern=r"(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(name=r'label', pattern=r'[a-zA-Z_][a-zA-Z0-9_]*:(?!:)'),
PatternRule(name=r'method', pattern=r"(?<=->)[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(name=r'function', pattern=r"&\$*(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(name=r'builtin', pattern=r"(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])"),
PatternRule(name=r'function', pattern=r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?= *\()"),
PatternRule(name=r'class', pattern=r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=->)"),
# nested regions
#RegionRule(name=r'paren', start=r'\(', grammar=None, end=r'\)'),
#RegionRule(name=r'brace', start=r'{', grammar=None, end=r'}'),
#RegionRule(name=r'bracket', start=r'\[', grammar=None, end=r'\]'),
# some basic stuff
#PatternRule(name=r'delimiter', pattern=r",|;|->|=>|=|\?|(?<!:):(?!=:)"),
PatternRule(name=r'delimiter', pattern=r"[,;=\?(){}\[\]]|->|=>|(?<!:):(?!=:)"),
PatternRule(name=r'operator', pattern=r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
PatternRule(name=r'operator', pattern=r"\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|=~|!~|!=|%|!|\."),
PatternRule(name=r'bareword', pattern=r'(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*')
]
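# Indentation logic for Perl buffers, built on the generic StackTabber:
# markers are pushed and popped for braces, heredocs, POD, strings and
# continuation lines ('cont') to compute each line's indent level.  Line 0,
# or any line whose first token is the 'sub' keyword, is treated as a
# reliable base for indentation.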
class PerlTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
if t.name == 'keyword' and t.string == 'sub':
return True
return False
def _handle_open_token(self, currlvl, y, i):
currlvl = tab2.StackTabber._handle_open_token(self, currlvl, y, i)
return currlvl
def _handle_close_token(self, currlvl, y, i):
self._opt_pop('cont')
currlvl = tab2.StackTabber._handle_close_token(self, currlvl, y, i)
token = self.get_token(y, i)
if token.string == '}':
self._opt_pop('cont')
elif self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + 4)
return currlvl
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'delimiter' and token.string == ';':
self._opt_pop('cont')
elif fqname == 'heredoc.start':
self._opt_append('heredoc', None)
elif fqname == 'heredoc.end':
self._opt_pop('heredoc')
self._opt_pop('cont')
elif fqname == 'pod.start':
self._opt_append('pod', None)
elif fqname == 'pod.end':
self._opt_pop('pod')
currlvl = 0
elif fqname == 'string1.start' or fqname == 'string2.start':
self._opt_append('string', None)
elif fqname == 'string1.end' or fqname == 'string2.end':
self._opt_pop('string')
if self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + 4)
if self.is_rightmost_token(y, i):
if(not fqname.startswith('pod') and
not fqname.startswith('heredoc') and
not fqname.startswith('string1') and
not fqname.startswith('string2') and
not fqname.startswith('endblock') and
not fqname == 'comment' and
not fqname == 'null' and
token.string not in ('}', ';', '(', '{', '[', ',')):
self._opt_append('cont', currlvl + 4)
return currlvl
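# The Perl mode itself: ties together the grammar, the tabber, bracket
# matching via the open/close delimiter tags, the C-c keybindings registered
# in __init__, and the token-to-color map used by the highlighter.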
class Perl(mode2.Fundamental):
tabbercls = PerlTabber
grammar = PerlGrammar()
opentoken = 'delimiter'
opentags = {'(': ')', '[': ']', '{': '}'}
closetoken = 'delimiter'
closetags = {')': '(', ']': '[', '}': '{'}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(PerlCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PerlHashCleanup(), ('C-c h',))
#self.add_action_and_bindings(PerlHashCleanup2(), ('C-c h',))
self.add_action_and_bindings(PerlViewModulePerldoc(), ('C-c v',))
self.add_action_and_bindings(PerlViewWordPerldoc(), ('C-c p',))
self.add_action_and_bindings(PerlWrapLine(), ('M-q',))
self.add_action_and_bindings(PerlGotoFunction(), ('C-c M-g',))
self.add_action_and_bindings(PerlWhichFunction(), ('C-c w',))
self.add_action_and_bindings(PerlListFunctions(), ('C-c W',))
self.default_color = color.build('default', 'default')
self.colors = {
# basic stuff
'escaped': color.build('magenta', 'default'),
'null': color.build('default', 'default'),
'delimiter': color.build('default', 'default'),
'sub': color.build('cyan', 'default'),
'number': color.build('default', 'default'),
'operator': color.build('default', 'default'),
'endblock': color.build('red', 'default'),
'keyword': color.build('magenta', 'default'),
'cast': color.build('yellow', 'default'),
'scalar': color.build('yellow', 'default'),
'array': color.build('yellow', 'default'),
'deref': color.build('yellow', 'default'),
'hash': color.build('yellow', 'default'),
'hash_key': color.build('green', 'default'),
'comment': color.build('red', 'default'),
'function': color.build('cyan', 'default'),
'builtin': color.build('magenta', 'default'),
'method': color.build('cyan', 'default'),
'bareword': color.build('default', 'default'),
'label': color.build('cyan', 'default'),
'package': color.build('cyan', 'default'),
'class': color.build('cyan', 'default'),
'use': color.build('cyan', 'default'),
# heredoc
'heredoc1.start': color.build('green', 'default'),
'heredoc1.null': color.build('green', 'default'),
'heredoc1.end': color.build('green', 'default'),
'heredoc2.start': color.build('green', 'default'),
'heredoc2.null': color.build('green', 'default'),
'heredoc2.end': color.build('green', 'default'),
'eval_heredoc.start': color.build('cyan', 'default'),
'eval_heredoc.null': color.build('cyan', 'default'),
'eval_heredoc.end': color.build('cyan', 'default'),
# pod
'pod.start': color.build('red', 'default'),
'pod.null': color.build('red', 'default'),
'pod.entry': color.build('magenta', 'default'),
'pod.end': color.build('red', 'default'),
# "" strings
'string1.start': color.build('green', 'default'),
'string1.null': color.build('green', 'default'),
'string1.escaped': color.build('magenta', 'default'),
'string1.deref': color.build('yellow', 'default'),
'string1.end': color.build('green', 'default'),
# '' strings
'string2.start': color.build('green', 'default'),
'string2.null': color.build('green', 'default'),
'string2.end': color.build('green', 'default'),
# `` strings
'evalstring.start': color.build('cyan', 'default'),
'evalstring.null': color.build('cyan', 'default'),
            'evalstring.escaped': color.build('magenta', 'default'),
            'evalstring.deref': color.build('yellow', 'default'),
'evalstring.end': color.build('cyan', 'default'),
# quoted region
'quoted': color.build('cyan', 'default'),
'quoted.start': color.build('cyan', 'default'),
'quoted.null': color.build('cyan', 'default'),
'quoted.end': color.build('cyan', 'default'),
# match regex
'match.start': color.build('cyan', 'default'),
'match.end': color.build('cyan', 'default'),
'match.null': color.build('cyan', 'default'),
# replace regex
'replace.start': color.build('cyan', 'default'),
'replace.middle': color.build('cyan', 'default'),
'replace.end': color.build('cyan', 'default'),
'replace.null': color.build('cyan', 'default'),
'replace.escaped': color.build('magenta', 'default'),
'replace.deref': color.build('yellow', 'default'),
'replace.length': color.build('yellow', 'default'),
'replace.scalar': color.build('yellow', 'default'),
'replace.hash': color.build('yellow', 'default'),
'replace.cast': color.build('yellow', 'default'),
# translate regex
'translate.start': color.build('magenta', 'default'),
'translate.middle': color.build('magenta', 'default'),
'translate.end': color.build('magenta', 'default'),
'translate.null': color.build('magenta', 'default'),
}
def name(self):
return "Perl"
def build_function_map(self):
b = self.window.buffer
self.functions = {}
for i in range(0, len(b.lines)):
m = regex.perl_function.match(b.lines[i])
if m:
self.functions[m.group(1)] = i
def get_functions(self):
if self.functions is None:
self.build_function_map()
return self.functions
def get_function_names(self):
functions = self.get_functions()
pairs = [[functions[key], key] for key in functions]
pairs.sort()
names = [x[1] for x in pairs]
return names
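# Note: despite the docstring, only comment blocks are wrapped at present;
# invoking this inside POD hits the assertion below.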
class PerlWrapLine(method.Method):
'''Wrap lines, comments, POD'''
margin = 80
comment_re = re.compile('^( *)(#+)( *)([^ ].*)$')
def _execute(self, w, **vargs):
pcursor = w.physical_cursor()
r = w.get_region(pcursor)
if r is None:
return
t = r[4]
if t == 'pod':
assert False, 'POD: %s' % repr(r)
elif t == 'comment':
self._wrap_comment(w)
else:
return
def _wrap_comment(self, w):
l = w.logical_cursor()
m = self.comment_re.match(w.buffer.lines[l.y])
if not m:
assert False, 'no match oh geez'
pad = m.group(1) + m.group(2) + m.group(3)
data = m.group(4) + ' '
start = l.y
end = l.y + 1
while end < len(w.buffer.lines):
m = self.comment_re.match(w.buffer.lines[end])
if m:
data += m.group(4) + ' '
end += 1
else:
break
words = [word for word in data.split() if word]
lines = [pad]
for word in words:
if len(lines[-1]) == len(pad):
lines[-1] += word
elif len(lines[-1]) + 1 + len(word) <= self.margin:
lines[-1] += ' ' + word
else:
lines.append(pad + word)
# remove the old text and add the new
start_p = Point(0, start)
end_p = Point(len(w.buffer.lines[end-1]), end-1)
w.kill(start_p, end_p)
w.insert(start_p, '\n'.join(lines))
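# Runs "perl -c -I <lib> <path>" on the current buffer via
# commands.getstatusoutput() and either reports "Syntax OK" or switches to a
# *Perl-Check-Syntax* buffer containing the compiler output.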
class PerlCheckSyntax(method.Method):
'''Check the syntax of a perl file'''
def _args(self):
return [method.Argument("lib", type=type(""), prompt="Location of lib: ",
default=default.build_constant("."))]
def _execute(self, window, **vargs):
a = vargs['lib']
cmd = "perl -c -I '%s' '%s'" % (a, window.buffer.path)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
window.application.set_error("Syntax OK")
window.application.data_buffer("*Perl-Check-Syntax*", output, switch_to=False)
else:
window.application.data_buffer("*Perl-Check-Syntax*", output)
class PerlViewModulePerldoc(method.Method):
'''View documentation about this file using perldoc'''
def _execute(self, w, **vargs):
cmd = "perldoc -t -T '%s'" % w.buffer.path
(status, output) = commands.getstatusoutput(cmd)
w.application.data_buffer("*Perldoc*", output, switch_to=True)
class PerlViewWordPerldoc(method.Method):
'''View documentation about a package or function using perldoc'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
line = w.buffer.lines[cursor.y]
word_chars = string.letters + string.digits + '_:'
if line[cursor.x] not in word_chars:
w.application.set_error('error: no word selected')
return
start = cursor.x
while start > 0 and line[start - 1] in word_chars:
start -= 1
end = cursor.x + 1
        while end < len(line) and line[end] in word_chars:
end += 1
word = line[start:end]
w.application.set_error('the current word is: %r' % word)
ok = False
data = ''
        perl_word_re = re.compile('^[a-zA-Z_][a-zA-Z_0-9]*(?:::[a-zA-Z_][a-zA-Z_0-9]*)*$')
if not perl_word_re.match(word):
w.application.set_error('invalid word: %r' % word)
return
if '::' in word:
# we are probably dealing with a package
parts = word.split('::')
while len(parts) > 0:
newword = '::'.join(parts)
cmd = "perldoc -t -T '%s'" % newword
(status, data) = commands.getstatusoutput(cmd)
if status == 0:
word = newword
ok = True
break
parts.pop(-1)
elif ':' in word:
            w.application.set_error('invalid word: %r' % word)
return
        else:
            # try builtin function documentation first
            cmd = "perldoc -t -T -f '%s'" % word
            (status, data) = commands.getstatusoutput(cmd)
            if status == 0:
                ok = True
            else:
                # no builtin by that name; fall back to a general perldoc lookup
                cmd = "perldoc -t -T '%s'" % word
                (status, data) = commands.getstatusoutput(cmd)
                ok = status == 0
if not ok:
w.application.set_error('nothing found for %r' % word)
else:
w.application.data_buffer("*Perldoc*", data, switch_to=True)
w.application.set_error('displaying documentation for %r' % word)
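# Jumps to a sub by name, using the line map built lazily by
# build_function_map() from regex.perl_function.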
class PerlGotoFunction(method.Method):
'''Jump to a function defined in this module'''
def _args(self):
return [method.Argument("name", type=type(""), datatype="perlfunction",
prompt="Goto Function: ")]
def _execute(self, w, **vargs):
name = vargs['name']
functions = w.mode.get_functions()
if name in functions:
number = functions[name]
p = Point(0, number)
w.goto(p)
else:
w.application.set_error("Function %r was not found" % name)
class PerlListFunctions(method.Method):
'''Show the user all functions defined in this module'''
def _execute(self, w, **vargs):
names = w.mode.get_function_names()
output = "\n".join(names) + "\n"
w.application.data_buffer("*Perl-List-Functions*", output, switch_to=True)
class PerlWhichFunction(method.Method):
'''Show the user what function they are in'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y
name = None
while i >= 0 and name is None:
line = w.buffer.lines[i]
m = regex.perl_function.match(line)
if m:
name = m.group(1)
else:
i -= 1
if name is None:
w.application.set_error("None");
else:
w.application.set_error("line %d: %s" % (i, name))
class PerlHashCleanup(method.Method):
'''Correctly align assignment blocks and literal hashes'''
def _execute(self, window, **vargs):
cursor = window.logical_cursor()
b = window.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.perl_hash_cleanup,
regex.perl_assign_cleanup]
# if we aren't in a hash, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this hash block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this hash block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
# remove the old text and add the new
start_p = Point(0, start)
end_p = Point(0, end + 1)
window.kill(start_p, end_p)
window.insert_string(start_p, data)
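# Token-based variant of PerlHashCleanup that works from the highlighter's
# regions instead of line regexes; its C-c h binding is commented out in
# Perl.__init__ above, so it appears to be unused at the moment.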
class PerlHashCleanup2(method.Method):
'''Correctly align assignment blocks and literal hashes'''
def process_line2(self, line_regions, sep=None, indent=None):
(pre_toks, sep_tok, post_toks) = ([], None, [])
ok = False
before = True
for r in line_regions:
(start, end, attr, s, name) = r
if name == "":
continue
elif before:
if len(pre_toks) == 0:
pre_toks.append(r)
elif (name == "delimiter" and s == sep or
(sep is None and (s == "=" or s == "=>"))):
sep_tok = r
before = False
else:
pre_toks.append(r)
else:
post_toks.append(r)
ok = True
if ok:
return (True, sep_tok[3], (pre_toks, sep_tok, post_toks))
else:
return (False, "", ([], None, []))
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
b = w.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
all_regions = w.mode.get_regions()
line_regions = all_regions[cursor.y]
(ok, sep, group) = self.process_line2(line_regions)
if not ok:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = group
# find the beginning of this hash block
start = cursor.y
        while start > 0:
(ok2, sep2, group2) = self.process_line2(all_regions[start - 1], sep)
if not ok2:
break
start -= 1
groups_by_line[start] = group2
# find the end of this hash block
end = cursor.y
while end < len(b.lines) - 1:
(ok2, sep2, group2) = self.process_line2(all_regions[end + 1], sep)
if not ok2:
break
end += 1
groups_by_line[end] = group2
# find the minimum indented line
indent_w = None
for k in groups_by_line:
x = groups_by_line[k][0][0].start
if indent_w is None or x < indent_w:
indent_w = x
# find the max key length
key_w = None
for k in groups_by_line:
x = groups_by_line[k][0][-1].end - groups_by_line[k][0][0].start
if key_w is None or x > key_w:
key_w = x
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
line = ' ' * indent_w
l = groups_by_line[i][0][0].start
for t in groups_by_line[i][0]:
line += ' ' * max(0, t.start - l)
line += t.value
l = t.end
line += ' ' * max(0, key_w - l + groups_by_line[i][0][0].start)
line += ' ' + groups_by_line[i][1].value + ' '
l = groups_by_line[i][2][0].start
for t in groups_by_line[i][2]:
line += ' ' * max(0, t.start - l)
line += t.value
l = t.end
data += line + '\n'
# remove the old text and add the new
start_p = Point(0, start)
end_p = Point(0, end + 1)
w.kill(start_p, end_p)
w.insert(start_p, data)