pmacs3/mode/perl.py

import re, sets, string, sys
import color, commands, default, method, mode2, regex, tab2
from point2 import Point
from lex3 import Grammar, PatternRule, ContextPatternRule, RegionRule, OverridePatternRule, PatternGroupRule
from method import Argument, Method, WrapParagraph
class PodGrammar(Grammar):
rules = [
RegionRule(r'entry', r'(?<=^=head[1-4]) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=over) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=item) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?:(?<=^=begin)|(?<=^=end)) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=encoding) +.*$', Grammar, '^\n$'),
]
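# Illustrative note (my reading of the rules above, not from the original
# source): the lookbehinds anchor each 'entry' region to the text that follows
# a POD directive, and the region runs until the next blank line, e.g.
#
#   =head1 NAME
#   =over 4
#   =item * first point
#   =encoding utf8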
def _make_string_rules(forbidden=None):
if forbidden:
rule = PatternRule(r'scalar', r"\$[^A-Za-z0-9 \\%s](?![A-Za-z0-9_])" % forbidden)
else:
rule = ContextPatternRule(r'scalar', r"\$[^A-Za-z0-9 %(delim)s](?![A-Za-z0-9_])", r"\$[^A-Za-z0-9 ](?![A-Za-z0-9_])")
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
PatternRule(r'deref', r"\$+[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?:->{\$?(?:[a-zA-Z_][a-zA-Z_0-9]*|'(?:\\.|[^'\\])*'|\"(\\.|[^\\\"])*\")}|->\[\$?[0-9a-zA-Z_]+\])+"),
PatternRule(r'length', r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
rule,
PatternRule(r'scalar', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'cast', r"[\$\@\%\&]{.*?}"),
PatternRule(r'array', r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
]
return rules
class StringGrammar(Grammar):
rules = _make_string_rules()
class QuotedGrammar1(Grammar):
rules = _make_string_rules(')')
class QuotedGrammar2(Grammar):
rules = _make_string_rules('}')
class QuotedGrammar3(Grammar):
rules = _make_string_rules('>')
class QuotedGrammar4(Grammar):
rules = _make_string_rules(']')
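# Note on _make_string_rules (descriptive comment, added for clarity): the
# 'forbidden' character is the closing delimiter of an interpolating quote
# region, and it is excluded from the punctuation-scalar rule so that, e.g.,
# inside qq( ... ) the ')' in '$)' is read as the end of the region rather
# than as the scalar $).  StringGrammar (used for "..." and heredocs) has no
# fixed delimiter, so it uses ContextPatternRule to pull %(delim)s from the
# enclosing region, falling back to the plain pattern when there is none.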
class PerlGrammar(Grammar):
rules = [
RegionRule(r'heredoc', r"<<(?P<heredoc>[a-zA-Z0-9_]+) *;", StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'heredoc', r'<< *"(?P<heredoc>[a-zA-Z0-9_]+)" *;', StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'heredoc', r"<< *'(?P<heredoc>[a-zA-Z0-9_]+)' *;", Grammar, r'^%(heredoc)s$'),
RegionRule(r'evaldoc', r"<< *`(?P<heredoc>[a-zA-Z0-9_]+)` *;", StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'endblock', r"^(?:__END__|__DATA__) *$", Grammar, r''),
RegionRule(r'pod', r'^=[a-zA-Z0-9_]+', PodGrammar, r'^=cut'),
OverridePatternRule(r'comment', r'#@@:(?P<token>[.a-zA-Z0-9_]+):(?P<mode>[.a-zA-Z0-9_]+) *$'),
#PatternRule(r'prototype', r'\([\\@$%&*;]+\)'),
PatternGroupRule(r'prototype', r'delimiter', r'\(', r'prototype', r'[\[\]\\@$%&*;]+', r'delimiter', '\)'),
PatternRule(r'comment', r'#.*$'),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", Grammar, r"'"),
RegionRule(r'evalstring', r"`", StringGrammar, r"`"),
PatternRule(r'number', r'0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?'),
PatternRule(r'keyword', r"(?<!->)(?:STDIN|STDERR|STDOUT|continue|do|else|elsif|eval|foreach|for|if|last|my|next|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z0-9_])"),
PatternRule(r'hash_key', r'(?<={)[A-Za-z0-9_]+(?=})'),
PatternRule(r'hash_key', r'[A-Za-z0-9_]+(?= *=>)'),
PatternRule(r'length', r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'cast', r'[\$\@\%\^\&](?= *{)'),
PatternRule(r'scalar', r"\$[\[\]<>ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])"),
PatternRule(r'array', r"@_"),
PatternRule(r'function', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?=-> *\()"),
PatternRule(r'scalar', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'array', r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'hash', r"%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'deref', r"[@%\$&\*](?={)"),
# match regexes
RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)) *(?P<delim>/)', StringGrammar, r'/[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>[^ #a-zA-Z0-9_])', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'match', r'm(?P<delim>#)', StringGrammar, r'#[a-z]*'),
# replace regexes
RegionRule(r'replace', r's *(?P<delim>[^ a-zA-Z0-9_])', StringGrammar, r'%(delim)s', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'replace', r's(?P<delim>#)', StringGrammar, r'#', StringGrammar, r'#[a-z]*'),
# translate operator
RegionRule(r'translate', r'(?:y|tr) *(?P<delim>[^ a-zA-Z0-9_])', Grammar, r'%(delim)s', Grammar, r'%(delim)s[a-z]*'),
RegionRule(r'translate', r'(?:y|tr)#', Grammar, r'#', Grammar, r'#[a-z]*'),
# some more basic stuff
PatternRule(r'package', r"(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'sub', r"(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'use', r"(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'require', r"(?<=require )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'label', r'[a-zA-Z_][a-zA-Z0-9_]*:(?!:)'),
PatternRule(r'method', r"(?<=->)[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'function', r"&\$*(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'builtin', r"(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])"),
# quote operator: qq(), qx() and qr() usually interpolate
RegionRule(r'quoted', r'q[rqx] *(?P<delim>\()', QuotedGrammar1, r'\)'),
RegionRule(r'quoted', r'q[rqx] *(?P<delim>{)', QuotedGrammar2, r'}'),
RegionRule(r'quoted', r'q[rqx] *(?P<delim><)', QuotedGrammar3, r'>'),
RegionRule(r'quoted', r'q[rqx] *(?P<delim>\[)', QuotedGrammar4, r'\]'),
RegionRule(r'quoted', r"q[rqx] *(?P<delim>')", Grammar, r"'"),
RegionRule(r'quoted', r'q[rqx] *(?P<delim>[^ #])', StringGrammar, r'%(delim)s'),
RegionRule(r'quoted', r'q[rqx](?P<delim>#)', StringGrammar, r'#'),
# quote operator: q() and qw() do not interpolate
RegionRule(r'quoted', r'qw? *\(', Grammar, r'\)'),
RegionRule(r'quoted', r'qw? *{', Grammar, r'}'),
RegionRule(r'quoted', r'qw? *<', Grammar, r'>'),
RegionRule(r'quoted', r'qw? *\[', Grammar, r'\]'),
RegionRule(r'quoted', r'qw?#', Grammar, r'#'),
RegionRule(r'quoted', r'qw? *(?P<delim>[^ #])', Grammar, r'%(delim)s'),
PatternRule(r'function', r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?= *\()"),
PatternRule(r'class', r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=->)"),
# some basic stuff
PatternRule(r'delimiter', r"->|=>|(?<!:):(?!:)|[,;=\?(){}\[\]]"),
PatternRule(r'operator', r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
PatternRule(r'operator', r"\+\+|\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|\*|&&|&|\|\||\||/|\^|==|//|~|=~|!~|!=|%|!|\.|x(?![a-zA-Z_])"),
PatternRule(r'noperator', r"(?:xor|or|not|ne|lt|le|gt|ge|eq|cmp|and)(?![a-zA-Z_])"),
PatternRule(r'bareword', r'(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*'),
PatternRule(r"eol", r"\n$"),
]
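# A few sample fragments and the token type the rules above should assign to
# them (informal, based on reading the regexes rather than running the lexer):
#
#   my %h = (foo => $x);   'my' keyword, '%h' hash, 'foo' hash_key, '$x' scalar
#   @ARGV, $_, $#list      array, scalar (punctuation form), length
#   Foo::Bar->new($x)      'Foo::Bar' class, 'new' method, '$x' scalar
#   print "id: $id\n";     'print' builtin; "..." string region with '$id' as
#                          string.scalar and '\n' as string.escaped
#   m/^\d+$/i              a match region delimited by '/', the trailing 'i'
#                          flag picked up by the '/[a-z]*' end pattern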
class PerlTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'keyword' and t.string == 'sub'
def _handle_open_token(self, currlvl, y, i):
currlvl = tab2.StackTabber._handle_open_token(self, currlvl, y, i)
return currlvl
def _handle_close_token(self, currlvl, y, i):
self._opt_pop('cont')
currlvl = tab2.StackTabber._handle_close_token(self, currlvl, y, i)
token = self.get_token(y, i)
if self.is_rightmost_token(y, i):
if token.string == '}':
self._opt_pop('cont')
else:
self._opt_append('cont', currlvl + 4)
return currlvl
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'delimiter' and token.string == ';':
self._opt_pop('cont')
elif fqname == 'heredoc.start':
self._opt_append('heredoc', None)
elif fqname == 'heredoc.end':
self._opt_pop('heredoc')
self._opt_pop('cont')
elif fqname == 'quoted.start':
self._opt_append('quoted', currlvl + 4)
elif fqname == 'quoted.end':
self._opt_pop('cont')
self._opt_pop('quoted')
elif fqname == 'evaldoc.start':
self._opt_append('evaldoc', None)
elif fqname == 'evaldoc.end':
self._opt_pop('evaldoc')
self._opt_pop('cont')
elif fqname == 'pod.start':
self._opt_append('pod', None)
elif fqname == 'pod.end':
self._opt_pop('pod')
currlvl = 0
elif fqname == 'string.start':
self._opt_append('string', None)
elif fqname == 'string.end':
self._opt_pop('string')
if self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + 4)
if self.is_rightmost_token(y, i):
if (not fqname.startswith('pod') and
not fqname.startswith('heredoc') and
not fqname.startswith('string') and
not fqname.startswith('endblock') and
not fqname == 'eol' and
not fqname == 'comment' and
not fqname == 'null' and
token.string not in ('}', ';', '(', '{', '[', ',')):
self._opt_append('cont', currlvl + 4)
return currlvl
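# Indentation sketch (informal, assuming the usual StackTabber behaviour for
# the opentokens/opentags declared on the mode below): opening delimiters push
# a level, and a line whose rightmost token is an ordinary operator or value
# pushes a 'cont' marker at currlvl + 4, which a ';', a closing '}' or the end
# of a quoted region pops again.  For example:
#
#   my $total = $price +      <- '+' is the rightmost token, so 'cont' is pushed
#       $tax;                 <- continuation gets 4 extra columns; ';' pops it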
class Perl(mode2.Fundamental):
tabbercls = PerlTabber
grammar = PerlGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
# comments
'comment': ('red', 'default'),
'endblock.start': ('red', 'default'),
'endblock.null': ('red', 'default'),
'endblock.end': ('red', 'default'),
# pod
'pod.start': ('red', 'default'),
'pod.null': ('red', 'default'),
'pod.entry.start': ('magenta', 'default'),
'pod.entry.null': ('magenta', 'default'),
'pod.entry.end': ('magenta', 'default'),
'pod.end': ('red', 'default'),
# basic stuff
'escaped': ('magenta', 'default'),
'null': ('default', 'default'),
'delimiter': ('default', 'default'),
'sub': ('cyan', 'default'),
'prototype': ('magenta', 'default'),
'number': ('default', 'default'),
'operator': ('default', 'default'),
'noperator': ('magenta', 'default'),
'endblock': ('red', 'default'),
'keyword': ('magenta', 'default'),
'cast': ('yellow', 'default'),
'scalar': ('yellow', 'default'),
'array': ('yellow', 'default'),
'deref': ('yellow', 'default'),
'hash': ('yellow', 'default'),
'hash_key': ('green', 'default'),
'function': ('cyan', 'default'),
'builtin': ('magenta', 'default'),
'method': ('cyan', 'default'),
'bareword': ('default', 'default'),
'label': ('cyan', 'default'),
'package': ('cyan', 'default'),
'class': ('cyan', 'default'),
'use': ('cyan', 'default'),
'require': ('cyan', 'default'),
# heredoc/evaldoc
'heredoc.start': ('green', 'default'),
'heredoc.null': ('green', 'default'),
'heredoc.end': ('green', 'default'),
'evaldoc.start': ('cyan', 'default'),
'evaldoc.null': ('cyan', 'default'),
'evaldoc.end': ('cyan', 'default'),
# strings
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.escaped': ('magenta', 'default'),
'string.deref': ('yellow', 'default'),
'string.end': ('green', 'default'),
# `` strings
'evalstring.start': ('cyan', 'default'),
'evalstring.null': ('cyan', 'default'),
'evalstring.escaped': ('magenta', 'default'),
'evalstring.deref': ('yellow', 'default'),
'evalstring.end': ('cyan', 'default'),
# quoted region
'quoted': ('cyan', 'default'),
'quoted.start': ('cyan', 'default'),
'quoted.null': ('cyan', 'default'),
'quoted.escaped': ('magenta', 'default'),
'quoted.deref': ('yellow', 'default'),
'quoted.end': ('cyan', 'default'),
# match regex
'match.start': ('cyan', 'default'),
'match.end': ('cyan', 'default'),
'match.null': ('cyan', 'default'),
# replace regex
'replace.start': ('cyan', 'default'),
'replace.middle0': ('cyan', 'default'),
'replace.end': ('cyan', 'default'),
'replace.null': ('cyan', 'default'),
'replace.escaped': ('magenta', 'default'),
'replace.deref': ('yellow', 'default'),
'replace.length': ('yellow', 'default'),
'replace.scalar': ('yellow', 'default'),
'replace.hash': ('yellow', 'default'),
'replace.cast': ('yellow', 'default'),
# translate regex
'translate.start': ('magenta', 'default'),
'translate.middle0': ('magenta', 'default'),
'translate.end': ('magenta', 'default'),
'translate.null': ('magenta', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(PerlSetLib(), ('C-c l',))
self.add_action_and_bindings(PerlCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PerlHashCleanup(), ('C-c h',))
#self.add_action_and_bindings(PerlHashCleanup2(), ('C-c h',))
self.add_action_and_bindings(PerlViewModulePerldoc(), ('C-c v',))
self.add_action_and_bindings(PerlViewWordPerldoc(), ('C-c p',))
self.add_action_and_bindings(PerlWrapParagraph(), ('M-q',))
self.add_action_and_bindings(PerlGotoFunction(), ('C-c M-g',))
self.add_action_and_bindings(PerlWhichFunction(), ('C-c w',))
self.add_action_and_bindings(PerlListFunctions(), ('C-c W',))
# visual tag matching
self.add_bindings('close-paren', (')',))
self.add_bindings('close-bracket', (']',))
self.add_bindings('close-brace', ('}',))
# perl-specific
self.functions = None
self.perllib = 'lib'
def name(self):
return "Perl"
def build_function_map(self):
b = self.window.buffer
self.functions = {}
for i in range(0, len(b.lines)):
m = regex.perl_function.match(b.lines[i])
if m:
self.functions[m.group(1)] = i
def get_functions(self):
if self.functions is None:
self.build_function_map()
return self.functions
def get_function_names(self):
functions = self.get_functions()
pairs = [[functions[key], key] for key in functions]
pairs.sort()
names = [x[1] for x in pairs]
return names
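# Example (illustrative; assumes regex.perl_function captures the sub name in
# group 1): for a buffer whose line index 9 is 'sub new {' and whose line
# index 41 is 'sub render {', get_functions() returns {'new': 9, 'render': 41}
# and get_function_names() returns ['new', 'render'] in file order.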
class PerlSetLib(Method):
'''Set the path(s) to find perl modules'''
args = [Argument("lib", type=type(""), prompt="Location of lib: ",
default=default.build_constant("."))]
def _execute(self, w, **vargs):
w.mode.perllib = vargs['lib']
class PerlCheckSyntax(Method):
'''Check the syntax of a perl file'''
def _execute(self, w, **vargs):
app = w.application
cmd = "perl -c -I '%s' '%s'" % (w.mode.perllib, w.buffer.path)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
app.set_error("Syntax OK")
app.data_buffer("*Perl-Check-Syntax*", output, switch_to=False)
else:
app.data_buffer("*Perl-Check-Syntax*", output)
class PerlViewModulePerldoc(Method):
'''View documentation about this file using perldoc'''
def _execute(self, w, **vargs):
cmd = "perldoc -t -T '%s'" % w.buffer.path
(status, output) = commands.getstatusoutput(cmd)
w.application.data_buffer("*Perldoc*", output, switch_to=True)
class PerlViewWordPerldoc(Method):
'''View documentation about a package or function using perldoc'''
def _try(self, w, word, asfunc=False):
if asfunc:
cmd = "PERL5LIB=%r perldoc -t -T -f '%s'" % (w.mode.perllib, word)
else:
cmd = "PERL5LIB=%r perldoc -t -T '%s'" % (w.mode.perllib, word)
(status, data) = commands.getstatusoutput(cmd)
if status == 0:
return data
else:
return None
def _show(self, w, data, word):
w.application.data_buffer("*Perldoc*", data, switch_to=True)
w.application.set_error('displaying documentation for %r' % word)
def _execute(self, w, **vargs):
token = w.get_token()
#word = w.get_word(wl=string.letters + string.digits + '_:')
word = token.string
# make sure that the name is (mostly) valid
if word is None:
w.application.set_error('no word selected')
return
elif ':' in word and '::' not in word:
w.application.set_error('invalid word: %r' % word)
return
# first try it is a package
parts = word.split('::')
while len(parts) > 0:
newword = '::'.join(parts)
data = self._try(w, newword, asfunc=False)
if data:
self._show(w, data, newword)
return
parts.pop(-1)
# then try it as a function
data = self._try(w, word, asfunc=True)
if data:
self._show(w, data, word)
else:
w.application.set_error('nothing found for %r' % word)
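# Lookup order (illustrative): for a word like 'Data::Dumper' the method tries
# perldoc on 'Data::Dumper' and then 'Data' as packages; for a bare word like
# 'sprintf' it tries it as a package first and then falls back to
# 'perldoc -f sprintf' as a builtin, reporting an error if nothing matches.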
class PerlGotoFunction(Method):
'''Jump to a function defined in this module'''
args = [Argument("name", type(""), "perlfunction", "Goto Function: ")]
def _execute(self, w, **vargs):
name = vargs['name']
functions = w.mode.get_functions()
if name in functions:
w.goto(Point(0, functions[name]))
else:
w.application.set_error("Function %r was not found" % name)
class PerlListFunctions(Method):
'''Show the user all functions defined in this module'''
def _execute(self, w, **vargs):
names = w.mode.get_function_names()
output = "\n".join(names) + "\n"
w.application.data_buffer("*Perl-List-Functions*", output, switch_to=True)
class PerlWhichFunction(Method):
'''Show the user what function they are in'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y
name = None
while i >= 0 and name is None:
line = w.buffer.lines[i]
m = regex.perl_function.match(line)
if m:
name = m.group(1)
else:
i -= 1
if name is None:
w.application.set_error("None");
else:
w.application.set_error("line %d: %s" % (i, name))
class PerlHashCleanup(Method):
'''Correctly align assignment blocks and literal hashes'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
b = w.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.perl_hash_cleanup,
regex.perl_assign_cleanup]
# if we aren't in a hash, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this hash block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this hash block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
# remove the old text and add the new
start_p = Point(0, start)
if end < len(w.buffer.lines) - 1:
end_p = Point(0, end + 1)
else:
end_p = Point(len(w.buffer.lines[end]), end)
w.kill(start_p, end_p)
w.insert_string(start_p, data)
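# Example of the realignment PerlHashCleanup performs (illustrative): with the
# cursor anywhere inside
#     foo => 1,
#     barbaz => 2,
# the block becomes
#     foo    => 1,
#     barbaz => 2,
# i.e. every key is padded to the width of the longest key so the separators
# line up, and the whole block takes the indentation of its least indented
# line.  The same applies to '=' assignment blocks via perl_assign_cleanup.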
class PerlHashCleanup2(Method):
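# Note (descriptive comment, added for clarity): PerlHashCleanup2 is an
# unfinished experiment; its key binding is commented out in Perl.__init__
# above, the _hash_parts table it needs is commented out just below,
# _parse_assign is a stub, and _execute calls _hash_match while only
# _hash_matchXXX is defined.  As written it dumps the parsed segments to a
# buffer rather than rewriting them.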
#_hash_parts = (
# (TokenMatch('null', None),),
# (TokenMatch('hash_key', None), TokenMatch('string.start', None)),
# (TokenMatch('null', None),),
# (TokenMatch('delimiter', '=>'),),
# (TokenMatch('null', None),),
#)
def _hash_matchXXX(self, group, line):
i = 0
j = 0
stages = []
while i < len(group):
token = group[i]
name = token.fqname()
data = token.string
k = len(stages)
if k < len(self._hash_parts):
for (name2, data2) in self._hash_parts[k]:
if ((name2 is None or name == name2) and
(data2 is None or data == data2)):
stages.append(line[j:token.x])
j = token.x
else:
stages.append(line[j:])
return stages
i += 1
return None
def _assign_match(self, group):
return None
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
tokens = w.buffer.highlights[w.mode.name()].tokens
if self._hash_match(tokens[cursor.y]):
token_groups = self._parse_hash(w, **vargs)
elif self._assign_match(tokens[cursor.y]):
token_groups = self._parse_assign(w, **vargs)
else:
w.set_error("Not a hash line")
return
ys = token_groups.keys()
ys.sort()
segment_groups = []
for y in ys:
line = w.buffer.lines[y]
segments = []
i = 0
for token in token_groups[y]:
segments.append(line[i:token.x])
i = token.x
segments.append(line[i:])
segment_groups.append(segments)
output = "Lines %d through %d\n%r" % (ys[0] + 1, ys[-1] + 1, segment_groups)
w.application.data_buffer("hash-dump", output, switch_to=True)
def _parse_hash(self, w, **vargs):
cursor = w.logical_cursor()
tokens = w.buffer.highlights[w.mode.name()].tokens
lines = {cursor.y: self._hash_match(tokens[cursor.y])}
y1 = cursor.y
while y1 > 0:
match = self._hash_match(tokens[y1 - 1])
if not match:
break
lines[y1 - 1] = match
y1 -= 1
y2 = cursor.y
while y2 < len(tokens) - 1:
match = self._hash_match(tokens[y2 + 1])
if not match:
break
lines[y2 + 1] = match
y2 += 1
return lines
def _parse_assign(self, w, **vargs):
pass
#class PerlWrapParagraph(WrapParagraph):
class PerlWrapParagraph(Method):
'''Wrap Comments and POD'''
# enumerations for line types
LT_COMMENT = 1
LT_POD = 2
margin = 80
comment_re = re.compile('( *)(#+)( *)(.*)')
def _is_newline(self, t):
return t.name == 'eol'
def _is_space(self, t):
return t.name == 'null' and regex.space.match(t.string)
def _detect_line_type(self, w, y):
h = w.buffer.highlights[w.mode.name()]
ltype = None
for t in h.tokens[y]:
fqname = t.fqname()
if fqname == 'null' or fqname == 'eol':
pass
elif fqname.startswith('comment'):
if ltype and ltype != self.LT_COMMENT:
ltype = None
break
ltype = self.LT_COMMENT
elif fqname.startswith('pod'):
if ltype and ltype != self.LT_POD:
ltype = None
break
ltype = self.LT_POD
else:
ltype = None
break
return ltype
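# Line classification example (illustrative): a line qualifies for wrapping
# only if every token on it besides whitespace/eol is comment or POD, never a
# mix, e.g.
#   # a plain comment line          -> LT_COMMENT
#   =item some POD text             -> LT_POD
#   my $x = 1;  # trailing comment  -> None (code and comment mixed)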
def _fix_comments(self, c, w):
h = w.buffer.highlights[w.mode.name()]
y1 = c.y
y2 = c.y
while y2 < len(w.buffer.lines) - 1:
if self._detect_line_type(w, y2 + 1):
y2 += 1
else:
break
lines = w.buffer.lines[y1:y2 + 1]
m = self.comment_re.match(lines[0])
assert m
prepend = m.group(1) + m.group(2)
rmargin = self.margin - len(prepend)
dpad = m.group(3)
segments = []
for line in lines:
m = self.comment_re.match(line)
assert m
pad, data = m.group(3), m.group(4)
if segments and pad == dpad and segments[-1][0] == dpad and segments[-1][1]:
data = segments.pop(-1)[1] + ' ' + data
i = 0
while len(pad) + len(data[i:]) > rmargin:
while data[i] == ' ':
i += 1
j = rmargin - len(pad)
while j >= 0 and data[i + j] != ' ':
j -= 1
if j < 0:
j = rmargin - len(pad)
segments.append([pad, data[i:i + j]])
i += j
if data:
while data[i] == ' ':
i += 1
segments.append([pad, data[i:]])
else:
segments.append(['', ''])
lines2 = [prepend + x[0] + x[1] for x in segments]
p1 = Point(0, y1)
p2 = Point(len(w.buffer.lines[y2]), y2)
w.buffer.delete(p1, p2)
w.buffer.insert_lines(p1, lines2)
w.set_error("wrapped comment lines %d-%d" % (y1 + 1, y2 + 1))
def _fix_pod(self, c, w):
w.set_error("pod wrapping not yet supported")
def _execute(self, w, **vargs):
c = w.logical_cursor()
ltype = self._detect_line_type(w, c.y)
if ltype == self.LT_COMMENT:
self._fix_comments(c, w)
elif ltype == self.LT_POD:
WrapParagraph()._execute(w, **vargs)
else:
w.set_error("did not detect comment or pod lines")