parent df449931af
commit bea23f0e07

mode/perl.py (87 lines changed)
@@ -120,7 +120,7 @@ class PerlGrammar(Grammar):
         PatternRule(r'class', r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=->)"),
 
         # some basic stuff
-        PatternRule(r'delimiter', r"[,;=\?(){}\[\]\(\)]|->|=>|(?<!:):(?!=:)"),
+        PatternRule(r'delimiter', r"->|=>|(?<!:):(?!=:)|[,;=\?(){}\[\]\(\)]"),
PatternRule(r'operator', r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
|
||||
PatternRule(r'operator', r"\+\+|\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|\*|&&|&|\|\||\||/|\^|==|//|~|=~|!~|!=|%|!|\.|x(?![a-zA-Z_])"),
|
||||
PatternRule(r'noperator', r"(?:xor|or|not|ne|lt|le|gt|ge|eq|cmp|and)(?![a-zA-Z_])"),
|
||||
|

@@ -527,6 +527,91 @@ class PerlHashCleanup(Method):
         w.kill(start_p, end_p)
         w.insert_string(start_p, data)
 
+class PerlHashCleanup2(Method):
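+    # Work-in-progress replacement for PerlHashCleanup: split each hash line
+    # into segments at token boundaries (presumably so the => columns can be
+    # re-aligned); for now _execute just dumps the parsed segments.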
+    #_hash_parts = (
+    #    (TokenMatch('null', None),),
+    #    (TokenMatch('hash_key', None), TokenMatch('string.start', None)),
+    #    (TokenMatch('null', None),),
+    #    (TokenMatch('delimiter', '=>'),),
+    #    (TokenMatch('null', None),),
+    #)
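+    # Match this line's tokens against the expected _hash_parts sequence;
+    # return the line split into stages on success, or None.  (Note the XXX
+    # suffix: _execute calls self._hash_match, which is not defined here yet.)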
+    def _hash_matchXXX(self, group, line):
+        i = 0
+        j = 0
+        stages = []
+        while i < len(group):
+            token = group[i]
+            name = token.fqname()
+            data = token.string
+            k = len(stages)
+            if k < len(self._hash_parts):
+                for (name2, data2) in self._hash_parts[k]:
+                    if ((name2 is None or name == name2) and
+                        (data2 is None or data == data2)):
+                        stages.append(line[j:token.x])
+                        j = token.x
+            else:
+                stages.append(line[j:])
+                return stages
+            i += 1
+        return None
+
+    def _assign_match(self, group):
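+        # stub: never matches, so _execute only takes the hash branch for now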
+        return None
+
+    def _execute(self, w, **vargs):
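+        # parse the hash (or, eventually, assignment) block around the
+        # cursor, then dump the per-line segments into a scratch buffer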
+        cursor = w.logical_cursor()
+        tokens = w.buffer.highlights[w.mode.name()].tokens
+        if self._hash_match(tokens[cursor.y]):
+            token_groups = self._parse_hash(w, **vargs)
+        elif self._assign_match(tokens[cursor.y]):
+            token_groups = self._parse_assign(w, **vargs)
+        else:
+            w.set_error("Not a hash line")
+            return
+
+        ys = token_groups.keys()
+        ys.sort()
+
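+        # split each matched line into segments, cutting just before each
+        # token the matcher returned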
+        segment_groups = []
+        for y in ys:
+            line = w.buffer.lines[y]
+            segments = []
+            i = 0
+            for token in token_groups[y]:
+                segments.append(line[i:token.x])
+                i = token.x
+            segments.append(line[i:])
+            segment_groups.append(segments)
+
output = "Lines %d through %d\n%r" % (ys[0] + 1, ys[-1] + 1, segment_groups)
|
||||
w.application.data_buffer("hash-dump", output, switch_to=True)
|
||||
|
||||
+    def _parse_hash(self, w, **vargs):
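+        # grow the block of matching hash lines outward from the cursor:
+        # scan up (y1) and down (y2) until a line fails to match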
+        cursor = w.logical_cursor()
+        tokens = w.buffer.highlights[w.mode.name()].tokens
+        lines = {cursor.y: self._hash_match(tokens[cursor.y])}
+
+        y1 = cursor.y
+        while y1 > 0:
+            match = self._hash_match(tokens[y1 - 1])
+            if not match:
+                break
+            lines[y1 - 1] = match
+            y1 -= 1
+
+        y2 = cursor.y
+        while y2 < len(tokens) - 1:
+            match = self._hash_match(tokens[y2 + 1])
+            if not match:
+                break
+            lines[y2 + 1] = match
+            y2 += 1
+
+        return lines
+    def _parse_assign(self, w, **vargs):
+        pass
+
 #class PerlWrapParagraph(WrapParagraph):
 class PerlWrapParagraph(Method):
     '''Wrap Comments and POD'''