branch : pmacs2
This commit is contained in:
moculus 2007-06-13 15:44:09 +00:00
parent efadfdf4b5
commit b1512f9150
8 changed files with 166 additions and 204 deletions

View File

@ -277,7 +277,7 @@ class Application:
f.close()
b = buffer2.FileBuffer(path)
b.open()
#self.add_window_to_buffer(b, self.active_slot)
window2.Window(b, self, height=0, width=0)
self.add_buffer(b)
if switch_to:
self.switch_buffer(b)
@ -288,7 +288,7 @@ class Application:
b = buffer2.DataBuffer(name, data)
if modename is not None:
b.modename = modename
#self.add_window_to_buffer(b, self.active_slot)
window2.Window(b, self, height=0, width=0)
self.add_buffer(b)
if switch_to:
self.switch_buffer(b)
@ -476,19 +476,23 @@ class Application:
line = lines[y]
if modename in w.buffer.highlights:
group = w.buffer.highlights[modename].tokens[y]
highlighter = w.buffer.highlights[modename]
group = highlighter.tokens[y]
j = 0
for token in group:
#assert token.y == y, '%d == %d' % (token.y, y)
assert token.y == y, w.buffer.highlights[modename].dump()
assert token.y == y, highlighter.dump()
if token.x < x:
continue
elif token.x >= x + slot.width:
break
name = token.fqname()
#assert name in w.mode.colors, name
c = w.mode.colors.get(name, w.mode.default_color)
fqlist = token.fqlist()
c = w.mode.default_color
for i in range(0, len(fqlist)):
name = '.'.join(fqlist[i:])
if name in w.mode.colors:
c = w.mode.colors[name]
break
if DARK_BACKGROUND:
c |= curses.A_BOLD
@ -507,6 +511,7 @@ class Application:
else:
x += slot.width
count += 1
self.win.addch(slot.offset, 0, 'N', curses.A_REVERSE)
if self.margins_visible:
for (limit, shade) in self.margins:
@ -518,9 +523,14 @@ class Application:
char = self.win.inch(j + slot.offset, limit) & 255
attr = color.build('default', shade, 'bold')
self.win.addch(j + slot.offset, limit, char, attr)
self.win.addch(slot.offset, 0, 'O', curses.A_REVERSE)
assert self.mini_active is False and self.active_slot == i, \
'%r %d (%d)' % (self.mini_active, self.active_slot, i)
if self.mini_active is False and self.active_slot == i:
self.win.addch(slot.offset, 0, 'P', curses.A_REVERSE)
if False and w.active_point is not None and w.point_is_visible(w.active_point):
raise Exception, "no way man"
pa = w.physical_point(w.active_point)
va = pa.offset(0, -w.visible_offset())
if len(lines[va.y]):
@ -530,17 +540,21 @@ class Application:
self.win.addch(va.y + slot.offset, va.x, a, curses.A_REVERSE)
else:
assert px is not None and py is not None
self.win.addch(slot.offset, 0, 'Q', curses.A_REVERSE)
if cy >= len(lines):
assert False
self.set_error('in main1: cursor error; %d >= %d' % (cy, len(lines)))
return
elif cx == len(lines[cy]):
c = ' '
elif px > len(lines[cy]):
assert False
self.set_error('why? %r %r' % (cx, len(lines[cy])))
return
else:
c = lines[cy][cx]
c = 'X'
self.win.addch(slot.offset + py , px, c, curses.A_REVERSE)
def draw_status_bar(self, slotname):

View File

@ -234,6 +234,8 @@ class Buffer(object):
# insertion into buffer
def insert_lines(self, p, lines, act=ACT_NORM, force=False):
if lines == ['(']:
raise Exception, "damn"
llen = len(lines)
assert llen > 0
if not force:

22
foo.pl
View File

@ -1,3 +1,25 @@
package Foo;
use warnings FATAL => 'all';
use strict;
use Carp;
${$blarg->{foo}} = 33;
${blarg} = 33;
@{$blarg};
@$blarg;
foreach my $i (0..40) {
if($foo) {
print "joshua!\n";
}
}
$blah::blahrg->foo(one=>3, two=>2, three=>1);
my $foo = <<EOF;
hi hi hi EOF hi hi EOF
EOF

View File

@ -76,13 +76,11 @@ class Highlighter:
# relexing
# ======================
def relex(self, lines, y1, x1, y2, x2):
# start the relexing process
if self.tokens[y1]:
token = self.tokens[y1][0]
def relex(self, lines, y1, x1, y2, x2, token=None):
if token:
self.lexer.resume(lines, y1, 0, token)
else:
token = None
self.lexer.resume(lines, y1, 0, token)
self.lexer.lex(lines, y1, 0)
# these keep track of the current y coordinate, the current token index
# on line[y], and the current "new token", respectively.
@ -121,19 +119,25 @@ class Highlighter:
old_token = self.tokens[y][i]
assert old_token.y == y, "%d == %d" % (old_token.y, y)
else:
#raise Exception, "K %d %r" % (i, new_token)
old_token = None
if old_token is None:
#raise Exception, "J %d %r" % (i, new_token)
# since we don't have a previous token at this location, just
# insert the new one
self.tokens[y].insert(i, new_token)
i += 1
getnext = True
elif '.' not in old_token.name and old_token == new_token:
elif old_token == new_token:
# if they match, then leave the old one alone
#if i > 1:
# raise Exception, "A %r == %r" % (old_token, new_token)
i += 1
getnext = True
if new_token.y >= y2 and new_token.end_x() >= x2:
#if i > 1:
# raise Exception, "B %r == %r" % (old_token, new_token)
# in this case, we can (probably) assume that the rest of
# the lines will lex the same way
break
@ -151,8 +155,6 @@ class Highlighter:
# this should never happen
raise Exception, "this isn't happening"
# deletion
# ======================
def update_del(self, lines, y1, x1, y2, x2):
@ -162,46 +164,45 @@ class Highlighter:
# first let's delete any token who falls in the range of the change (or,
# in the case of child tokens, whose parent is being deleted).
y = y1
i = 0
done = False
y = y1
i = 0
done = False
if self.tokens[y1]:
ctoken = self.tokens[y1][0]
else:
ctoken = None
while not done:
if i < len(self.tokens[y]):
# figure out if this token is in our range. notice that
# delete_token() will take care of the need to recursively
# delete children for us
token = self.tokens[y][i]
if token.y >= y2 and token.x >= x2:
if token.y > y2 or y == y2 and token.x >= x2:
done = True
elif token.y <= y1 and token.x < x1:
pass
elif token.y < y1 or token.y == y1 and token.x < x1:
i += 1
else:
self.delete_token(y, i)
# ok, so now figure out what we should do next, either advancing a
# token, or being finished with this part
if i < len(self.tokens[y]) - 1:
i += 1
elif y < len(self.tokens) - 1:
y += 1
i = 0
else:
done = True
y += 1
i = 0
# ok, so now we need to "adjust" the (x,y) coordinates of all the tokens
# after the change. first we will copy over the pre-deletion tokens.
newtokens = [[] for x in range(0, len(self.tokens) - y2 + y1)]
for y in range(0, y1 + 1):
for y in range(0, y1):
for token in self.tokens[y]:
newtokens[y].append(token)
# then the tokens which occured on the same line as the end of the
# deletion.
for token in self.tokens[y2]:
token.x = token.x - x2 + x1
token.y = y1
for token in self.tokens[y1]:
newtokens[y1].append(token)
if y2 != y1:
for token in self.tokens[y2]:
token.x = token.x - x2 + x1
token.y = y1
newtokens[y1].append(token)
# finally, we will copy over the tokens from subsequent lines
for y in range(y2 + 1, len(self.tokens)):
@ -211,73 +212,14 @@ class Highlighter:
# now save our new tokens
self.tokens = newtokens
    def update_del2(self, lines, y1, x1, y2, x2):
        """Adjust the token structure after text was deleted from the buffer.

        The deleted region runs from (y1, x1) to (y2, x2) (line, column).
        Tokens entirely before the region are kept, tokens entirely inside
        it are dropped, tokens overlapping it have their strings truncated,
        and tokens after it are shifted up/left by the size of the deletion.

        NOTE(review): this commit removes this method in favor of the
        resume-token-based update_del above.
        """
        assert y1 >= 0
        assert y1 <= y2
        assert y2 < len(lines)
        # how far everything after the deletion shifts left (same-line only)
        # and up, respectively.
        xdelta = x2 - x1
        ydelta = y2 - y1
        # construct a new token data structure; it will have one list for
        # every y index in lines. also, fill in tokens before the change
        newtokens = [[] for x in lines]
        for y in range(0, y1):
            newtokens[y] = self.tokens[y]
        # so for each line we currently have, we need to process every token,
        # transferring them from the old structure to the new, and modifying
        # them appropriately to take the change into account.
        for y in range(y1, len(self.tokens)):
            while self.tokens[y]:
                # so remove the token from the old structure, and figure out
                # where it stands in relation to the deletion
                token = self.tokens[y].pop(0)
                # token span on this line: [tx1, tx2)
                tx1 = token.x
                tx2 = token.x + len(token.string)
                # the notation "*|*| " refers to what the text spans, i.e.:
                # before|during|after the deletion. comparisons use (line,
                # column) tuples, which order lexicographically.
                if (y, tx2) <= (y1, x1):
                    # *| |  -- entirely before the deletion: keep unchanged
                    newtokens[y].append(token)
                elif (y, tx1) >= (y2, x2):
                    # | |*  -- entirely after: shift up; shift left only if it
                    # was on the last deleted line
                    token.y -= ydelta
                    if y == y2:
                        token.x -= xdelta
                    newtokens[token.y].append(token)
                elif (y, tx1) < (y1, x1):
                    if (y, tx2) <= (y2, x2):
                        # *|*|  -- straddles the start: keep only the prefix
                        token.string = token.string[:x1 - tx1]
                    else:
                        # *|*|* -- spans the whole deletion: splice out the
                        # deleted middle of the string
                        token.string = token.string[:x1 - tx1] + token.string[x2 - tx1:]
                    newtokens[y].append(token)
                elif (y, tx1) < (y2, x2):
                    if (y, tx2) <= (y2, x2):
                        # |*|  -- entirely inside the deletion: drop it
                        pass
                    else:
                        # |*|* -- straddles the end: keep the suffix, moved to
                        # the point where the deletion began
                        token.x = x1
                        token.y -= ydelta
                        token.string = token.string[x2 - tx1:]
                        newtokens[token.y].append(token)
                else:
                    # the four cases above should be exhaustive
                    raise Exception, "this should never happen: %r" % token
        # ok, now that we have built a correct new structure, store a reference
        # to it instead.
        self.tokens = newtokens
return ctoken
def relex_del(self, lines, y1, x1, y2, x2):
# first let's update our existing tokens to fix their offsets, etc.
self.update_del(lines, y1, x1, y2, x2)
ctoken = self.update_del(lines, y1, x1, y2, x2)
# then let's do some relexing
self.relex(lines, y1, x1, y2, x2)
self.relex(lines, y1, x1, y2, x2, ctoken)
# addition
# ======================
@ -293,6 +235,10 @@ class Highlighter:
xdelta = x2 - x1
ydelta = y2 - y1
if self.tokens[y1]:
ctoken = self.tokens[y1][0]
else:
ctoken = None
# construct a new token data structure, with the right number of lines
newtokens = []
@ -349,10 +295,11 @@ class Highlighter:
# ok, now that we have built a correct new structure, store a reference
# to it instead.
self.tokens = newtokens
return ctoken
def relex_add(self, lines, y1, x1, newlines):
# first let's update our existing tokens to fix their offsets, etc.
self.update_add(lines, y1, x1, newlines)
ctoken = self.update_add(lines, y1, x1, newlines)
# create some extra info that we need
y2 = y1 + len(newlines) - 1
@ -362,4 +309,4 @@ class Highlighter:
x2 = len(newlines[-1])
# now let's start the relexing process
self.relex(lines, y1, x1, y2, x2)
self.relex(lines, y1, x1, y2, x2, ctoken)

57
lex2.py
View File

@ -21,25 +21,22 @@ class Token(object):
return []
def domain(self):
if self.parent is not None:
names = []
names.extend(self.parent.domain())
if names[-1] != self.rule.name:
names.append(self.rule.name)
return names
names = self.parent.domain()
else:
return [self.rule.name]
names = []
names.append(self.rule.name)
return names
def fqlist(self):
names = []
if self.parent is not None:
names.extend(self.parent.domain())
names = self.parent.domain()
else:
names = []
if self.name == 'start':
names.append(self.rule.name)
names.append(self.name)
return names
def fqname(self):
if self.name == 'start':
names = self.domain()
names.append(self.name)
else:
names = self.fqlist()
names = self.fqlist()
return '.'.join(names)
def copy(self):
return Token(self.name, self.rule, self.y, self.x, self.string,
@ -134,6 +131,7 @@ class RegionRule(Rule):
self.start_re = re.compile(start)
def resume(self, lexer, toresume):
#raise Exception, "%r %r" % (lexer, toresume) #XYZ
assert toresume, "can't resume without tokens to resume!"
self._match(lexer, None, None, toresume)
return True
@ -203,9 +201,12 @@ class RegionRule(Rule):
reenter = False
rule2 = toresume[1].rule
rule2.resume(lexer, toresume[1:])
found = True
null_t = None
break
if lexer.y >= len(lexer.lines):
return True
elif lexer.x >= len(lexer.lines[lexer.y]):
lexer.y += 1
lexer.x = 0
# if we are looking for an end token, then see if we've
# found it. if so, then we are done!
@ -232,8 +233,9 @@ class RegionRule(Rule):
if null_t is None:
null_t = Token('null', None, lexer.y, lexer.x, '', parent)
lexer.add_token(null_t)
null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
lexer.x += 1
if len(lexer.lines[lexer.y]) > lexer.x:
null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
lexer.x += 1
# ok, since we're soon going to be on a different line (or
# already are), we want a new null token. so forget about the
@ -271,7 +273,8 @@ class DualRegionRule(Rule):
token = toresume[0]
if token.name == 'start':
t2 = self._match_first(lexer, token, toresume)
t3 = self._match_second(lexer, t2, [])
if t2 is not None:
t3 = self._match_second(lexer, t2, [])
return True
elif token.name == 'middle':
t3 = self._match_second(lexer, token, toresume)
@ -284,7 +287,8 @@ class DualRegionRule(Rule):
if m:
t1 = self._add_from_regex('start', lexer, parent, m, m.groupdict())
t2 = self._match_first(lexer, t1, [])
t3 = self._match_second(lexer, t2, [])
if t2 is not None:
t3 = self._match_second(lexer, t2, [])
return True
else:
# region was not matched; we never started. so return false
@ -307,7 +311,7 @@ class DualRegionRule(Rule):
while not done and lexer.y < len(lexer.lines):
old_y = lexer.y
# if this line is empty, then we will skip it, but here weinsert
# if this line is empty, then we will skip it, but here we insert
# an empty null token just so we have something
if len(lexer.lines[lexer.y]) == 0:
null_t = Token('null', None, lexer.y, lexer.x, '', parent)
@ -477,17 +481,16 @@ class Lexer:
self.tokens = []
def resume(self, lines, y, x, token):
#raise Exception, "%r %r" % (self, token) #XYZ
self.y = y
self.x = x
#self.x = 0
self.lines = lines
self.tokens = []
if token:
toresume = token.parents()
if toresume:
toresume[0].rule.resume(self, toresume)
#raise Exception, "aw damn3"
toresume = token.parents()
if toresume:
toresume[0].rule.resume(self, toresume)
#else:
# raise Exception, "dammmmit"
def __iter__(self):
if self.lines is None:

View File

@ -148,6 +148,11 @@ class PerlGrammar(Grammar):
name=r'length',
pattern=r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
),
# XYZ
PatternRule(
name=r'cast',
pattern=r'[\$\@\%\^\&](?= *{)',
),
PatternRule(
name=r'scalar',
pattern=r"\$[][><ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])",
@ -304,28 +309,28 @@ class PerlGrammar(Grammar):
),
PatternRule(
name=r'class',
pattern=r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*",
pattern=r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=->)",
),
# # nested regions
# RegionRule(
# name=r'paren',
# start=r'\(',
# grammar=None,
# end=r'\)',
# ),
# RegionRule(
# name=r'brace',
# start=r'{',
# grammar=None,
# end=r'}',
# ),
# RegionRule(
# name=r'bracket',
# start=r'\[',
# grammar=None,
# end=r'\]',
# ),
# nested regions
RegionRule(
name=r'paren',
start=r'\(',
grammar=None,
end=r'\)',
),
RegionRule(
name=r'brace',
start=r'{',
grammar=None,
end=r'}',
),
RegionRule(
name=r'bracket',
start=r'\[',
grammar=None,
end=r'\]',
),
# some basic stuff
PatternRule(

View File

@ -335,8 +335,8 @@ class InsertString(Method):
self.args = []
self.help = "Insert %r into the current buffer." % s
self.string = s
def _execute(self, window, **vargs):
window.insert_string_at_cursor(self.string)
def _execute(self, w, **vargs):
w.insert_string_at_cursor(self.string)
# killing/copying/etc.
class Kill(Method):
@ -449,9 +449,23 @@ class DeleteRightWhitespace(Method):
class DumpTokens(Method):
'''Dump all lexical tokens (syntax highlighting debugging)'''
def _execute(self, w, **vargs):
lines = ['Tokens from %s:' % (w.buffer.name())]
for t in w.mode.highlighter.tokens:
lines.append(repr(t))
modename = w.mode.name()
lines = []
if modename in w.buffer.highlights:
tokens = w.buffer.highlights[modename].tokens
for i in range(0, len(tokens)):
lines.append("LINE %d" % i)
group = tokens[i]
for token in group:
coord = '(%d, %d)' % (token.x, token.y)
if token.parent is None:
pcoord = ''
else:
pcoord = '[%d, %d]' % (token.parent.x, token.parent.y)
fields = (coord, pcoord, token.fqname(), token.string)
lines.append(' %-10s %-10s %-30s %r' % fields)
else:
lines.append("no tokens")
output = "\n".join(lines)
w.application.data_buffer("token-dump", output, switch_to=True)
class MetaX(Method):

View File

@ -20,9 +20,9 @@ class Perl(mode2.Fundamental):
self.add_action_and_bindings(PerlWhichFunction(), ('C-c w',))
self.add_action_and_bindings(PerlListFunctions(), ('C-c W',))
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
#self.add_bindings('close-paren', (')',))
#self.add_bindings('close-brace', ('}',))
#self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
@ -36,6 +36,7 @@ class Perl(mode2.Fundamental):
'operator': color.build('default', 'default'),
'endblock': color.build('red', 'default'),
'keyword': color.build('magenta', 'default'),
'cast': color.build('yellow', 'default'),
'scalar': color.build('yellow', 'default'),
'array': color.build('yellow', 'default'),
'deref': color.build('yellow', 'default'),
@ -114,52 +115,6 @@ class Perl(mode2.Fundamental):
'translate.null': color.build('magenta', 'default'),
}
# self.colors = {
# 'heredoc': color.build('green', 'default'),
# 'endblock': color.build('red', 'default'),
# 'pod': color.build('red', 'default'),
# 'comment': color.build('red', 'default'),
# 'string1': color.build('green', 'default'),
# 'string2': color.build('green', 'default'),
# 'evalstring': color.build('cyan', 'default'),
# 'default string': color.build('green', 'default'),
# 'keyword': color.build('magenta', 'default'),
# 'length scalar': color.build('yellow', 'default'),
# 'system scalar': color.build('yellow', 'default'),
# 'system array': color.build('yellow', 'default'),
# 'scalar': color.build('yellow', 'default'),
# 'dereference': color.build('yellow', 'default'),
# 'array': color.build('yellow', 'default'),
# 'hash': color.build('yellow', 'default'),
# 'hash bareword index': color.build('green', 'default'),
# 'quoted region': color.build('cyan', 'default'),
# 'match regex': color.build('cyan', 'default'),
# 'replace regex': color.build('cyan', 'default'),
# 'literal hash bareword index': color.build('green', 'default'),
# 'interpolated scalar': color.build('yellow', 'default'),
# 'interpolated system scalar': color.build('yellow', 'default'),
# 'interpolated array': color.build('yellow', 'default'),
# 'interpolated system array': color.build('yellow', 'default'),
# 'interpolated hash': color.build('yellow', 'default'),
# 'label': color.build('cyan', 'default'),
# 'package': color.build('cyan', 'default'),
# 'use': color.build('cyan', 'default'),
# 'method': color.build('cyan', 'default'),
# 'methodref': color.build('cyan', 'default'),
# 'method declaration': color.build('cyan', 'default'),
# 'instance method': color.build('cyan', 'default'),
# 'static method': color.build('cyan', 'default'),
# 'built-in method': color.build('magenta', 'default'),
# 'bareword method': color.build('cyan', 'default'),
# #'bareword': color.build('yellow', 'magenta'),
# 'bizzaro': color.build('magenta', 'green')
# }
#self.highlighter.lex_buffer()
#self.get_regions()
#self.tabber = tab_perl.PerlTabber(self)
#self.functions = None
def name(self):
return "Perl"