parent 9e1a1711ab
commit 02b2aafb77

 highlight2.py | 256
@@ -1,4 +1,5 @@
 import sys
+import lex2
 
 color_list = []
 color_list.extend(['\033[3%dm' % x for x in range(0, 8)])
@@ -22,7 +23,9 @@ class Highlighter:
 
     def dump(self, fmt='(%3s, %2s) | %s'):
         print fmt % ('y', 'x', 'string')
-        for group in self.tokens:
+        for i in range(0, len(self.tokens)):
+            group = self.tokens[i]
+            print 'LINE %d' % i
             for token in group:
                 print fmt % (token.y, token.x, token.string)
 
@@ -52,6 +55,83 @@ class Highlighter:
         for token in self.lexer:
             self.tokens[token.y].append(token)
 
+    # relexing
+    # ======================
+    def relex(self, lines, y1, x1, y2, x2):
+        # start the relexing process
+        self.lexer.lex(lines, y1, 0)
+
+        # this keeps track of the current y coordinate, the current token index
+        # on line[y], and the current "new token", respectively.
+        y = y1
+        i = 0
+        getnext = True
+        new_token = None
+
+        while True:
+            # if we have overstepped our bounds, then exit!
+            if y >= len(lines):
+                break
+
+            # if we need another new_token, then try to get it.
+            if getnext:
+                try:
+                    new_token = self.lexer.next()
+                    getnext = False
+                except StopIteration:
+                    # ok, so this means that ALL the rest of the tokens didn't
+                    # show up, because we're done. so delete them and exit
+                    for j in range(y, len(lines)):
+                        del self.tokens[j][i:]
+                        i = 0
+                    break
+
+            # if our next token is on a future line, we need to just get rid of
+            # all our old tokens until we get there
+            while new_token.y > y:
+                del self.tokens[y][i:]
+                i = 0
+                y += 1
+
+            # ok, so see if we have current tokens on this line; if so get it
+            if i < len(self.tokens[y]):
+                old_token = self.tokens[y][i]
+                assert old_token.y == y, "%d == %d" % (old_token.y, y)
+            else:
+                old_token = None
+
+            if old_token is None:
+                # since we don't have a previous token at this location, just
+                # insert the new one
+                self.tokens[y].insert(i, new_token)
+                i += 1
+                getnext = True
+            elif old_token == new_token:
+                # if they match, then leave the old one alone
+                i += 1
+                getnext = True
+                if new_token.y >= y2 and new_token.end_x() >= x2:
+                    # in this case, we can (probably) assume that the rest of
+                    # the lines will lex the same way
+                    break
+            elif old_token.x < new_token.end_x():
+                # ok, so we haven't gotten to this new token yet. obviously
+                # this token never showed up in the new lexing, so delete it.
+                del self.tokens[y][i]
+            elif old_token.x >= new_token.end_x():
+                # ok, this token is further out, so just insert the new token
+                # ahead of it, move our counter out and continue
+                self.tokens[y].insert(i, new_token)
+                i += 1
+                getnext = True
+            else:
+                # this should never happen
+                raise Exception, "this isn't happening"
+
+
+
+    # deletion
+    # ======================
     def update_del(self, lines, y1, x1, y2, x2):
         assert y1 >= 0
         assert y1 <= y2
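
Note: the relex() loop above relies on a narrow token interface only: tokens know their y and x position and their string, can report end_x(), can be copied, and compare equal when they represent the same text at the same spot. The real class lives in lex2 and is not part of this diff; the stand-in below is a hypothetical sketch of just the attributes used here (the constructor arguments are guesses based on the lex2.Token('new', '', y, x, s) calls in update_add).

    # Hypothetical minimal stand-in for lex2.Token; the real definition is in
    # lex2.py, which this commit does not touch.
    class Token:
        def __init__(self, name, link, y, x, string):
            self.name = name        # token type, e.g. 'new' (guess)
            self.link = link        # second constructor argument; purpose not shown in this diff
            self.y = y              # line index the token sits on
            self.x = x              # starting column
            self.string = string    # the token's text
        def end_x(self):
            # first column past the token, used by the overlap tests in relex()
            return self.x + len(self.string)
        def copy(self):
            return Token(self.name, self.link, self.y, self.x, self.string)
        def __eq__(self, other):
            # "same place, same text" is what relex() treats as an unchanged token
            return (other is not None and self.y == other.y and self.x == other.x
                    and self.string == other.string)
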
@@ -60,15 +140,25 @@ class Highlighter:
         xdelta = x2 - x1
         ydelta = y2 - y1
 
-        newtokens = [[] for x in range(0, len(self.tokens) - ydelta)]
+        # construct a new token data structure; it will have one list for
+        # every y index in lines. also, fill in tokens before the change
+        newtokens = [[] for x in lines]
         for y in range(0, y1):
             newtokens[y] = self.tokens[y]
 
-        for y in range(y1, len(lines)):
+        # so for each line we currently have, we need to process every token,
+        # transferring them from the old structure to the new, and modifying
+        # them appropriately to take the change into account.
+        for y in range(y1, len(self.tokens)):
             while self.tokens[y]:
+                # so remove the token from the old structure, and figure out
+                # where it stands in relation to the deletion
                 token = self.tokens[y].pop(0)
                 tx1 = token.x
                 tx2 = token.x + len(token.string)
 
+                # the notation "*|*| " refers to what the text spans, i.e.:
+                # before|during|after
                 if (y, tx2) <= (y1, x1):
                     # *| |
                     newtokens[y].append(token)
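
A small aside on the span tests used throughout update_del: pairs like (y, tx2) <= (y1, x1) lean on Python's lexicographic tuple ordering, so the line number is compared first and the column only breaks ties. For example:

    # Tuple comparisons order by line first, then column.
    assert (3, 99) <= (4, 0)       # an earlier line sorts before any later line
    assert (4, 10) <= (4, 15)      # same line, so the columns decide
    assert not (5, 0) <= (4, 99)   # a later line is never "before"
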
@@ -79,89 +169,117 @@ class Highlighter:
                     token.x -= xdelta
                     newtokens[token.y].append(token)
                 elif (y, tx1) < (y1, x1):
-                    token2 = token.copy()
                     if (y, tx2) <= (y2, x2):
                         # *|*|
-                        s = token2.string[:x1 - tx1]
+                        token.string = token.string[:x1 - tx1]
                     else:
                         # *|*|*
-                        s = token2.string[:x1 - tx1] + token2.string[x2 - tx1:]
-                    token2.string = s
-                    newtokens[y].append(token2)
+                        token.string = token.string[:x1 - tx1] + token.string[x2 - tx1:]
+                    newtokens[y].append(token)
                 elif (y, tx1) < (y2, x2):
                     if (y, tx2) <= (y2, x2):
                         # |*|
                         pass
                     else:
                         # |*|*
-                        token2 = token.copy()
-                        token2.x = x1
-                        token2.y = token2.y - ydelta
-                        token2.string = token2.string[x2 - tx1:]
-                        newtokens[token2.y].append(token2)
+                        token.x = x1
+                        token.y -= ydelta
+                        token.string = token.string[x2 - tx1:]
+                        newtokens[token.y].append(token)
+                else:
+                    raise Exception, "this should never happen: %r" % token
+
+        # ok, now that we have built a correct new structure, store a reference
+        # to it instead.
         self.tokens = newtokens
 
     def relex_del(self, lines, y1, x1, y2, x2):
+        # first let's update our existing tokens to fix their offsets, etc.
         self.update_del(lines, y1, x1, y2, x2)
-        self.lexer.lex(lines, y1, 0)
-
-        y = y1
-        i = 0
-        getnext = True
-
-        while True:
-            if y >= len(lines):
-                break
-
-            if getnext:
-                try:
-                    new_token = self.lexer.next()
-                    getnext = False
-                except StopIteration:
-                    for j in range(y, len(lines)):
-                        print 'DELETE END ROW %d[%d:]: %r' % (j, i, [x.string for x in self.tokens[j][i:]])
-                        del self.tokens[j][i:]
-                        i = 0
-                    break
-
-            # if our next token is one a future line, we need to just get rid of
-            # all our old tokens until we get there
-            while new_token.y > y:
-                print 'DELETE MID ROW %d[%d:]: %r' % (y, i, [x.string for x in self.tokens[y][i:]])
-                del self.tokens[y][i:]
-                i = 0
-                y += 1
-
-            if i < len(self.tokens[y]):
-                old_token = self.tokens[y][i]
-                assert old_token.y == y
-            else:
-                old_token = None
-
-            if old_token is None:
-                print 'DEFAULT INSERT %d[%d]: %r' % (y, i, new_token.string)
-                self.tokens[y].insert(i, new_token)
-                i += 1
-                getnext = True
-                continue
-            elif old_token == new_token:
-                print 'MATCH %d[%d]: %r == %r' % (y, i, old_token.string, new_token.string)
-                i += 1
-                getnext = True
-                if new_token.y >= y2 and new_token.end_x() >= x2:
-                    break
-                else:
-                    continue
-            elif old_token.x < new_token.end_x():
-                print 'DELETE BEFORE %d[%d]: %r' % (y, i, old_token.string)
-                del self.tokens[y][i]
-                continue
-            elif old_token.x >= new_token.end_x():
-                print 'INSERT %d[%d]: %r' % (y, i, new_token.string)
-                self.tokens[y].insert(i, new_token)
-                i += 1
-                getnext = True
-                continue
-            else:
-                raise Exception, "what what?"
+
+        # then let's do some relexing
+        self.relex(lines, y1, x1, y2, x2)
+
+    # addition
+    # ======================
+    def update_add(self, lines, y1, x1, newlines):
+        assert y1 >= 0
+        assert len(newlines) > 0
+
+        y2 = y1 + len(newlines) - 1
+        if y2 == y1:
+            x2 = x1 + len(newlines[0])
+        else:
+            x2 = len(newlines[-1])
+
+        xdelta = x2 - x1
+        ydelta = y2 - y1
+
+        # construct a new token data structure, with the right number of lines
+        newtokens = []
+        for i in range(0, len(self.tokens) + ydelta):
+            newtokens.append([])
+
+        # copy the tokens that show up before the changed line
+        for y in range(0, y1):
+            newtokens[y] = self.tokens[y]
+
+        # process the tokens that show up on the changed line
+        post_change_list = []
+        for t in self.tokens[y1]:
+            tx1 = t.x
+            tx2 = t.x + len(t.string)
+            ty = t.y
+            ts = t.string
+            if tx2 <= x1:
+                # '*| ' before the insertion
+                newtokens[y1].append(t)
+            elif tx1 >= x1:
+                # ' |*' after the insertion
+                t.x += xdelta
+                t.y = y2
+                post_change_list.append(t)
+            else:
+                # '*|*' around the insertion
+                t1 = t.copy()
+                t1.string = t.string[:x1 - tx1]
+                newtokens[y1].append(t1)
+
+                t2 = t.copy()
+                t2.string = t.string[x1 - tx1:]
+                t2.x = x2
+                t2.y = y2
+                post_change_list.append(t2)
+
+        # add in the new data
+        newtokens[y1].append(lex2.Token('new', '', y1, x1, newlines[0]))
+        for i in range(1, len(newlines)):
+            yi = y1 + i
+            newtokens[yi].append(lex2.Token('new', '', yi, 0, newlines[i]))
+
+        # add the post-change tokens back
+        for t in post_change_list:
+            newtokens[y2].append(t)
+
+        # for each subsequent line, fix its tokens' y coordinates
+        for y in range(y1 + 1, len(self.tokens)):
+            for t in self.tokens[y]:
+                t.y += ydelta
+                newtokens[t.y].append(t)
+
+        # ok, now that we have built a correct new structure, store a reference
+        # to it instead.
+        self.tokens = newtokens
+
+    def relex_add(self, lines, y1, x1, newlines):
+        # first let's update our existing tokens to fix their offsets, etc.
+        self.update_add(lines, y1, x1, newlines)
+
+        # create some extra info that we need
+        y2 = y1 + len(newlines) - 1
+        if y2 == y1:
+            x2 = x1 + len(newlines[0])
+        else:
+            x2 = len(newlines[-1])
+
+        # now let's start the relexing process
+        self.relex(lines, y1, x1, y2, x2)
 
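
To make the '*|*|*' case of update_del concrete: a token that starts before the deleted span and ends after it keeps its head up to x1 and its tail from x2 on, both measured from the token's own start column tx1. A worked example with invented values:

    # Token text 'foobarbaz' starting at column 4; columns 6 through 8 are deleted.
    tx1, s = 4, 'foobarbaz'
    x1, x2 = 6, 9
    kept = s[:x1 - tx1] + s[x2 - tx1:]   # 'fo' + 'rbaz'
    assert kept == 'forbaz'
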

 test3.py | 23
@@ -166,7 +166,7 @@ for path in args:
             h.dump()
         else:
             h.display(token_colors[opts.grammar])
-    else:
+    elif False:
         (y1, x1) = (5, 9)
         (y2, x2) = (7, 14)
         #(y2, x2) = (82, 2)
@@ -182,3 +182,24 @@ for path in args:
             h.dump()
         else:
             h.display(token_colors[opts.grammar])
+    else:
+        #newlines = ['one two three']
+        newlines = ['one two three', 'cat', 'dog', 'del self.foo[3]', 'oops']
+        (y1, x1) = (5, 9)
+
+        if len(newlines) > 1:
+            lines.insert(y1 + 1, newlines[-1] + lines[y1][x1:])
+            lines[y1] = lines[y1][:x1] + newlines[0]
+            for i in range(1, len(newlines) - 1):
+                newline = newlines[i]
+                lines.insert(y1 + i, newline)
+        else:
+            lines[y1] = lines[y1][:x1] + newlines[0] + lines[y1][x1:]
+
+        h.relex_add(lines, y1, x1, newlines)
+        #h.update_add(lines, y1, x1, newlines)
+        #h.highlight(lines)
+        if opts.dump:
+            h.dump()
+        else:
+            h.display(token_colors[opts.grammar])
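
Plugging the test data above into update_add's coordinate arithmetic (the same formulas relex_add uses to pick its end point): five inserted lines starting at (y1, x1) = (5, 9) give the values below. Note that xdelta can come out negative when the last inserted line is shorter than the insertion column.

    newlines = ['one two three', 'cat', 'dog', 'del self.foo[3]', 'oops']
    (y1, x1) = (5, 9)
    y2 = y1 + len(newlines) - 1        # 9: last line touched by the insertion
    x2 = len(newlines[-1])             # 4: multi-line case, so len('oops')
    xdelta, ydelta = x2 - x1, y2 - y1  # -5 and 4
    assert (y2, x2, xdelta, ydelta) == (9, 4, -5, 4)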