diff --git a/highlight2.py b/highlight2.py deleted file mode 100644 index 99d30d2..0000000 --- a/highlight2.py +++ /dev/null @@ -1,326 +0,0 @@ -import re, sys -from lex import Token - -color_list = [] -color_list.extend(['\033[3%dm' % x for x in range(0, 8)]) -color_list.extend(['\033[3%d;1m' % x for x in range(0, 8)]) -color_list.extend(['\033[0m']) - -color_names = [ - 'black', 'dred', 'dgreen', 'brown', 'dblue', 'dpurple', 'dcyan', 'lgrey', - 'dgrey', 'lred', 'lgreen', 'yellow', 'lblue', 'lpurple', 'lcyan', 'white', - 'unset', -] - -color_dict ={} -for i in range(0, len(color_list)): - color_dict[color_names[i]] = color_list[i] - -def token_match(self, token, name, data=None): - return token.fqname() == name and data is None or token.string == data -def token_match2(self, token, name, regex): - return token.fqname() == name and regex.match(token.string) -def token_vmatch(self, token, *pairs): - for (name, data) in pairs: - if token_match(token, name, data): - return True - return False -def token_vmatch2(self, token, *pairs): - for (name, regex) in pairs: - if token_match(token, name, regex): - return True - return False - -class Highlighter: - def __init__(self, lexer): - self.lexer = lexer - self.tokens = [] - - def dump(self, fmt='(%3s, %2s) | %s'): - print fmt % ('y', 'x', 'string') - for i in range(0, len(self.tokens)): - group = self.tokens[i] - print 'LINE %d' % i - for token in group: - print fmt % (token.y, token.x, token.string) - - def display(self, token_colors={}, debug=False): - for group in self.tokens: - for token in group: - color_name = None - name_parts = token.name.split('.') - for i in range(0, len(name_parts)): - if '.'.join(name_parts[i:]) in token_colors: - color_name = token_colors['.'.join(name_parts[i:])] - break - if color_name is not None: - sys.stdout.write(color_dict[color_name]) - pass - elif debug: - raise Exception, "no highlighting for %r" % token.name - else: - color_name = 'white' - sys.stdout.write(color_dict[color_name]) - sys.stdout.write(token.string) - sys.stdout.write('\n') - - def delete_token(self, y, i): - assert y < len(self.tokens), "%d < %d" % (y, len(self.tokens)) - assert i < len(self.tokens[y]), "%d < %d" % (i, len(self.tokens[i])) - deleted = [] - deleted.append(self.tokens[y].pop(i)) - while y < len(self.tokens): - while i < len(self.tokens[y]): - while deleted and self.tokens[y][i].parent is not deleted[-1]: - del deleted[-1] - if not deleted: - return - elif self.tokens[y][i].parent is deleted[-1]: - deleted.append(self.tokens[y].pop(i)) - else: - raise Exception, "huh?? %r %r" % (self.tokens[y][i].parent, - deleted) - i = 0 - y += 1 - - def highlight(self, lines): - self.tokens = [[] for l in lines] - #self.lexer.lex(lines, y=0, x=0) - #for token in self.lexer: - for token in self.lexer.lex(lines, y=0, x=0): - self.tokens[token.y].append(token) - - # relexing - # ====================== - def relex(self, lines, y1, x1, y2, x2, token=None): - if token: - gen = self.lexer.resume(lines, y1, 0, token) - else: - gen = self.lexer.lex(lines, y1, 0) - - # these keep track of the current y coordinate, the current token index - # on line[y], and the current "new token", respectively. - y = y1 - i = 0 - getnext = True - new_token = None - - while True: - # if we have overstepped our bounds, then exit! - if y >= len(lines): - break - - # if we need another new_token, then try to get it. 
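display() above resolves a token's color by trying progressively shorter dotted suffixes of its fully-qualified name, so a rule for 'string' also covers 'python.string' unless something more specific wins. A minimal standalone sketch of that lookup order (the token_colors mapping here is made up):

    def resolve_color(token_name, token_colors, default='white'):
        # try 'a.b.c', then 'b.c', then 'c': most specific suffix first
        parts = token_name.split('.')
        for i in range(len(parts)):
            suffix = '.'.join(parts[i:])
            if suffix in token_colors:
                return token_colors[suffix]
        return default

    colors = {'string': 'lgreen', 'python.comment': 'lred'}
    assert resolve_color('python.string', colors) == 'lgreen'
    assert resolve_color('python.keyword', colors) == 'white'

In passing: token_match above needs parentheses, i.e. `token.fqname() == name and (data is None or token.string == data)`, since `and` binds tighter than `or`; token_vmatch2 dispatches to token_match where it means token_match2; and the second assert in delete_token indexes self.tokens[i] where it means self.tokens[y].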
- if getnext: - try: - #new_token = self.lexer.next() - new_token = gen.next() - getnext = False - except StopIteration: - # ok, so this means that ALL the rest of the tokens didn't - # show up, because we're done. so delete them and exit - for j in range(y, len(lines)): - del self.tokens[j][i:] - i = 0 - break - - # if our next token is one a future line, we need to just get rid of - # all our old tokens until we get there - while new_token.y > y: - del self.tokens[y][i:] - i = 0 - y += 1 - - # ok, so see if we have current tokens on this line; if so get it - if i < len(self.tokens[y]): - old_token = self.tokens[y][i] - assert old_token.y == y, "%d == %d" % (old_token.y, y) - else: - #raise Exception, "K %d %r" % (i, new_token) - old_token = None - - if old_token is None: - #raise Exception, "J %d %r" % (i, new_token) - # since we don't have a previous token at this location, just - # insert the new one - self.tokens[y].insert(i, new_token) - i += 1 - getnext = True - elif old_token == new_token: - # if they match, then leave the old one alone - i += 1 - getnext = True - if new_token.y > y2: - # in this case, we can be sure that the rest of the lines - # will lex the same way - break - elif old_token.x < new_token.end_x(): - # ok, so we haven't gotten to this new token yet. obviously - # this token never showed up in the new lexing, so delete it. - del self.tokens[y][i] - elif old_token.x >= new_token.end_x(): - # ok, this token is further out, so just insert the new token - # ahead of it, move our counter out and continue - self.tokens[y].insert(i, new_token) - i += 1 - getnext = True - else: - # this should never happen - raise Exception, "this isn't happening" - - # deletion - # ====================== - def update_del(self, lines, y1, x1, y2, x2): - assert y1 >= 0 - assert y1 <= y2 - - # first let's delete any token who falls in the range of the change (or, - # in the case of child tokens, whose parent is being deleted). - y = y1 - i = 0 - done = False - if self.tokens[y1]: - ctoken = self.tokens[y1][0] - else: - ctoken = None - while not done: - if y >= len(self.tokens): - break - if i < len(self.tokens[y]): - # figure out if this token is in our range. notice that - # delete_token() will take care of the need to recursively - # delete children for us - token = self.tokens[y][i] - if token.y > y2 or y == y2 and token.x >= x2: - done = True - elif token.y < y1 or token.y == y1 and token.x < x1: - i += 1 - else: - self.delete_token(y, i) - y += 1 - i = 0 - - # ok, so now we need to "adjust" the (x,y) coordinates of all the tokens - # after the change. first we will copy over the pre-deletion tokens. - newtokens = [[] for x in range(0, len(self.tokens) - y2 + y1)] - - for y in range(0, y1): - for token in self.tokens[y]: - newtokens[y].append(token) - - # then the tokens which occured on the same line as the end of the - # deletion. - for token in self.tokens[y1]: - newtokens[y1].append(token) - if y2 != y1: - for token in self.tokens[y2]: - token.x = token.x - x2 + x1 - token.y = y1 - newtokens[y1].append(token) - - # finally, we will copy over the tokens from subsequent lines - for y in range(y2 + 1, len(self.tokens)): - for token in self.tokens[y]: - token.y = token.y - y2 + y1 - newtokens[y - y2 + y1].append(token) - - # now save our new tokens - self.tokens = newtokens - return ctoken - - def relex_del(self, lines, y1, x1, y2, x2): - # first let's update our existing tokens to fix their offsets, etc. 
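The relex() loop above is a single-pass merge of the freshly lexed token stream into the stale per-line lists, keyed by position: identical tokens are kept (and, past the damaged region, justify an early exit), stale tokens that overlap a new token are dropped, and everything else is inserted. A much-simplified single-line version of the same merge, treating tokens as (x, string) pairs:

    def merge_line(old, new):
        # old: stale (x, string) tokens; new: freshly lexed replacements
        out, i = list(old), 0
        for tok in new:
            x, s = tok
            # drop stale tokens the new token overlaps or supersedes
            while i < len(out) and out[i] != tok and out[i][0] < x + len(s):
                del out[i]
            if i < len(out) and out[i] == tok:
                i += 1                     # identical token: leave it alone
            else:
                out.insert(i, tok)
                i += 1
        del out[i:]                        # never re-lexed, so stale
        return out

    assert merge_line([(0, 'foo'), (4, 'bar')], [(0, 'foobar')]) == [(0, 'foobar')]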
- ctoken = self.update_del(lines, y1, x1, y2, x2) - - # then let's do some relexing - self.relex(lines, y1, x1, y2, x2, ctoken) - - # addition - # ====================== - def update_add(self, lines, y1, x1, newlines): - assert y1 >= 0 - assert len(newlines) > 0 - - y2 = y1 + len(newlines) - 1 - if y2 == y1: - x2 = x1 + len(newlines[0]) - else: - x2 = len(newlines[-1]) - - xdelta = x2 - x1 - ydelta = y2 - y1 - if self.tokens[y1]: - ctoken = self.tokens[y1][0] - else: - ctoken = None - - # construct a new token data structure, with the right number of lines - newtokens = [] - for i in range(0, len(self.tokens) + ydelta): - newtokens.append([]) - - # copy the tokens that show up before the changed line - for y in range(0, y1): - newtokens[y] = self.tokens[y] - - # process the tokens that show up on the changed line - post_change_list = [] - for t in self.tokens[y1]: - tx1 = t.x - tx2 = t.x + len(t.string) - ty = t.y - ts = t.string - if tx2 <= x1: - # '*| ' before the insertion - newtokens[y1].append(t) - elif tx1 >= x1: - # ' |*' after the insertion - t.x += xdelta - t.y = y2 - post_change_list.append(t) - else: - # '*|*' around the insertion - t1 = t.copy() - t1.string = t.string[:x1 - tx1] - newtokens[y1].append(t1) - - t2 = t.copy() - t2.string = t.string[x1 - tx1:] - t2.x = x2 - t2.y = y2 - post_change_list.append(t2) - - # add in the new data - newtokens[y1].append(Token('new', '', y1, x1, newlines[0])) - for i in range(1, len(newlines)): - yi = y1 + i - newtokens[yi].append(Token('new', '', yi, 0, newlines[i])) - - # add the post-change tokens back - for t in post_change_list: - newtokens[y2].append(t) - - # for each subsequent line, fix it's tokens' y coordinates - for y in range(y1 + 1, len(self.tokens)): - for t in self.tokens[y]: - t.y += ydelta - newtokens[t.y].append(t) - - # ok, now that we have built a correct new structure, store a reference - # to it instead. - self.tokens = newtokens - return ctoken - - def relex_add(self, lines, y1, x1, newlines): - # first let's update our existing tokens to fix their offsets, etc. 
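The '*|*' case in update_add() above is the delicate one: a token straddling the insertion point is cut in two, the head keeping its place and the tail moving to the end of the inserted text. The same arithmetic in isolation, for a single-line insertion of width w at column x1, with tokens as (x, string) pairs:

    def split_token(tok, x1, w):
        tx, s = tok
        if tx + len(s) <= x1:             # '*| '  entirely before the insert
            return [(tx, s)]
        if tx >= x1:                      # ' |*'  entirely after: shift right
            return [(tx + w, s)]
        head = (tx, s[:x1 - tx])          # '*|*'  straddles: split in two
        tail = (x1 + w, s[x1 - tx:])
        return [head, tail]

    # inserting 2 characters at column 5 splits a token covering columns 3-8
    assert split_token((3, 'abcdef'), 5, 2) == [(3, 'ab'), (7, 'cdef')]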
- ctoken = self.update_add(lines, y1, x1, newlines) - - # create some extra info that we need - y2 = y1 + len(newlines) - 1 - if y2 == y1: - x2 = x1 + len(newlines[0]) - else: - x2 = len(newlines[-1]) - - # now let's start the relexing process - self.relex(lines, y1, x1, y2, x2, ctoken) diff --git a/lex3.py b/lex3.py deleted file mode 100755 index 6c9ac41..0000000 --- a/lex3.py +++ /dev/null @@ -1,455 +0,0 @@ -import curses, re -import regex, util -from point import Point - -class Token(object): - def __init__(self, name, rule, y, x, s, color=None, parent=None, matchd={}, link=None): - self.name = name - self.rule = rule - self.y = y - self.x = x - self.string = s - self.color = color - self.parent = parent - self.matchd = matchd - self.link = link - assert parent is None or hasattr(parent, 'name'), 'oh no %r' % parent - def parents(self): - if self.parent is not None: - parents = self.parent.parents() - parents.append(self.parent) - return parents - else: - return [] - def domain(self): - if self.parent is not None: - names = self.parent.domain() - else: - names = [] - if self.link and not self.link.startswith('middle'): - names.append(self.rule.name) - return names - def fqlist(self): - if self.parent is not None: - names = self.parent.domain() - else: - names = [] - if self.link == 'start': - names.append(self.rule.name) - names.append(self.name) - return names - def fqname(self): - names = self.fqlist() - return '.'.join(names) - def copy(self): - return Token(self.name, self.rule, self.y, self.x, self.string, - self.color, self.parent, self.matchd, self.link) - def add_to_string(self, s): - self.string += s - def end_x(self): - return self.x + len(self.string) - def __eq__(self, other): - return (self.y == other.y and self.x == other.x - and self.name == other.name and self.parent is other.parent and - self.string == other.string) - def __repr__(self): - if len(self.string) < 10: - s = self.string - else: - s = self.string[:10] + '...' 
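Token.fqname() above is built by walking the parent chain: each enclosing region contributes a name and the token's own name goes last, which is what makes the suffix-based color lookup work. A stripped-down illustration (the real domain()/fqlist() also filter on the token's link, which this sketch omits):

    class Tok(object):
        def __init__(self, name, parent=None):
            self.name, self.parent = name, parent
        def fqlist(self):
            # ancestors first, own name last
            names = self.parent.fqlist() if self.parent else []
            names.append(self.name)
            return names
        def fqname(self):
            return '.'.join(self.fqlist())

    start = Tok('tags')
    inner = Tok('string', parent=start)
    assert inner.fqname() == 'tags.string'

Also worth flagging: the format string in __repr__ just below has been lost (an empty "" % fields raises "not all arguments converted"); it was presumably something like '<Token(%s, %r, %d, %d, %r)>'.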
- fields = (self.fqname(), self.rule, self.y, self.x, s) - return "" % fields - -class Rule: - reflags = 0 - def __init__(self, name): - assert regex.valid_token_name.match(name), 'invalid name %r' % name - assert not regex.reserved_token_names.match(name), \ - "rule name %r is reserved and thus illegal" % name - self.name = name - def match(self, lexer, parent): - raise Exception, "not implemented" - def lex(self, lexer, parent, match): - raise Exception, "not implemented" - def make_token(self, lexer, s, name, parent=None, matchd={}, link=None): - t = Token(name, self, lexer.y, lexer.x, s, None, parent, matchd, link) - t.color = lexer.get_color(t) - lexer.x += len(s) - if lexer.x > len(lexer.lines[lexer.y]): - lexer.x = 0 - lexer.y += 1 - return t - def get_line(self, lexer, y=None): - if y is None: - return lexer.lines[lexer.y] + '\n' - else: - return lexer.lines[y] + '\n' - -class PatternRule(Rule): - def __init__(self, name, pattern): - Rule.__init__(self, name) - self.pattern = pattern - self.re = re.compile(self.pattern, self.reflags) - def match(self, lexer, parent): - return self.re.match(self.get_line(lexer), lexer.x) - def lex(self, lexer, parent, m): - if m: - yield self.make_token(lexer, m.group(0), self.name, parent, m.groupdict()) - raise StopIteration -class NocasePatternRule(PatternRule): - reflags = re.IGNORECASE - -class OverrideError(Exception): - pass -class OverridePatternRule(PatternRule): - def lex(self, lexer, parent, m): - if m: - d = m.groupdict() - if lexer.action == 'lex': - a = lexer.mode.window.application - try: - names = d['mode'].split('.') - modecls = a.globals()[names.pop(0)] - for name in names: - modecls = getattr(modecls, name) - mode = modecls(None) - if hasattr(mode, 'grammar') and hasattr(mode, 'colors'): - lexer.mode.gstack['%s.start' % d['token']] = mode - else: - raise OverrideError, "argh: %r" % mode - except (KeyError, AttributeError, OverrideError): - pass - yield self.make_token(lexer, m.group(0), self.name, parent, d) - raise StopIteration - -class ContextPatternRule(PatternRule): - def __init__(self, name, pattern, fallback): - Rule.__init__(self, name) - self.pattern = pattern - self.fallback_re = re.compile(fallback, self.reflags) - def match(self, lexer, parent): - try: - r = re.compile(self.pattern % parent.matchd) - except KeyError: - r = self.fallback_re - return r.match(self.get_line(lexer), lexer.x) -class NocaseContextPatternRule(ContextPatternRule): - reflags = re.IGNORECASE - -class PatternGroupRule(PatternRule): - def __init__(self, name, *args): - assert args and len(args) % 2 == 0 - i = 0 - pairs = [] - while i < len(args): - tokname, pattern = args[i], args[i+1] - pairs.append((tokname, re.compile(pattern, self.reflags))) - i += 2 - Rule.__init__(self, name) - self.pairs = tuple(pairs) - def match(self, lexer, parent): - (x, y) = (lexer.x, lexer.y) - matches = [] - for (tokname, tokre) in self.pairs: - if y >= len(lexer.lines): - return [] - line = self.get_line(lexer, y) - m = tokre.match(line, x) - if m: - x += len(m.group(0)) - if x >= len(line): - x = 0 - y += 1 - matches.append((tokname, m)) - else: - return [] - assert len(matches) == len(self.pairs) - return matches - def lex(self, lexer, parent, matches): - if matches: - for (tokname, m) in matches: - yield self.make_token(lexer, m.group(0), tokname, parent, m.groupdict()) - raise StopIteration - -class RegionRule(Rule): - def __init__(self, name, *args): - Rule.__init__(self, name) - assert len(args) > 1 - args = list(args) - self.pairs = [] - self.start_re = 
re.compile(args.pop(0), self.reflags) - while len(args) > 1: - grammar = args.pop(0) - pattern = args.pop(0) - assert hasattr(grammar, 'rules'), repr(grammar) - assert type(pattern) == type(''), repr(pattern) - self.pairs.append((grammar, pattern)) - if len(args) == 1: - self.pairs.append((grammar, None)) - - def match(self, lexer, parent): - return self.start_re.match(self.get_line(lexer), lexer.x) - def lex(self, lexer, parent, m): - assert m - # ok, so since we had a match, we need to create our start token, who - # will be the ancestor to all other tokens matched in this region - matchd = m.groupdict() - parent = self.make_token(lexer, m.group(0), 'start', parent, matchd, 'start') - yield parent - - # now we will loop over the different pairs of grammars/stop-patterns in - # this region, and return the resulting token; we start at 0 - for tok in self._lex_loop(lexer, [parent], matchd, 0): - yield tok - raise StopIteration - def resume(self, lexer, toresume): - assert toresume, "can't resume without tokens to resume!" - # ok, so we need to figure out in which of the grammars of our region - # we are resuming. to do this we calculate i, a position in our list - # of grammar/stop-pattern pairs - if toresume[0].link == 'start': - i = 0 - else: - m = regex.middle_token_name.match(toresume[0].link) - assert m - i = int(m.group(1)) + 1 - assert i > 0 and i < len(self.pairs) - - # now we will loop over the different pairs of grammars/stop-patterns in - # this region, and return the resulting token; we start at i - for tok in self._lex_loop(lexer, toresume, toresume[0].matchd, i): - yield tok - raise StopIteration - def _lex_loop(self, lexer, toresume, matchd, i): - # we need to loop over our grammar/stop-pattern pairs - while i < len(self.pairs): - # for each one, we will compile our stop-regex, and figure out the - # name of the stop token to be created if this stop-regex matches. - fqname = toresume[0].fqname() - p = Point(toresume[0].x, toresume[0].y) - if fqname in lexer.mode.ghist and p in lexer.mode.ghist[fqname]: - mode = lexer.mode.ghist[fqname][p] - grammar = mode.grammar - elif fqname in lexer.mode.gstack: - mode = lexer.mode.gstack[fqname] - grammar = mode.grammar - lexer.mode.ghist.setdefault(fqname, {}) - lexer.mode.ghist[fqname][p] = mode - del lexer.mode.gstack[fqname] - else: - mode = lexer.mode - grammar = self.pairs[i][0] - lexer.mstack.append(mode) - - if self.pairs[i][1]: - stopre = re.compile(self.pairs[i][1] % matchd, self.reflags) - else: - stopre = None - if i == len(self.pairs) - 1: - tokname = 'end' - else: - tokname = 'middle%d' % i - - # ok, so now loop over all the tokens in the current grammar, until - # the stop-token (if any) is found, and return each result as we get - # it. - tok = None - for tok in self._lex(lexer, toresume, tokname, stopre, grammar): - yield tok - - # ok, so now either we found the stop-token, and have a new parent - # for future tokens (if any), or we are done. - if tok is not None and tok.name == tokname: - toresume = [tok] - matchd.update(tok.matchd) - else: - raise StopIteration - # this should have already gotten done by _lex - #lexer.mstack.pop(-1) - i += 1 - # assuming we make it through all our grammars, and find the end-token, - # then we need to signal that we are done. 
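RegionRule.resume() above recovers which grammar/stop-pattern pair to restart from purely out of the saved token's link string: 'start' means pair 0 and 'middleN' means pair N+1. A tiny sketch of that mapping, assuming links follow the naming used in _lex_loop():

    import re

    middle_re = re.compile(r'^middle(\d+)$')   # stand-in for regex.middle_token_name

    def resume_index(link):
        if link == 'start':
            return 0
        m = middle_re.match(link)
        assert m, 'cannot resume from %r' % link
        return int(m.group(1)) + 1

    assert resume_index('start') == 0
    assert resume_index('middle0') == 1
    assert resume_index('middle3') == 4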
- raise StopIteration - def _lex(self, lexer, toresume, stopname, stopre, grammar): - assert toresume - parent = toresume[0] - reenter = len(toresume) > 1 - null_t = None - - # ok, so there are only two way we want to exit this loop: either we - # lex the whole document, or we encounter the stop-token. - done = False - while not done and lexer.y < len(lexer.lines): - line = self.get_line(lexer) - old_y = lexer.y - while not done and lexer.y == old_y and lexer.x < len(line): - # ok, so reenter gets priority, since the current input might be - # intended for nested grammar. so handle it here - if reenter: - reenter = False - for t in toresume[1].rule.resume(lexer, toresume[1:]): - yield t - # since we might have changed our x/y coordinates, we need to - # do some checks here, and maybe finish or change our coordintes - if lexer.y >= len(lexer.lines): - raise StopIteration - elif lexer.x >= len(line): - lexer.y += 1 - lexer.x = 0 - - # ok, so get the *now* current line - line = self.get_line(lexer) - - if stopre: - # if we are looking for a stop-token, do that check now - m = stopre.match(line, lexer.x) - if m: - if null_t: - # if we have a null token waiting, return it first. - yield null_t - null_t = None - # ok, now return the stop-token, and signal that we are - # done and no more input is to be consumed - lexer.mstack.pop(-1) - yield self.make_token(lexer, m.group(0), stopname, - parent, m.groupdict(), stopname) - done = True - break - - m = None - # start checking our rules to see if we can match the input - for rule in grammar.rules: - m = rule.match(lexer, parent) - if m: - # ok great, we have a match - if null_t: - # if we have a null token waiting, return it first. - yield null_t - null_t = None - # ok, now for every token this rules has created, we - # return them, one by one. - for t in rule.lex(lexer, parent, m): - yield t - break - - if not m: - # we didn't find a match on a rule, so add this character to - # the current null token (creating a new one if necessary); - if not null_t: - null_t = Token('null', None, lexer.y, lexer.x, '', None, parent) - null_t.color = lexer.get_color(null_t) - null_t.add_to_string(line[lexer.x]) - lexer.x += 1 - - # ok, we are at the end of a line of input. 
so, if we have a null - # token waiting, now is the time to return it - if null_t: - yield null_t - null_t = None - if not done and old_y == lexer.y: - lexer.y += 1 - lexer.x = 0 - raise StopIteration -class NocaseRegionRule(RegionRule): - reflags = re.IGNORECASE - -class Grammar: - rules = [] -grammar = Grammar() - -class Lexer: - def __init__(self, mode, grammar): - self.mode = mode - self.mstack = [] - self.grammar = grammar - self.y = 0 - self.x = 0 - self.lines = None - assert self.grammar.rules - def get_line(self): - return self.lines[self.y] + '\n' - def lex(self, lines, y=0, x=0): - self.action = 'lex' - self.y = y - self.x = x - self.lines = lines - self.mstack = [] - self.mode.ghist = {} - self.mode.gstack = {} - for t in self._lex(): - yield t - del self.action - raise StopIteration - def resume(self, lines, y, x, token): - self.action = 'resume' - self.y = y - self.x = x - self.lines = lines - self.mstack = [] - toresume = token.parents() - - i = 1 - while i < len(toresume): - if toresume[i].link and toresume[i].link != 'start': - del toresume[i-1] - else: - i += 1 - - if toresume: - for t in toresume[0].rule.resume(self, toresume): - yield t - for t in self._lex(): - yield t - del self.action - raise StopIteration - - def _lex(self): - parent = None - while self.y < len(self.lines): - null_t = None - line = self.get_line() - while self.x < len(line): - m = None - for rule in self.grammar.rules: - m = rule.match(self, parent) - if m: - if null_t: - yield null_t - null_t = None - for t in rule.lex(self, parent, m): - yield t - break - - if self.y >= len(self.lines): - break - line = self.get_line() - if not m: - if self.x < len(line): - if null_t is None: - null_t = Token('null', None, self.y, self.x, '', None, parent) - null_t.color = self.get_color(null_t) - null_t.add_to_string(line[self.x]) - self.x += 1 - if null_t: - yield null_t - self.y += 1 - self.x = 0 - raise StopIteration - - def get_color(self, token): - fqlist = token.fqlist() - if self.mstack: - mode = self.mstack[-1] - else: - mode = self.mode - v = list(mode.default_color) - for j in range(0, len(fqlist)): - name = '.'.join(fqlist[j:]) - if name in mode.colors: - assert type(mode.colors[name]) == type(()), repr(mode) - v = list(mode.colors[name]) - break - #if DARK_BACKGROUND: - if True: - v.append('bold') - return v diff --git a/mode2.py b/mode2.py deleted file mode 100644 index 5fcc73a..0000000 --- a/mode2.py +++ /dev/null @@ -1,280 +0,0 @@ -import os, sets, string -import color, method -from lex import Lexer -from point import Point - -DEBUG = False - -class ActionError(Exception): - pass - -class Handler(object): - def __init__(self): - self.prefixes = sets.Set(["C-x", "C-c", "C-u"]) - self.last_sequence = '' - self.curr_tokens = [] - self.bindings = {} - - # handle adding and removing actions - def add_action(self, action): - if self.window is None: - return - elif action.name in self.window.application.methods: - return - else: - self.window.application.methods[action.name] = action - def del_action(self, name): - if self.window is None: - return - for binding in self.bindings.keys(): - if self.bindings[binding] == name: - del self.bindings[binding] - def add_binding(self, name, sequence): - if self.window is None: - return - elif not hasattr(self.window, 'application'): - raise Exception, "argh %r %r" % (self, self.window) - elif name not in self.window.application.methods: - raise Exception, "No action called %r found" % name - else: - self.bindings[sequence] = name - def add_bindings(self, name, 
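Both _lex loops above (RegionRule._lex and Lexer._lex) use the same 'null token' idiom: characters no rule matches accumulate into one pending token that is flushed whenever a real match, a stop-token, or the end of the line arrives, so unhighlightable text still comes out as a single run. The pattern in miniature, with a toy single-rule matcher:

    import re
    word = re.compile(r'[A-Za-z]+')

    def match_at(line, x):
        # toy matcher: (token, new_x) on a hit, else None
        m = word.match(line, x)
        return (('word', m.group(0)), m.end()) if m else None

    def lex_line(line):
        tokens, x, null = [], 0, ''
        while x < len(line):
            hit = match_at(line, x)
            if hit:
                if null:                  # flush the pending unmatched run
                    tokens.append(('null', null))
                    null = ''
                tok, x = hit
                tokens.append(tok)
            else:
                null += line[x]           # grow the pending null token
                x += 1
        if null:
            tokens.append(('null', null))
        return tokens

    assert lex_line('ab, cd') == [('word', 'ab'), ('null', ', '), ('word', 'cd')]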
sequences): - if self.window is None: - return - for sequence in sequences: - self.add_binding(name, sequence) - def del_binding(self, sequence): - if self.window is None: - return - del self.bindings[sequence] - def add_action_and_bindings(self, action, sequences): - if self.window is None: - return - self.add_action(action) - for sequence in sequences: - self.add_binding(action.name, sequence) - - def handle_token(self, t): - '''self.handle_token(token): returns None, or the action to - take. raises an exception on unknown input''' - self.curr_tokens.append(t) - sequence = " ".join(self.curr_tokens) - if sequence in self.bindings: - act = self.window.application.methods[self.bindings[sequence]] - self.last_sequence = sequence - self.curr_tokens = [] - return act - elif t in self.prefixes: - for binding in self.bindings: - if binding.startswith(sequence): - return None - self.curr_tokens = [] - self.last_sequence = sequence - raise ActionError, "no action defined for %r" % (sequence) - -class Fundamental(Handler): - '''This is the default mode''' - modename = "Fundamental" - paths = [] - basenames = [] - extensions = [] - detection = [] - savetabs = False - tabwidth = 4 - tabbercls = None - grammar = None - lexer = None - tabber = None - default_color = ('default', 'default',) - colors = {} - - def install(cls, app): - app.setmode(cls.modename.lower(), cls, paths=cls.paths, - basenames=cls.basenames, extensions=cls.extensions, - detection=cls.detection) - install = classmethod(install) - - def __init__(self, w): - self.window = w - - # we need to defer this due to curses startup - #self.default_color = color.pairs('default', 'default') - Handler.__init__(self) - - # first let's add all the "default" actions - self.add_bindings('start-of-line', ('C-a', 'HOME',)) - self.add_bindings('end-of-line', ('C-e', 'END',)) - self.add_bindings('backward', ('C-b', 'L_ARROW',)) - self.add_bindings('forward', ('C-f', 'R_ARROW',)) - self.add_bindings('center-view', ('C-l',)) - self.add_bindings('next-line', ('C-n', 'D_ARROW',)) - self.add_bindings('previous-line', ('C-p', 'U_ARROW',)) - self.add_bindings('next-section', ('M-n', 'M-D_ARROW',)) - self.add_bindings('previous-section', ('M-p', 'M-U_ARROW',)) - self.add_bindings('page-down', ('C-v', 'PG_DN',)) - self.add_bindings('page-up', ('M-v', 'PG_UP',)) - self.add_bindings('goto-beginning', ('M-<',)) - self.add_bindings('goto-end', ('M->',)) - self.add_bindings('delete-left', ('DELETE', 'BACKSPACE',)) - self.add_bindings('delete-left-word', ('M-DELETE', 'M-BACKSPACE',)) - self.add_bindings('delete-right', ('C-d',)) - self.add_bindings('delete-right-word', ('M-d',)) - self.add_bindings('kill-region', ('C-w',)) - self.add_bindings('copy-region', ('M-w',)) - self.add_bindings('kill', ('C-k',)) - self.add_bindings('copy', ('M-k',)) - self.add_bindings('yank', ('C-y',)) - self.add_bindings('pop-kill', ('M-y',)) - self.add_bindings('right-word', ('M-f',)) - self.add_bindings('left-word', ('M-b',)) - self.add_bindings('set-mark', ('C-@',)) - self.add_bindings('switch-buffer', ('C-x b',)) - self.add_bindings('switch-mark', ('C-x C-x',)) - self.add_bindings('undo', ('C-/', 'C-x u',)) - self.add_bindings('redo', ('M-/', 'M-_', 'C-x r',)) - self.add_bindings('goto-line', ('M-g',)) - self.add_bindings('forward-chars', ('C-x M-c',)) - self.add_bindings('forward-lines', ('C-x M-n',)) - self.add_bindings('search', ('C-s',)) - self.add_bindings('reverse-search', ('C-r',)) - self.add_bindings('regex-search', ('M-C-s',)) - self.add_bindings('regex-reverse-search', 
('M-C-r',)) - self.add_bindings('toggle-margins', ('M-m',)) - self.add_bindings('replace', ('M-%',)) - self.add_bindings('regex-replace', ('M-$',)) - self.add_bindings('open-file', ('C-x C-f',)) - self.add_bindings('kill-buffer', ('C-x k',)) - self.add_bindings('list-buffers', ('C-x C-b',)) - self.add_bindings('meta-x', ('M-x',)) - self.add_bindings('wrap-line', ('M-q',)) - self.add_bindings('transpose-words', ('M-t',)) - self.add_bindings('save-buffer', ('C-x C-s',)) - self.add_bindings('save-buffer-as', ('C-x C-w',)) - self.add_bindings('relex-buffer', ('M-r',)) - self.add_bindings('exit', ('C-x C-c',)) - self.add_bindings('split-window', ('C-x s', 'C-x 2',)) - self.add_bindings('unsplit-window', ('C-u s', 'C-x 1',)) - self.add_bindings('toggle-window', ('C-x o',)) - self.add_bindings('delete-left-whitespace', ('C-c DELETE', 'C-c BACKSPACE',)) - self.add_bindings('delete-right-whitespace', ('C-c d',)) - self.add_bindings('insert-space', ('SPACE',)) - self.add_bindings('insert-tab', ('TAB',)) - self.add_bindings('insert-newline', ('RETURN',)) - self.add_bindings('comment-region', ('C-c #',)) - self.add_bindings('uncomment-region', ('C-u C-c #',)) - self.add_bindings('justify-right', ('C-c f',)) - self.add_bindings('justify-left', ('C-c b',)) - self.add_bindings('indent-block', ('C-c >',)) - self.add_bindings('unindent-block', ('C-c <',)) - self.add_bindings('token-complete', ('M-c', 'C-c c')) - self.add_bindings('shell-cmd', ('C-c !',)) - self.add_bindings('open-aes-file', ('C-c a',)) - self.add_bindings('open-console', ('M-e',)) - self.add_bindings('show-bindings-buffer', ('C-c M-h','C-c M-?',)) - self.add_bindings('which-command', ('M-?',)) - self.add_bindings('cmd-help-buffer', ('M-h',)) - self.add_bindings('set-mode', ('C-x m',)) - self.add_bindings('cancel', ('C-]',)) - self.add_bindings('exec', ('C-c e',)) - self.add_bindings('grep', ('C-c g',)) - self.add_bindings('pipe', ('C-c p',)) - self.add_bindings('view-buffer-parent', ('C-c .',)) - - # unbound actions - self.add_action(method.GetToken()) - - # create all the insert actions for the basic text input - for c in string.letters + string.digits + string.punctuation: - self.add_binding('insert-string-%s' % c, c) - - # lexing for highlighting, etc. 
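Handler.handle_token() above implements multi-key bindings the Emacs way: accumulate input tokens, fire once the joined sequence names a binding, keep waiting while the sequence is still a prefix of some binding, and fail otherwise. A compact standalone version (the real code additionally gates the wait on the C-x/C-c/C-u prefix set):

    class SeqMatcher(object):
        def __init__(self, bindings):
            self.bindings = bindings       # e.g. {'C-x C-s': 'save-buffer'}
            self.pending = []
        def feed(self, key):
            self.pending.append(key)
            seq = ' '.join(self.pending)
            if seq in self.bindings:
                self.pending = []
                return self.bindings[seq]  # complete sequence: fire
            if any(b.startswith(seq) for b in self.bindings):
                return None                # still a viable prefix: wait
            self.pending = []
            raise KeyError('no action defined for %r' % seq)

    m = SeqMatcher({'C-x C-s': 'save-buffer', 'C-a': 'start-of-line'})
    assert m.feed('C-x') is None
    assert m.feed('C-s') == 'save-buffer'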
- if self.grammar: - self.lexer = Lexer(self, self.grammar) - self.gstack = {} - self.ghist = {} - - # tab handling - if self.tabbercls: - self.tabber = self.tabbercls(self) - - # get mode name - def name(self): - return self.modename - - # handle input tokens - def handle_token(self, t): - '''self.handle_token(token): handles input "token"''' - self.window.active_point = None - #self.window.application.clear_error() - self.window.clear_error() - try: - act = Handler.handle_token(self, t) - if act is None: - self.window.set_error(' '.join(self.curr_tokens)) - return - else: - act.execute(self.window) - self.window.application.last_action = act.name - except ActionError, e: - if t != 'C-]': - self.window.set_error(str(e)) - else: - self.window.set_error('Cancelled') - except Exception, e: - if DEBUG: - raise - else: - err = "%s in mode '%s'" % (e, self.name()) - self.window.set_error(err) - - def region_added(self, p, newlines): - if self.tabber is not None: - self.tabber.region_added(p, newlines) - if self.lexer: - ydelta = len(newlines) - 1 - xdelta = len(newlines[-1]) - ghist = {} - for name in self.ghist: - for gp in self.ghist[name]: - if gp < p: - newp = gp - elif ydelta == 0: - if p.y == gp.y: - newp = Point(gp.x + xdelta, gp.y) - else: - newp = gp - else: - if gp.y == p.y: - newp = Point(gp.x + xdelta, gp.y + ydelta) - else: - newp = Point(gp.x, gp.y + ydelta) - ghist.setdefault(name, {}) - ghist[name][newp] = self.ghist[name][gp] - self.ghist = ghist - def region_removed(self, p1, p2): - if self.tabber is not None: - self.tabber.region_removed(p1, p2) - if self.lexer: - ydelta = p2.y - p1.y - xdelta = p2.x - p1.x - ghist = {} - for name in self.ghist: - for gp in self.ghist[name]: - if gp < p1: - newp = gp - elif p1 <= gp and gp < p2: - continue - elif ydelta == 0: - if gp.y == p2.y: - newp = Point(gp.x - xdelta, gp.y) - else: - newp = gp - else: - if gp.y == p2.y: - newp = Point(gp.x - xdelta, gp.y - ydelta) - else: - newp = Point(gp.x, gp.y - ydelta) - ghist.setdefault(name, {}) - ghist[name][newp] = self.ghist[name][gp] - self.ghist = ghist -install = Fundamental.install diff --git a/tab2.py b/tab2.py deleted file mode 100644 index a3215e5..0000000 --- a/tab2.py +++ /dev/null @@ -1,209 +0,0 @@ -import regex, util -from point import Point - -class Marker: - def __init__(self, name, level): - self.name = name - self.level = level - def __repr__(self): - return '' % (self.name, self.level) - -class Tabber: - wsre = regex.whitespace - wst = ('null', 'eol',) - sre = regex.space - st = ('null',) - def __init__(self, m): - self.mode = m - self.lines = {} - - def get_highlighter(self): - return self.mode.window.buffer.highlights[self.mode.name()] - def get_tokens(self, y): - return self.mode.window.buffer.highlights[self.mode.name()].tokens[y] - def get_token(self, y, i): - return self.mode.window.buffer.highlights[self.mode.name()].tokens[y][i] - - def token_is_whitespace(self, y, i): - token = self.get_token(y, i) - return token.fqname() in self.wst and self.wsre.match(token.string) - def token_is_space(self, y, i): - token = self.get_token(y, i) - return token.fqname() in self.st and self.sre.match(token.string) - - def get_next_left_token(self, y, i): - tokens = self.get_tokens(y) - assert i >= 0 and i < len(tokens) - for j in range(1, i): - if not self.token_is_whitespace(y, i - j): - return tokens[i - j] - return None - def get_next_right_token(self, y, i): - tokens = self.get_tokens(y) - assert i >= 0 and i < len(tokens) - for j in range(i + 1, len(tokens)): - if not 
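Fundamental.region_added() above remaps every remembered ghist point by the same rule the buffer applies to text: points before the change stay put, points on the changed line shift right, points on later lines shift down. The single-line case in isolation, with points as (x, y) tuples:

    def shift_after_insert(p, at, width):
        # single-line insertion of `width` characters at point `at`
        (x, y), (ax, ay) = p, at
        if y != ay or x < ax:
            return (x, y)          # other line, or before the insert: untouched
        return (x + width, y)      # same line at/after the insert: pushed right

    assert shift_after_insert((8, 3), (2, 3), 4) == (12, 3)
    assert shift_after_insert((1, 3), (2, 3), 4) == (1, 3)

(Marker.__repr__ just above has lost its format string the same way Token.__repr__ did; it was presumably something like '<Marker(%r, %d)>'.)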
self.token_is_whitespace(y, j): - return tokens[j] - return None - def is_leftmost_token(self, y, i): - return self.get_next_left_token(y, i) is None - def is_rightmost_token(self, y, i): - return self.get_next_right_token(y, i) is None - def is_only_token(self, y, i): - return self.is_leftmost_token(y, i) and self.is_rightmost_token(y, i) - - def get_leftmost_token(self, y): - tokens = self.get_tokens(y) - for i in range(0, len(tokens)): - if not self.token_is_whitespace(y, i): - return tokens[i] - return None - def get_rightmost_token(self, y): - tokens = self.get_tokens(y) - i = len(tokens) - 1 - for j in range(0, len(tokens)): - if not self.token_is_whitespace(y, i - j): - return tokens[i - j] - return None - - def get_nonws_tokens(self, y): - tokens = self.get_tokens(y) - for i in range(0, len(tokens)): - if not self.token_is_whitespace(y, i): - yield tokens[i] - raise StopIteration - def get_nons_tokens(self, y): - tokens = self.get_tokens(y) - for i in range(0, len(tokens)): - if not self.token_is_space(y, i): - yield tokens[i] - raise StopIteration - - def region_added(self, p, newlines): - self.lines = {} - def region_removed(self, p1, p2): - self.lines = {} - - def is_base(self, y): - return True - def get_level(self, y): - if y in self.lines: - return self.lines[y] - else: - self._calc_level(y) - return self.lines.get(y) - def _calc_level(self, y): - pass - -class StackTabber(Tabber): - def __init__(self, m): - self.mode = m - self.lines = {} - self.record = {} - self.markers = [] - - def get_curr_level(self): - if self.markers: - return self.markers[-1].level - else: - return 0 - - def region_added(self, p, newlines): - self.lines = {} - self.record = {} - self.markers = [] - def region_removed(self, p1, p2): - self.lines = {} - self.record = {} - self.markers = [] - - def is_base(self, y): - return y == 0 - def _calc_level(self, y): - # first we need to step back to find the last place where we have tab - # stops figured out, or a suitable place to start - target = y - while not self.is_base(y) and y > 0: - y -= 1 - - # ok now, let's do this shit - self.markers = [] - currlvl = 0 - while y <= target: - currlvl = self.get_curr_level() - tokens = self.get_tokens(y) - for i in range(0, len(tokens)): - currlvl = self._handle_token(currlvl, y, i) - self.lines[y] = currlvl - self.record[y] = tuple(self.markers) - y += 1 - - def _handle_token(self, currlvl, y, i): - token = self.get_token(y, i) - s = token.string - fqname = token.fqname() - - if fqname in self.mode.closetokens and s in self.mode.closetags: - currlvl = self._handle_close_token(currlvl, y, i) - elif fqname in self.mode.opentokens and s in self.mode.opentags: - currlvl = self._handle_open_token(currlvl, y, i) - else: - currlvl = self._handle_other_token(currlvl, y, i) - return currlvl - - def _handle_open_token(self, currlvl, y, i): - token = self.get_token(y, i) - rtoken = self.get_next_right_token(y, i) - if rtoken is None: - #level = self.get_curr_level() + 4 - level = self.get_curr_level() + self.mode.tabwidth - else: - level = rtoken.x - self._append(token.string, level) - return currlvl - def _handle_close_token(self, currlvl, y, i): - token = self.get_token(y, i) - s1 = token.string - if not self.markers: - raise Exception, "unmatched closing token %r" % s1 - s2 = self.markers[-1].name - if self.mode.closetags[s1] == s2: - self._pop() - if self.is_leftmost_token(y, i): - currlvl = self.get_curr_level() - else: - raise Exception, "mismatched closing tag %r vs %r" % (s2, s1) - return currlvl - def 
_handle_other_token(self, currlvl, y, i): - return currlvl - - def _has_markers(self): - return len(self.markers) > 0 - def _empty(self): - return len(self.markers) == 0 - def _append(self, name, level): - self.markers.append(Marker(name, level)) - def _peek(self): - return self.markers[-1] - def _peek_name(self): - return self.markers[-1].name - def _peek_level(self): - return self.markers[-1].level - def _pop(self): - self.markers.pop(-1) - def _pop_until(self, *names): - while self.markers: - if self.markers[-1].name in names: - self.markers.pop(-1) - return - else: - self.markers.pop(-1) - - def _opt_append(self, name, level): - if self.markers and self.markers[-1].name == name: - pass - else: - self._append(name, level) - def _opt_pop(self, *names): - if self.markers and self.markers[-1].name in names: - self.markers.pop(-1) diff --git a/window2.py b/window2.py deleted file mode 100644 index 7870982..0000000 --- a/window2.py +++ /dev/null @@ -1,635 +0,0 @@ -import os.path, string -import highlight, regex -from point import Point - -WORD_LETTERS = list(string.letters + string.digits) - -# note about the cursor: the cursor position will insert in front of the -# character it highlights. to this end, it needs to be able to highlight behind -# the last character on a line. thus, the x coordinate of the (logical) cursor -# can equal the length of lines[y], even though lines[y][x] throws an index -# error. both buffer and window need to be aware of this possibility for points. - -class Window(object): - margins = ((80, 'blue'),) - margins_visible = False - def __init__(self, b, a, height=24, width=80, mode_name=None): - self.buffer = b - self.application = a - - self.first = Point(0, 0) - self.last = None - self.cursor = Point(0, 0) - self.mark = None - self.active_point = None - - self.height = height - self.width = width - - self.input_line = "" - - if mode_name is not None: - pass - elif hasattr(self.buffer, 'modename') and self.buffer.modename is not None: - mode_name = self.buffer.modename - elif self.buffer.btype == 'mini': - mode_name = 'mini' - elif self.buffer.btype == 'console': - mode_name = "fundamental" - elif self.buffer.btype == 'dir': - mode_name = 'dir' - elif hasattr(self.buffer, 'path'): - path = self.buffer.path - basename = os.path.basename(path) - ext = self._get_path_ext(path) - - if path in self.application.mode_paths: - mode_name = self.application.mode_paths[path] - elif basename in self.application.mode_basenames: - mode_name = self.application.mode_basenames[basename] - elif ext in self.application.mode_extensions: - mode_name = self.application.mode_extensions[ext] - elif len(self.buffer.lines) > 0 and \ - self.buffer.lines[0].startswith('#!'): - line = self.buffer.lines[0] - for word in self.application.mode_detection: - if word in line: - mode_name = self.application.mode_detection[word] - - if mode_name is None: - mode_name = "fundamental" - - m = self.application.modes[mode_name](self) - self.set_mode(m) - self.buffer.add_window(self) - - # private method used in window constructor - def _get_path_ext(self, path): - name = os.path.basename(path).lower() - tokens = name.split('.') - if len(tokens) > 2 and tokens[-1] in ('gz', 'in', 'zip'): - return '.%s.%s' % (tokens[-2], tokens[-1]) - else: - return os.path.splitext(path)[1].lower() - - # some useful pass-through to application - def set_error(self, s): - self.application.set_error(s) - def clear_error(self): - self.application.clear_error() - - # mode stuff - def set_mode(self, m): - self.mode = m - 
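StackTabber above is a classic bracket-stack indenter: opening tokens push a marker carrying the level that subsequent lines should get, closing tokens pop it, and a line's level is whatever the stack says when the line starts (a closer leading a line dedents to its opener's level). A toy version for bare brackets, using the simpler of the two policies in _handle_open_token (always indent by tabwidth, never align to the opener):

    def indent_levels(lines, width=4):
        stack, levels = [], []           # stack of pending indent levels
        for line in lines:
            stripped = line.lstrip()
            if stripped and stripped[0] in ')]}' and stack:
                levels.append(stack[-1] - width)   # leading closer dedents
            else:
                levels.append(stack[-1] if stack else 0)
            for c in line:
                if c in '([{':
                    stack.append((stack[-1] if stack else 0) + width)
                elif c in ')]}' and stack:
                    stack.pop()
        return levels

    assert indent_levels(['foo(', 'bar,', ')']) == [0, 4, 0]

(Unlike _handle_close_token, the toy version never checks that a closer actually matches its opener, which the real code raises on.)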
modename = m.name() - if modename not in self.buffer.highlights and m.lexer is not None: - self.buffer.highlights[modename] = highlight.Highlighter(m.lexer) - self.buffer.highlights[modename].highlight(self.buffer.lines) - - #self.redraw() - def get_highlighter(self): - if self.mode.lexer is None: - return None - else: - return self.buffer.highlights[self.mode.name()] - - # this is used to temporarily draw the user's attention to another point - def set_active_point(self, p, msg='marking on line %(y)d, character %(x)d'): - self.active_point = p - if not self.point_is_visible(p): - self.application.set_error(msg % {'x': p.x, 'y': p.y}) - - # point left - def point_left(self, p): - if p.y == 0 and p.x == 0: - return None - elif p.x == 0: - return Point(len(self.buffer.lines[p.y - 1]), p.y - 1) - else: - return Point(p.x - 1, p.y) - - # point right - def point_right(self, p): - if p.y == len(self.buffer.lines)-1 and p.x == len(self.buffer.lines[-1]): - return None - elif p.x == len(self.buffer.lines[p.y]): - return Point(0, p.y + 1) - else: - return Point(p.x + 1, p.y) - - # cursors - def logical_cursor(self): - if len(self.buffer.lines) > self.cursor.y: - l = len(self.buffer.lines[self.cursor.y]) - else: - l = 0 - x = min(self.cursor.x, l) - return Point(x, self.cursor.y) - - # last visible point - def _calc_last(self): - (x, y) = self.first.xy() - count = 0 - while count < self.height - 1 and y < len(self.buffer.lines) - 1: - line = self.buffer.lines[y] - if x >= len(line) or len(line[x:]) <= self.width: - x = 0 - y += 1 - count += 1 - else: - count += 1 - x += self.width - - if y < len(self.buffer.lines): - x = min(x + self.width, len(self.buffer.lines[y])) - self.last = Point(x, y) - - # redrawing - def redraw(self): - self._calc_last() - - def set_size(self, width, height): - assert type(width) == type(0), width - assert type(height) == type(0), height - self.width = width - self.height = height - self.redraw() - - # region added - def region_added(self, p, newlines): - (x, y) = self.logical_cursor().xy() - l = len(newlines) - assert l > 0, repr(newlines) - visible = self.point_is_visible(p) - if l > 1: - if y > p.y: - self.cursor = Point(x, y + l - 1) - elif y == p.y and x >= p.x: - self.cursor = Point(len(newlines[-1]) + x - p.x, y + l - 1) - elif y == p.y and x >= p.x: - self.cursor = Point(x + len(newlines[0]), y) - - if not visible and l > 1 and self.first.y > p.y: - self.first = Point(self.first.x, self.first.y + l - 1) - - self.redraw() - self.mode.region_added(p, newlines) - self.assure_visible_cursor() - - # region removed - def region_removed(self, p1, p2): - cursor = self.logical_cursor() - (x, y) = cursor.xy() - visible = self.point_is_visible(p2) - - xdelta = p2.x - p1.x - ydelta = p2.y - p1.y - - if cursor < p1: - pass - elif cursor < p2: - self.cursor = p1 - elif cursor.y == p2.y: - #self.cursor = Point(self.cursor.x - p2.x + p1.x, p1.y) - self.cursor = Point(self.cursor.x - xdelta, p1.y) - else: - #self.cursor = Point(self.cursor.x, self.cursor.y - p2.y + p1.y) - self.cursor = Point(self.cursor.x, self.cursor.y - ydelta) - - if not visible and ydelta and self.first.y > p2.y: - self.first = Point(self.first.x, self.first.y - ydelta) - - self.redraw() - self.mode.region_removed(p1, p2) - self.assure_visible_cursor() - - def point_is_visible(self, p): - return self.first <= p and p <= self.last - def cursor_is_visible(self): - return self.point_is_visible(self.logical_cursor()) - def first_is_visible(self): - return self.point_is_visible(self.buffer.get_buffer_start()) - def 
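logical_cursor() above is the clamp promised by the note at the top of this file: the stored cursor keeps its x while moving through shorter lines, but the effective position never runs past the end of the current line (it may equal the line length, one past the last character). In isolation:

    def logical_cursor(lines, cursor):
        x, y = cursor
        return (min(x, len(lines[y])), y)

    lines = ['a long line', 'ab', 'another long line']
    assert logical_cursor(lines, (7, 1)) == (2, 1)   # clamped to end of 'ab'
    assert logical_cursor(lines, (7, 2)) == (7, 2)   # recovered on a long line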
last_is_visible(self): - return self.point_is_visible(self.buffer.get_buffer_end()) - - def center_view(self): - (x, y) = self.logical_cursor().xy() - counter = 0 - while counter < self.height / 2: - if x > self.width: - x -= self.width - elif y > 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - (x, y) = (0, 0) - break - counter += 1 - self.first = Point(x - (x % self.width), y) - self.redraw() - def assure_visible_cursor(self): - if not self.cursor_is_visible(): - #raise Exception, "%s < %s" % (self.last, self.logical_cursor()) - self.center_view() - - # moving in buffer - def forward(self): - cursor = self.logical_cursor() - if cursor.x < len(self.buffer.lines[cursor.y]): - self.cursor = Point(cursor.x + 1, cursor.y) - elif cursor.y < len(self.buffer.lines) -1: - self.cursor = Point(0, cursor.y + 1) - self.assure_visible_cursor() - def backward(self): - cursor = self.logical_cursor() - if cursor.x > 0: - self.cursor = Point(cursor.x - 1, cursor.y) - elif cursor.y > 0: - self.cursor = Point(len(self.buffer.lines[cursor.y - 1]), cursor.y - 1) - self.assure_visible_cursor() - def end_of_line(self): - cursor = self.logical_cursor() - self.cursor = Point(len(self.buffer.lines[cursor.y]), cursor.y) - self.assure_visible_cursor() - def start_of_line(self): - cursor = self.logical_cursor() - self.cursor = Point(0, cursor.y) - self.assure_visible_cursor() - def previous_line(self): - if self.cursor.y > 0: - self.cursor = Point(self.cursor.x, self.cursor.y - 1) - self.assure_visible_cursor() - def next_line(self): - if self.cursor.y < len(self.buffer.lines) - 1: - self.cursor = Point(self.cursor.x, self.cursor.y + 1) - self.assure_visible_cursor() - - # word handling - def find_left_word(self, p=None): - if p is None: - (x, y) = self.logical_cursor().xy() - else: - (x, y) = p.xy() - - start = self.buffer.get_buffer_start() - if (x, y) == start: - return - elif x == 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - x -= 1 - while (y, x) >= start and self.xy_char(x, y) not in WORD_LETTERS: - if x == 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - x -= 1 - found_word = False - while (y, x) >= start and self.xy_char(x, y) in WORD_LETTERS: - found_word = True - if x == 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - x -= 1 - if not found_word: - return None - elif x == len(self.buffer.lines[y]): - x = 0 - y += 1 - else: - x += 1 - return Point(x, y) - def find_right_word(self, p=None): - if p is None: - (x, y) = self.logical_cursor().xy() - else: - (x, y) = p.xy() - end = self.buffer.get_buffer_end() - while (y, x) < end and self.xy_char(x, y) not in WORD_LETTERS: - if x == len(self.buffer.lines[y]): - x = 0 - y += 1 - else: - x += 1 - while (y, x) < end and self.xy_char(x, y) in WORD_LETTERS: - if x == len(self.buffer.lines[y]): - x = 0 - y += 1 - else: - x += 1 - return Point(x, y) - def left_word(self): - p = self.find_left_word() - if p is not None: - self.goto(p) - def right_word(self): - p = self.find_right_word() - if p is not None: - self.goto(p) - def get_word_bounds_at_point(self, p, wl=WORD_LETTERS): - if len(self.buffer.lines[p.y]) == 0: - return None - elif self.cursor_char() not in wl: - return None - x1 = x2 = p.x - while x1 > 0 and self.xy_char(x1 - 1, p.y) in wl: - x1 -= 1 - while x2 < len(self.buffer.lines[p.y]) and self.xy_char(x2, p.y) in wl: - x2 += 1 - return (Point(x1, p.y), Point(x2, p.y)) - def get_word_at_point(self, p, wl=WORD_LETTERS): - bounds = self.get_word_bounds_at_point(p, wl) - if bounds is None: - return None - else: - return 
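find_right_word() above is two scans, skip separators then consume the word, each wrapping across line ends; find_left_word() is the mirror image. The single-line core of the forward scan, with WORD_LETTERS as at the top of this file (a set here rather than a list):

    import string
    WORD_LETTERS = set(string.ascii_letters + string.digits)

    def right_word(line, x):
        while x < len(line) and line[x] not in WORD_LETTERS:
            x += 1                       # skip separators
        while x < len(line) and line[x] in WORD_LETTERS:
            x += 1                       # consume the word
        return x

    assert right_word('foo bar baz', 3) == 7   # from end of 'foo' past 'bar'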
self.buffer.get_substring(bounds[0], bounds[1]) - def get_word_bounds(self, wl=WORD_LETTERS): - return self.get_word_bounds_at_point(self.logical_cursor(), wl) - def get_word(self, wl=WORD_LETTERS): - return self.get_word_at_point(self.logical_cursor(), wl) - - # page up/down - def _pshift_up(self, p, num): - (x, y) = p.xy() - orig_x = x - counter = 0 - while counter < num and y > 0: - if x > self.width: - x -= self.width - else: - y -= 1 - x = len(self.buffer.lines[y]) - counter += 1 - return Point(orig_x, y) - def _pshift_down(self, p, num): - (x, y) = p.xy() - orig_x = x - counter = 0 - while counter < num and y < len(self.buffer.lines): - if x + self.width >= len(self.buffer.lines[y]): - y += 1 - x = 0 - else: - x += self.width - counter += 1 - if y == len(self.buffer.lines): - y -= 1 - x = len(self.buffer.lines[y]) - return Point(orig_x, y) - def page_up(self): - first_point = self.buffer.get_buffer_start() - if self.point_is_visible(first_point): - self.goto_beginning() - return - self.cursor = self._pshift_up(self.cursor, self.height - 3) - if self.first > first_point: - self.first = self._pshift_up(self.first, self.height - 3) - self.redraw() - def page_down(self): - last_point = self.buffer.get_buffer_end() - if self.point_is_visible(last_point): - self.goto_end() - return - self.cursor = self._pshift_down(self.cursor, self.height - 3) - if self.last < last_point: - self.first = self._pshift_down(self.first, self.height - 3) - self.redraw() - - # jumping in buffer - def goto(self, p): - self.cursor = p - self.assure_visible_cursor() - def goto_line(self, n): - assert n > 0 and n <= len(self.buffer.lines) , "illegal line: %d" % n - self.cursor = Point(0, n - 1) - self.assure_visible_cursor() - def forward_lines(self, n): - assert n > 0, "illegal number of lines: %d" % n - y = min(self.logical_cursor().y + n, len(self.buffer.lines) - 1) - self.goto(Point(0, y)) - def forward_chars(self, n): - (x, y) = self.logical_cursor().xy() - for i in range(0, n): - if x == len(self.buffer.lines[y]): - y += 1 - x = 0 - if y >= len(self.buffer.lines): - break - else: - x += 1 - self.goto(Point(x, y)) - def goto_char(self, n): - self.goto_beginning() - self.forward_chars(n) - def goto_beginning(self): - self.cursor = Point(0, 0) - self.assure_visible_cursor() - def goto_end(self): - self.cursor = self.buffer.get_buffer_end() - (x, y) = self.logical_cursor().xy() - if x == 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - x -= 1 - counter = 0 - while counter < self.height - 3: - if x > self.width: - x -= self.width - elif y > 0: - y -= 1 - x = len(self.buffer.lines[y]) - else: - (x, y) = (0, 0) - break - counter += 1 - - if not self.cursor_is_visible(): - self.first = Point(x - (x % self.width), y) - self.redraw() - - # mark manipulation - def set_mark_point(self, p): - self.mark = p - def set_mark(self): - self.set_mark_point(self.logical_cursor()) - self.application.set_error("Mark set") - def goto_mark(self): - self.goto(self.mark) - def switch_mark(self): - if self.mark: - p = self.mark - self.set_mark_point(self.logical_cursor()) - self.goto(p) - - # deletion - def left_delete(self): - (x, y) = self.logical_cursor().xy() - if x > 0: - self.buffer.delete_char(Point(x - 1, y)) - elif y > 0: - self.buffer.delete_char(Point(len(self.buffer.lines[y - 1]), y - 1)) - def right_delete(self): - cursor = self.logical_cursor() - if cursor < self.last: - self.buffer.delete_char(cursor) - else: - pass - - # killing - def kill_line(self): - return self.copy_line(kill=True) - def kill_region(self): - 
return self.copy_region(kill=True) - def kill_left_word(self): - p1 = self.find_left_word() - p2 = self.logical_cursor() - if p1 == p2: - return - return self.kill(p1, p2) - def kill_right_word(self): - p1 = self.logical_cursor() - p2 = self.find_right_word() - if p1 == p2: - return - return self.kill(p1, p2) - def copy_line(self, kill=False): - cursor = self.logical_cursor() - (x, y) = cursor.xy() - lines = self.buffer.lines - if (x < len(lines[y]) and not regex.whitespace.match(lines[y][x:])): - limit = Point(len(lines[y]), y) - elif y < len(lines) - 1: - limit = Point(0, y + 1) - else: - return - if kill: - return self.kill(cursor, limit) - else: - return self.copy(cursor, limit) - def copy_region(self, kill=False): - cursor = self.logical_cursor() - if cursor < self.mark: - p1 = cursor - p2 = self.mark - elif self.mark < cursor: - p1 = self.mark - p2 = cursor - else: - self.input_line = "Empty kill region" - return - if kill: - return self.kill(p1, p2) - else: - return self.copy(p1, p2) - def kill(self, p1, p2): - killed = self.buffer.get_substring(p1, p2) - self.buffer.delete(p1, p2) - self.application.push_kill(killed) - return killed - def copy(self, p1, p2): - copied = self.buffer.get_substring(p1, p2) - self.application.push_kill(copied) - return copied - - # overwriting - def overwrite_char_at_cursor(self, c): - self.overwrite_char(self.logical_cursor(), c) - def overwrite_char(self, p, c): - line = self.buffer.lines[p.y] - if p.x >= len(line): - self.insert_string(p, c) - elif p.x == len(line) - 1: - self.buffer.overwrite_char(p, c) - if p.y < len(self.buffer.lines): - self.cursor = Point(0, p.y + 1) - else: - self.buffer.overwrite_char(p, c) - self.cursor = Point(p.x + 1, p.y) - - # insertion - def insert_string_at_cursor(self, s): - self.insert_string(self.logical_cursor(), s) - def insert_string(self, p, s): - lines = s.split('\n') - self.insert_lines(p, lines) - def insert_lines_at_cursor(self, lines): - self.insert_lines(self.logical_cursor(), lines) - def insert_lines(self, p, lines): - self.buffer.insert_lines(p, lines) - self.redraw() - - # yank/pop - def yank(self): - self.insert_string_at_cursor(self.application.get_kill()) - def get_kill(self): - return self.application.get_kill() - def has_kill(self, i=-1): - return self.application.has_kill(i) - def pop_kill(self): - return self.application.pop_kill() - def push_kill(self, s): - return self.application.push_kill(s) - - # querying - def cursor_char(self): - return self.point_char(self.logical_cursor()) - def point_char(self, p): - return self.xy_char(p.x, p.y) - def xy_char(self, x, y): - if x == len(self.buffer.lines[y]): - return "\n" - else: - return self.buffer.lines[y][x] - - # undo/redo - def undo(self): - p = self.buffer.undo() - if not self.point_is_visible(p): - self.goto(p) - def redo(self): - p = self.buffer.redo() - if not self.point_is_visible(p): - self.goto(p) - - # highlighting tokens - def get_token(self): - return self.get_token_at_point(self.logical_cursor()) - def get_token2(self): - c = self.logical_cursor() - p = Point(max(0, c.x - 1), c.y) - return self.get_token_at_point(p) - def get_token_at_point(self, p): - for token in self.get_highlighter().tokens[p.y]: - if token.end_x() <= p.x: - continue - elif token.x > p.x: - continue - else: - return token - return None - def get_next_token_by_lambda(self, p, f): - tokens = self.get_highlighter().tokens[p.y] - for token in tokens: - if token.x < p.x: - continue - if f(token): - return token - return None - def get_next_token_by_type(self, p, name): 
-        return self.get_next_token_by_lambda(p, lambda t: t.name == name)
-    def get_next_token_except_type(self, p, name):
-        return self.get_next_token_by_lambda(p, lambda t: t.name != name)
-    def get_next_token_by_type_regex(self, p, name, regex):
-        l = lambda t: t.name == name and regex.match(t.string)
-        return self.get_next_token_by_lambda(p, l)
-    def get_next_token_except_type_regex(self, p, name, regex):
-        l = lambda t: t.name != name or regex.match(t.string)
-        return self.get_next_token_by_lambda(p, l)
-
-    def get_next_token_by_types(self, p, *names):
-        return self.get_next_token_by_lambda(p, lambda t: t.name in names)
-    def get_next_token_except_types(self, p, *names):
-        return self.get_next_token_by_lambda(p, lambda t: t.name not in names)
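get_token_at_point() above picks the token whose half-open span [x, end_x()) covers the point, and the whole get_next_token_* family reduces to get_next_token_by_lambda with a predicate. The containment test on bare (start, string) pairs:

    def token_at(tokens, x):
        # tokens: (start, string) pairs for one line, sorted by start
        for start, s in tokens:
            if start <= x < start + len(s):
                return (start, s)
        return None

    toks = [(0, 'def'), (3, ' '), (4, 'foo')]
    assert token_at(toks, 5) == (4, 'foo')
    assert token_at(toks, 9) is None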