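"""Indentation support: compute the indentation ("tab") level of buffer lines.

A Tabber inspects the highlighter tokens of each line and reports the level
at which that line should be indented.  StackTabber and StackTabber2 do this
by maintaining a stack of Marker/Marker2 objects for open delimiters, control
keywords and statement continuations, re-scanning from the nearest "base"
line whenever a cached level is missing.
"""
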
import regex, util
from point import Point

class Marker(object):
    def __init__(self, name, level, y):
        self.name = name
        self.level = level
        self.y = y
    def __repr__(self):
        return '<Marker(%r, %r, %r)>' % (self.name, self.level, self.y)

class Tabber(object):
    wsre = regex.whitespace
    wst = ('spaces', 'null', 'eol',)
    sre = regex.space
    st = ('spaces', 'null',)
    def __init__(self, m):
        self.mode = m
        self.lines = {}

    def get_highlighter(self):
        return self.mode.window.buffer.highlights[self.mode.name]
    def get_tokens(self, y):
        return self.mode.window.buffer.highlights[self.mode.name].tokens[y]
    def get_token(self, y, i):
        return self.mode.window.buffer.highlights[self.mode.name].tokens[y][i]

    def token_is_whitespace(self, y, i):
        token = self.get_token(y, i)
        return token.fqname() in self.wst and self.wsre.match(token.string)
    def token_is_space(self, y, i):
        token = self.get_token(y, i)
        return token.fqname() in self.st and self.sre.match(token.string)

    def get_next_left_token(self, y, i):
        tokens = self.get_tokens(y)
        assert i >= 0 and i < len(tokens)
        # scan left from i - 1 down to 0, so the first token on the line is
        # examined as well
        for j in range(1, i + 1):
            if not self.token_is_whitespace(y, i - j):
                return tokens[i - j]
        return None
    def get_next_right_token(self, y, i):
        tokens = self.get_tokens(y)
        assert i >= 0 and i < len(tokens)
        for j in range(i + 1, len(tokens)):
            if not self.token_is_whitespace(y, j):
                return tokens[j]
        return None
    def is_leftmost_token(self, y, i):
        return self.get_next_left_token(y, i) is None
    def is_rightmost_token(self, y, i):
        return self.get_next_right_token(y, i) is None
    def is_only_token(self, y, i):
        return self.is_leftmost_token(y, i) and self.is_rightmost_token(y, i)

    def get_leftmost_token(self, y):
        tokens = self.get_tokens(y)
        for i in range(0, len(tokens)):
            if not self.token_is_whitespace(y, i):
                return tokens[i]
        return None
    def get_rightmost_token(self, y):
        tokens = self.get_tokens(y)
        i = len(tokens) - 1
        for j in range(0, len(tokens)):
            if not self.token_is_whitespace(y, i - j):
                return tokens[i - j]
        return None

    def get_nonws_tokens(self, y):
        tokens = self.get_tokens(y)
        for i in range(0, len(tokens)):
            if not self.token_is_whitespace(y, i):
                yield tokens[i]
        raise StopIteration
    def get_nons_tokens(self, y):
        tokens = self.get_tokens(y)
        for i in range(0, len(tokens)):
            if not self.token_is_space(y, i):
                yield tokens[i]
        raise StopIteration

    def region_added(self, p, newlines):
        self.lines = {}
    def region_removed(self, p1, p2):
        self.lines = {}

    def is_base(self, y):
        return True
    def get_level(self, y):
        if y in self.lines:
            return self.lines[y]
        else:
            self._calc_level(y)
            return self.lines.get(y)
    def _calc_level(self, y):
        # subclasses override this to compute and cache self.lines[y]
        pass

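# Typical use (a sketch; the mode/window wiring is assumed from the attribute
# accesses above): a mode builds one tabber and asks it for line levels,
# invalidating the cache whenever the buffer changes, e.g.
#
#     tabber = StackTabber(mode)
#     level = tabber.get_level(y)          # indentation level for line y
#     tabber.region_added(p, newlines)     # drop cached levels after an edit
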
class StackTabber(Tabber):
    def __init__(self, m):
        self.mode = m
        self.lines = {}
        self.record = {}
        self.markers = []

    def get_curr_level(self):
        if self.markers:
            return self.markers[-1].level
        else:
            return 0

    def region_added(self, p, newlines):
        self.lines = {}
        self.record = {}
        self.markers = []
    def region_removed(self, p1, p2):
        self.lines = {}
        self.record = {}
        self.markers = []

    def is_base(self, y):
        return y == 0
    def _calc_level(self, y):
        # first we need to step back to find the last place where we have tab
        # stops figured out, or a suitable place to start
        target = y
        while not self.is_base(y) and y > 0:
            y -= 1

        # now re-scan forward from that base line up to the target line,
        # rebuilding the marker stack as we go
        self.markers = []
        currlvl = 0
        while y <= target:
            currlvl = self.get_curr_level()
            tokens = self.get_tokens(y)
            for i in range(0, len(tokens)):
                currlvl = self._handle_token(currlvl, y, i)
            self.lines[y] = currlvl
            self.record[y] = tuple(self.markers)
            y += 1

    def _handle_token(self, currlvl, y, i):
        token = self.get_token(y, i)
        s = token.string
        fqname = token.fqname()

        if fqname in self.mode.closetokens and s in self.mode.closetags:
            currlvl = self._handle_close_token(currlvl, y, i)
        elif fqname in self.mode.opentokens and s in self.mode.opentags:
            currlvl = self._handle_open_token(currlvl, y, i)
        else:
            currlvl = self._handle_other_token(currlvl, y, i)
        return currlvl

    def _handle_open_token(self, currlvl, y, i):
        token = self.get_token(y, i)
        rtoken = self.get_next_right_token(y, i)
        if rtoken is None:
            level = self.get_curr_level() + self.mode.tabwidth
        else:
            level = rtoken.x
        self._append(token.string, level, y)
        return currlvl
    def _handle_close_token(self, currlvl, y, i):
        token = self.get_token(y, i)
        s1 = token.string
        if not self.markers:
            raise Exception("unmatched closing token %r" % s1)
        s2 = self.markers[-1].name
        if self.mode.closetags[s1] == s2:
            self._pop()
            if self.is_leftmost_token(y, i):
                currlvl = self.get_curr_level()
        else:
            raise Exception("mismatched closing tag %r vs %r" % (s2, s1))
        return currlvl
    def _handle_other_token(self, currlvl, y, i):
        return currlvl

    def _has_markers(self):
        return len(self.markers) > 0
    def _empty(self):
        return len(self.markers) == 0
    def _append(self, name, level, y=None):
        self.markers.append(Marker(name, level, y))
    def _peek(self):
        if self.markers:
            return self.markers[-1]
        else:
            return None
    def _peek_until(self, *names):
        for i in range(1, len(self.markers) + 1):
            x = self.markers[-i]
            if x.name in names:
                return x
        return None

    def _peek_name(self):
        if self.markers:
            return self.markers[-1].name
        else:
            return None
    def _peek_level(self):
        if self.markers:
            return self.markers[-1].level
        else:
            return None
    def _pop(self):
        self.markers.pop(-1)
    def _pop_while(self, *names):
        while self.markers and self.markers[-1].name in names:
            self.markers.pop(-1)
    def _pop_unless(self, *names):
        if self.markers and self.markers[-1].name not in names:
            self.markers.pop(-1)
    def _pop_until(self, *names):
        while self.markers:
            if self.markers[-1].name in names:
                self.markers.pop(-1)
                return
            else:
                self.markers.pop(-1)

    def _opt_append(self, name, level, y=None):
        if self.markers and self.markers[-1].name == name:
            pass
        else:
            self._append(name, level, y)
    def _opt_pop(self, *names):
        if self.markers and self.markers[-1].name in names:
            self.markers.pop(-1)

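# StackTabber reads its bracket configuration from the mode itself
# (mode.opentokens/opentags and mode.closetokens/closetags); StackTabber2
# below is configured through class-level token tables instead.
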
class Marker2(object):
    def __init__(self, name, type_, level, y=None):
        self.name = name
        self.type_ = type_
        self.level = level
        self.y = y
    def __repr__(self):
        return '<Marker2(%r, %r, %r, %r)>' % (self.name, self.type_, self.level, self.y)

class StackTabber2(Tabber):
    # token types that are skipped entirely, and the type of a leading indent token
    is_ignored_tokens = ('spaces', 'eol', 'comment')
    is_indent_tokens = ('spaces',)
    # opening/closing delimiters, and which of them open or close a scope
    open_tokens = {'delimiter': {'{': '}', '(': ')', '[': ']'}}
    close_tokens = {'delimiter': set(['}', ')', ']'])}
    open_scope_tokens = {'delimiter': set(['{'])}
    close_scope_tokens = {'delimiter': set(['}'])}
    # keywords which indent the statement that follows them
    control_tokens = {'keyword': set(['if', 'else', 'while', 'do', 'for'])}
    # where statements end: at end-of-line, or at explicit terminator tokens
    end_at_eof = True
    end_at_tokens = {}
    # tokens which do (or do not) continue the current statement
    continue_tokens = {}
    nocontinue_tokens = {}
    # "free" regions (strings, heredocs, etc.) and macro regions
    start_free_tokens = {'string.start': 'string.end'}
    end_free_tokens = {'string.end': 'string.start'}
    start_macro_tokens = {}
    end_macro_tokens = {}
    # always indent by tabwidth rather than aligning to an open delimiter
    fixed_indent = False
    def __init__(self, m):
        self.mode = m
        self.name = m.name
        self.lines = {}
        self._reset()
    def region_added(self, p, newlines):
        self.lines = {}
    def region_removed(self, p1, p2):
        self.lines = {}
    def get_level(self, y):
        if y not in self.lines:
            self._calc_level(y)
        return self.lines.get(y)

    def _calc_level(self, y):
        target = y
        while not self._is_base(y) and y > 0:
            y -= 1
        self._reset()
        while y <= target:
            self._save_curr_level()
            self._handle_tokens(y)
            y += 1

    def _is_base(self, y):
        return y == 0

    def _reset(self):
        self.record = {}
        self.stack = []
        self.markers = self.stack
        self.curr_level = 0
    def _get_curr_level(self):
        if self.stack:
            return self.stack[-1].level
        else:
            return 0
    def _get_next_level(self):
        return self._get_curr_level() + self.mode.tabwidth
    def _save_curr_level(self):
        self.curr_level = self._get_curr_level()

    def _match(self, *names):
        return self.stack and self.stack[-1].name in names
    def _nomatch(self, *names):
        return not self.stack or self.stack[-1].name not in names
    def _pop(self, *names):
        if self._match(*names):
            self.stack.pop()
    def _pop_while(self, *names):
        while self._match(*names):
            self.stack.pop()
    def _pop_until(self, *names):
        while self._nomatch(*names):
            self.stack.pop()

    def _append(self, name, type_, level, y=None):
        self.stack.append(Marker2(name, type_, level, y))
    def _append_unless(self, name, type_, level, y=None):
        if self._nomatch(name):
            self.stack.append(Marker2(name, type_, level, y))
    def _peek(self):
        if self.stack:
            return self.stack[-1]
        else:
            return None
    def _peek_until(self, *names):
        for i in range(1, len(self.stack) + 1):
            x = self.stack[-i]
            if x.name in names:
                return x
        return None

    def _get_tokens(self, y):
        return self.mode.window.buffer.highlights[self.name].tokens[y]
    def _handle_tokens(self, y):
        tokens = self._get_tokens(y)
        assert tokens
        # skip a leading indentation token and any trailing ignored tokens
        start = int(self._is_indent(tokens[0]))
        end = len(tokens) - 1

        while end > 0 and self._is_ignored(tokens[end]):
            end -= 1

        for i in range(0, end + 1 - start):
            t = tokens[start + i]
            if self._is_ignored(t):
                pass
            elif self._is_close_token(t):
                self._handle_close_token(y, tokens, start, end, i, t)
            elif self._is_open_token(t):
                self._handle_open_token(y, tokens, start, end, i, t)
            else:
                self._handle_other_token(y, tokens, start, end, i, t)
        self.lines[y] = self.curr_level
        self.record[y] = tuple(self.stack)

    def _is_indent(self, t):
        #return t.name == 'spaces'
        return t.name in self.is_indent_tokens
    def _is_ignored(self, t):
        #return t.name in ('spaces', 'eol', 'comment')
        return t.name in self.is_ignored_tokens

    def _is_close_token(self, t):
        return t.string in self.close_tokens.get(t.name, set())
    def _handle_close_token(self, y, tokens, start, end, i, t):
        if not self.stack:
            raise Exception("unmatched %r, line %d" % (t.string, y))
        while True:
            marker = self.stack[-1]
            if marker.name in ('control', 'continue'):
                self.stack.pop()
            elif marker.name in self.open_tokens[marker.type_]:
                s = self.open_tokens[marker.type_][marker.name]
                if s in (None, t.string):
                    self.stack.pop()
                    break
                else:
                    raise Exception("mismatched %r, line %d (expected %r)" %
                                    (t.string, y, self.open_tokens[marker.type_][marker.name]))
            else:
                raise Exception("unexpected marker %r" % marker.name)
        if i == 0:
            self._save_curr_level()

        # add implicit continuation: if the closing token ends the line and we
        # are back at scope level, the statement may continue on the next line
        name, s = t.fqname(), t.string
        top = self._peek()
        atscope = True
        if top:
            d = self.open_scope_tokens.get(top.type_, set())
            atscope = top.name in d
        if atscope and i + start == end:
            d = self.nocontinue_tokens.get(name)
            if d is None or d != 1 and s not in d:
                if s not in self.close_scope_tokens.get(name, set()):
                    nextlvl = self._get_next_level()
                    self._append_unless('continue', name, nextlvl, y)

    def _is_open_token(self, t):
        return t.string in self.open_tokens.get(t.name, set())
    def _handle_open_token(self, y, tokens, start, end, i, t):
        if i == 0 and self.stack and self.stack[-1].name == 'continue':
            self.stack.pop()
        if t.string in self.open_scope_tokens.get(t.name, set()):
            self._pop_while('continue', 'control')
        if i == 0 and t.string in self.open_scope_tokens.get(t.name, set()):
            self._save_curr_level()

        if i == end - start or self.fixed_indent:
            level = self._get_next_level()
        else:
            level = tokens[i + 1].x + 1
        self._append(t.string, t.name, level, y)

    def _handle_other_token(self, y, tokens, start, end, i, t):
        name, s = t.fqname(), t.string
        # handle "free" tokens (strings, heredocs, etc)
        if name in self.start_free_tokens:
            self._append('free', name, None, y)
            return
        elif name in self.end_free_tokens:
            self._pop('free')

        # handle macros
        if name in self.start_macro_tokens:
            self._append('macro', name, 0, y)
            if i == 0:
                self._save_curr_level()
            return
        elif name in self.end_macro_tokens:
            self._pop('macro')
            return

        # remove implicit continuation
        if self.end_at_eof and i + start == end:
            self._pop_while('continue', 'control')
        elif self.end_at_tokens.get(name, {}).get(s):
            self._pop_while('continue', 'control')

        # add implicit continuation
        top = self._peek()
        if (i + start == end and
            (top and top.name in self.open_scope_tokens.get(top.type_, {}) or
             not top)):
            if self.continue_tokens:
                if s in self.continue_tokens.get(name, {}):
                    self._append_unless('continue', name, self._get_next_level(), y)
            elif self.nocontinue_tokens:
                d = self.nocontinue_tokens.get(name)
                if d is None or d != 1 and s not in d:
                    self._append_unless('continue', name, self._get_next_level(), y)

        if name == 'continuation':
            # handle explicit continuation
            self._append_unless('continue', name, self._get_next_level(), y)
        elif s in self.control_tokens.get(name, {}):
            # handle control keywords
            if i == start:
                self._save_curr_level()
            self._append_unless('control', name, self._get_next_level(), y)
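
# Illustrative sketch: a hypothetical C-like mode might drive StackTabber2
# purely through the class-level token tables above.  The token type names
# ('delimiter', 'keyword') and the ';' statement terminator below are
# assumptions about how such a mode's grammar would label its tokens.
class _ExampleCTabber(StackTabber2):
    open_tokens = {'delimiter': {'{': '}', '(': ')', '[': ']'}}
    close_tokens = {'delimiter': set(['}', ')', ']'])}
    open_scope_tokens = {'delimiter': set(['{'])}
    close_scope_tokens = {'delimiter': set(['}'])}
    control_tokens = {'keyword': set(['if', 'else', 'while', 'do', 'for'])}
    # statements end at ';' rather than at end-of-line, so an unterminated
    # line leaves an implicit 'continue' marker on the stack
    end_at_eof = False
    end_at_tokens = {'delimiter': {';': True}}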