parent 42a6359f6f
commit 6e116db9ae

@@ -15,6 +15,7 @@ class Buffer(object):
         assert nl in ('\n', '\r', '\r\n'), "Invalid line ending"
         self.nl = nl
         self.modified = False
+        pass

     def num_chars(self):
         n = 0
mode_python.py: 346 changed lines
@@ -1,262 +1,130 @@
-import commands, os.path, sets, string, sys
+import commands, os.path, sets, string

 import color, completer, default, mode2, lex2, method, regex, tab2
 import ctag_python

 from point2 import Point
-from lex2 import Grammar, ConstantRule, PatternRule, RegionRule, DualRegionRule
+from lex2 import Grammar, PatternRule, RegionRule

 class StringGrammar(Grammar):
     rules = [
-        PatternRule(
-            name=r'octal',
-            pattern=r'\\[0-7]{3}',
-        ),
-        PatternRule(
-            name=r'escaped',
-            pattern=r'\\.',
-        ),
-        #PatternRule(
-        #    name=r'format',
-        #    pattern=r'%(?:\([a-zA-Z_]+\))?[-# +]*(?:[0-9]+|\*)?\.?(?:[0-9]+|\*)?[hlL]?[a-zA-Z%]',
-        #),
+        PatternRule(name=r'octal', pattern=r'\\[0-7]{3}'),
+        PatternRule(name=r'escaped', pattern=r'\\.'),
     ]

 class PythonGrammar(Grammar):
     rules = [
-        PatternRule(
-            name=r'functiondef',
-            pattern=r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*',
-        ),
-        PatternRule(
-            name=r'classdef',
-            pattern=r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*',
-        ),
-        PatternRule(
-            name=r'reserved',
-            pattern=r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])',
-        ),
-        PatternRule(
-            name=r'keyword',
-            pattern=r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])',
-        ),
-        PatternRule(
-            name=r"builtin",
-            pattern=r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])',
-        ),
-        PatternRule(
-            name=r'methodcall',
-            pattern=r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()',
-        ),
-        PatternRule(
-            name=r'functioncall',
-            pattern=r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()',
-        ),
-        PatternRule(
-            name=r'system_identifier',
-            pattern=r'__[a-zA-Z0-9_]+__',
-        ),
-        PatternRule(
-            name=r'private_identifier',
-            pattern=r'__[a-zA-Z0-9_]*',
-        ),
-        PatternRule(
-            name=r'hidden_identifier',
-            pattern=r'_[a-zA-Z0-9_]*',
-        ),
-        PatternRule(
-            name=r'identifier',
-            pattern=r'[a-zA-Z_][a-zA-Z0-9_]*',
-        ),
-        PatternRule(
-            name=r'delimiter',
-            pattern=r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*=',
-        ),
-        PatternRule(
-            name=r"operator",
-            pattern=r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%",
-        ),
-
-        PatternRule(
-            name=r"integer",
-            pattern=r"(?<![\.0-9a-zA-Z_])(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])",
-        ),
-        PatternRule(
-            name=r"float",
-            pattern=r"(?<![\.0-9a-zA-Z_])(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])",
-        ),
-        PatternRule(
-            name=r"imaginary",
-            pattern=r"(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])",
-        ),
-
-        RegionRule(
-            name=r'docstring',
-            start=r'^ *(?P<tag>"""|\'\'\')',
-            grammar=Grammar(),
-            end=r'%(tag)s',
-        ),
-        RegionRule(
-            name=r'tq_string',
-            start=r'(?P<tag>"""|\'\'\')',
-            grammar=Grammar(),
-            end=r'%(tag)s',
-        ),
-        RegionRule(
-            name=r'string',
-            start=r'(?P<tag>"|\')',
-            grammar=StringGrammar(),
-            end=r'%(tag)s',
-        ),
-
-        PatternRule(
-            name=r'comment',
-            pattern=r'#.*$',
-        ),
-        PatternRule(
-            name=r'continuation',
-            pattern=r'\\$',
-        ),
+        PatternRule(name=r'functionname', pattern=r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'),
+        PatternRule(name=r'classname', pattern=r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'),
+        PatternRule(name=r'reserved', pattern=r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'),
+        PatternRule(name=r'keyword', pattern=r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'),
+        PatternRule(name=r"builtin", pattern=r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])'),
+        PatternRule(name=r'methodcall', pattern=r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
+        PatternRule(name=r'functioncall', pattern=r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
+        PatternRule(name=r'system_identifier', pattern=r'__[a-zA-Z0-9_]+__'),
+        PatternRule(name=r'private_identifier', pattern=r'__[a-zA-Z0-9_]*'),
+        PatternRule(name=r'hidden_identifier', pattern=r'_[a-zA-Z0-9_]*'),
+        PatternRule(name=r'identifier', pattern=r'[a-zA-Z_][a-zA-Z0-9_]*'),
+        PatternRule(name=r'delimiter', pattern=r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*='),
+        PatternRule(name=r"operator", pattern=r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
+        PatternRule(name=r"integer", pattern=r"(?<![\.0-9a-zA-Z_])(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])"),
+        PatternRule(name=r"float", pattern=r"(?<![\.0-9a-zA-Z_])(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])"),
+        PatternRule(name=r"imaginary", pattern=r"(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])"),
+        RegionRule(name=r'docstring', start=r'^ *(?P<tag>"""|\'\'\')', grammar=Grammar(), end=r'%(tag)s'),
+        RegionRule(name=r'string', start=r'(?P<tag>"""|\'\'\')', grammar=StringGrammar(), end=r'%(tag)s'),
+        RegionRule(name=r'string', start=r'(?P<tag>"|\')', grammar=StringGrammar(), end=r'%(tag)s'),
+        PatternRule(name=r'comment', pattern=r'#.*$'),
+        PatternRule(name=r'continuation', pattern=r'\\$'),
     ]

-class PythonTabber(tab2.Tabber):
+class PythonTabber(tab2.StackTabber):
+    unanchored_names = ('null', 'string', 'docstring', 'comment')
+    endlevel_names = ('pass', 'return', 'yield', 'raise', 'break', 'continue')
+    startlevel_names = ('if', 'try', 'class', 'def', 'for', 'while', 'try')
+    def __init__(self, m):
+        tab2.StackTabber.__init__(self, m)
+        self.base_level = 0

     def is_base(self, y):
         if y == 0:
             return True
-        highlighter = self.mode.window.buffer.highlights[self.mode.name()]
-        if not highlighter.tokens[y]:
+        tokens = self.get_tokens(y)
+        if not tokens:
             return False
-        t = highlighter.tokens[y][0]
-        return t.name == 'keyword' and t.string == 'def'
-
-    def __init__(self, m):
-        tab2.Tabber.__init__(self, m)
-
-
-
-    def stack_append(self, item):
-        self.tab_stack.append(item)
-    def stack_pop(self):
-        self.tab_stack.pop(-1)
-
-    def base_indentation_level(self, y):
-        return y == 0
-
-    def calculate_tabs(self, start=0, goal=None):
-        lines = self.mode.window.buffer.lines
-        tokens = self.mode.highlighter.tokens
-        buffer = self.mode.window.buffer
-
-        if self.levels is None:
-            self.levels = [None] * (len(lines))
-
-        self.index = 0
-        self.y = start
-        self.base = 0
-        self.tab_stack = []
-
-        # we want to process every logical line in the file
-        while self.y < len(lines):
-            line = lines[self.y]
-            start_index = self.index
-
-            start_point = point.Point(0, self.y)
-            start_offset = buffer.get_point_offset(start_point)
-            end_point = point.Point(len(line), self.y)
-            end_offset = buffer.get_point_offset(end_point)
-
-            # we want to find all the tokens on the line we are currently processing
-            while self.index < len(tokens):
-                token = tokens[self.index]
-                if token.end > end_offset:
-                    break
-                self.index += 1
-
-            self.handle_line(line,
-                             start_offset, start_index,
-                             end_offset, self.index)
-
-            self.levels[self.y] = self.line_depth
-            self.y += 1
-            if goal is not None and self.y > goal:
-                break
-
-    def get_line_depth(self):
-        if len(self.tab_stack) > 0:
-            return self.tab_stack[-1][1]
+        elif tokens[0].name not in self.unanchored_names:
+            return True
         else:
-            return self.base
+            return False

-    def handle_line(self, line, start_offset, start_index, end_offset, end_index):
-        self.line_depth = self.get_line_depth()
-        tokens = self.mode.highlighter.tokens
+    def get_level(self, y):
+        self._calc_level(y)
+        return self.lines.get(y)

-        if start_index >= len(tokens):
-            return
-        if regex.whitespace.match(line):
-            return
+    def _calc_level(self, y):
+        target = y
+        while not self.is_base(y) and y > 0:
+            y -= 1

-        if len(self.tab_stack) == 0 and tokens[start_index].start >= start_offset:
-            self.base = util.count_leading_whitespace(line)
-        for i in range(start_index, end_index):
-            token = tokens[i]
-            s = token.string
-            if s in self.start_tags:
-                if i < end_index - 1:
-                    i = tokens[i+1].start - start_offset
-                elif len(self.tab_stack) > 0:
-                    i = self.tab_stack[-1][1] + 4
+        self.markers = []
+        while y <= target:
+            self.popped = False
+            tokens = self.get_tokens(y)
+            currlvl = self.get_curr_level()
+            if y < target and tokens:
+                if self.token_is_whitespace(y, 0):
+                    l = len(tokens[0].string)
                 else:
-                    i = self.base + 4
-                self.stack_append((s, i))
-            elif s in self.close_tags:
-                assert len(self.tab_stack), "Unbalanced closing tag"
-                assert self.tab_stack[-1][0] == self.close_tags[s], "Unmatched closing tag"
-                self.stack_pop()
-                if i == start_index:
-                    self.line_depth = self.get_line_depth()
+                    l = 0
+                while currlvl > l:
+                    self._pop()
+                    currlvl = self.get_curr_level()
+                    self.popped = True
+            for i in range(0, len(tokens)):
+                currlvl = self._handle_token(currlvl, y, i)
+            self.lines[y] = currlvl
+            self.record[y] = tuple(self.markers)
+            y += 1

-        if tokens[start_index].start < start_offset:
-            self.line_depth = -1
-
-        prebase = self.base
-        s = tokens[start_index].string
-        e = tokens[end_index-1].string
-
-        if s == "except" or s == "elif" or s == "else":
-            if self.y > 0 and self.line_depth == self.levels[self.y - 1]:
-                self.line_depth = max(0, self.line_depth - 4)
-        elif (s == "return" or s == "raise" or s == "yield" or s == "break" or
-              s == "pass" or s == 'continue'):
-            self.base = max(0, self.base - 4)
-
-        if e == "\\":
-            if len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
+    def _handle_other_token(self, currlvl, y, i):
+        token = self.get_token(y, i)
+        fqname = token.fqname()
+        if fqname == 'string.start':
+            self._opt_append('string', None)
+        elif fqname == 'string.end':
+            self._opt_pop('string')
+        elif fqname == 'docstring.start':
+            self._opt_append('docstring', None)
+        elif fqname == 'docstring.end':
+            self._opt_pop('docstring')
+        elif fqname == 'delimiter':
+            if token.string == ':' and self.markers and self.markers[-1].name in ('[', '{'):
+                # we are in a list range [:] or dictionary key/value {:}
+                pass
+            elif self.is_rightmost_token(y, i):
+                # we are at the end of a block
                 pass
             else:
-                self.stack_append(("\\", prebase + 4))
-            return
-        elif e == ":":
-            self.base += 4
-        elif len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
-            self.stack_pop()
-
-    def get_indentation_level(self, y):
-        if self.levels is not None and self.levels[y] is not None:
-            result = self.levels[y]
-        else:
-            i = max(0, y - 1)
-            while i > 0:
-                if self.base_indentation_level(i):
-                    break
-                i -= 1
-            self.calculate_tabs(i, y)
-            result = self.levels[y]
-        if result == -1:
-            return None
-        return result
+                # we are doing a one-liner
+                self._pop()
+        elif fqname == 'keyword':
+            if token.string in self.endlevel_names:
+                self._pop()
+            elif token.string in self.startlevel_names and self.is_leftmost_token(y, i):
+                self._append(token.string, currlvl + 4)
+            elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i):
+                if not self.popped:
+                    self._pop_until('if', 'elif')
+                    currlvl = self.get_curr_level()
+                self._append(token.string, currlvl + 4)
+            elif token.string == 'except' and self.is_leftmost_token(y, i):
+                if not self.popped:
+                    self._pop_until('try')
+                    currlvl = self.get_curr_level()
+                self._append(token.string, currlvl + 4)
+            elif token.string == 'finally' and self.is_leftmost_token(y, i):
+                if not self.popped:
+                    self._pop_until('try', 'except')
+                    currlvl = self.get_curr_level()
+                self._append(token.string, currlvl + 4)
+        return currlvl

 class Python(mode2.Fundamental):
     tabbercls = PythonTabber
@@ -283,8 +151,8 @@ class Python(mode2.Fundamental):
         'keyword': color.build('cyan', 'default'),
         'reserved': color.build('cyan', 'default'),
         'builtin': color.build('cyan', 'default'),
-        'functiondef': color.build('blue', 'default'),
-        'classdef': color.build('green', 'default'),
+        'functionname': color.build('blue', 'default'),
+        'classname': color.build('green', 'default'),

         'string.start': color.build('green', 'default'),
         'string.null': color.build('green', 'default'),
@@ -297,10 +165,6 @@ class Python(mode2.Fundamental):
         'float': color.build('default', 'default'),
         'imaginary': color.build('default', 'default'),

-        'tq_string.start': color.build('green', 'default'),
-        'tq_string.null': color.build('green', 'default'),
-        'tq_string.end': color.build('green', 'default'),
-
         'docstring.start': color.build('green', 'default'),
         'docstring.null': color.build('green', 'default'),
         'docstring.end': color.build('green', 'default'),
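Note on the grammar change above: every multi-line PatternRule/RegionRule call is collapsed onto one line, and the functiondef/classdef rules are renamed to functionname/classname while keeping the same regular expressions. As a quick, hedged illustration (standard-library re only, no lex2 import; the sample lines are made up), the renamed patterns behave like this:

import re

# patterns copied verbatim from the new single-line rules above
functionname = re.compile(r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*')
classname    = re.compile(r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*')

print(classname.findall("class PythonTabber(tab2.StackTabber):"))  # ['PythonTabber']
print(functionname.findall("def is_base(self, y):"))               # ['is_base']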
|
tab2.py: 52 changed lines
@@ -9,24 +9,33 @@ class Marker:
         return '<Marker(%r, %r)>' % (self.name, self.level)

 class Tabber:
+    wsre = regex.whitespace
+    wst = 'null'
     def __init__(self, m):
         self.mode = m
         self.lines = {}

+    def get_highlighter(self):
+        return self.mode.window.buffer.highlights[self.mode.name()]
+    def get_tokens(self, y):
+        return self.mode.window.buffer.highlights[self.mode.name()].tokens[y]
+    def get_token(self, y, i):
+        return self.mode.window.buffer.highlights[self.mode.name()].tokens[y][i]
+    def token_is_whitespace(self, y, i):
+        token = self.get_token(y, i)
+        return token.name == self.wst and self.wsre.match(token.string)
     def get_next_left_token(self, y, i):
         tokens = self.get_tokens(y)
         assert i >= 0 and i < len(tokens)
         for j in range(1, i):
-            m = regex.whitespace.match(tokens[i - j].string)
-            if not m:
+            if not self.token_is_whitespace(y, i - j):
                 return tokens[i - j]
         return None
     def get_next_right_token(self, y, i):
         tokens = self.get_tokens(y)
         assert i >= 0 and i < len(tokens)
         for j in range(i + 1, len(tokens)):
-            m = regex.whitespace.match(tokens[j].string)
-            if not m:
+            if not self.token_is_whitespace(y, j):
                 return tokens[j]
         return None
     def is_leftmost_token(self, y, i):
@@ -36,6 +45,20 @@ class Tabber:
     def is_only_token(self, y, i):
         return self.is_leftmost_token(y, i) and self.is_rightmost_token(y, i)

+    def get_leftmost_token(self, y):
+        tokens = self.get_tokens(y)
+        for i in range(0, len(tokens)):
+            if not self.token_is_whitespace(y, i):
+                return tokens[i]
+        return None
+    def get_rightmost_token(self, y):
+        tokens = self.get_tokens(y)
+        i = len(tokens) - 1
+        for j in range(0, len(tokens)):
+            if not self.token_is_whitespace(y, i - j):
+                return tokens[i - j]
+        return None
+
     def region_added(self, p, newlines):
         self.lines = {}
     def region_removed(self, p1, p2):
@@ -52,12 +75,6 @@ class Tabber:
     def _calc_level(self, y):
         pass

-def get_level(markers):
-    if markers:
-        return markers[-1].level
-    else:
-        return 0
-
 class StackTabber(Tabber):
     def __init__(self, m):
         self.mode = m
@@ -70,10 +87,6 @@ class StackTabber(Tabber):
             return self.markers[-1].level
         else:
             return 0
-    def get_tokens(self, y):
-        return self.mode.window.buffer.highlights[self.mode.name()].tokens[y]
-    def get_token(self, y, i):
-        return self.mode.window.buffer.highlights[self.mode.name()].tokens[y][i]

     def region_added(self, p, newlines):
         self.lines = {}
@@ -87,9 +100,6 @@ class StackTabber(Tabber):
     def is_base(self, y):
         return y == 0
     def _calc_level(self, y):
-        highlighter = self.mode.window.buffer.highlights[self.mode.name()]
-        target = y
-
         # first we need to step back to find the last place where we have tab
         # stops figured out, or a suitable place to start
         while not self.is_base(y) and y > 0:
@@ -98,6 +108,7 @@ class StackTabber(Tabber):
         # ok now, let's do this shit
         self.markers = []
         currlvl = 0
+        target = y
         while y <= target:
             currlvl = self.get_curr_level()
             tokens = self.get_tokens(y)
@@ -150,6 +161,13 @@ class StackTabber(Tabber):
         self.markers.append(Marker(name, level))
     def _pop(self):
         self.markers.pop(-1)
+    def _pop_until(self, *names):
+        while self.markers:
+            if self.markers[-1].name in names:
+                self.markers.pop(-1)
+                return
+            else:
+                self.markers.pop(-1)
     def _opt_append(self, name, level):
         if self.markers and self.markers[-1].name == name:
             pass
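Note on the _pop_until helper added above: it pops markers off the tabber's stack until it has removed one whose name matches, discarding everything above the match along with it. A minimal standalone sketch of that behaviour with a stand-in for tab2.Marker (the __init__ and the example stack contents are assumptions made for the illustration):

class Marker:
    def __init__(self, name, level):
        self.name  = name
        self.level = level
    def __repr__(self):
        return '<Marker(%r, %r)>' % (self.name, self.level)

def pop_until(markers, *names):
    # pop until a marker whose name is in `names` has itself been popped
    while markers:
        if markers[-1].name in names:
            markers.pop(-1)
            return
        else:
            markers.pop(-1)

stack = [Marker('if', 4), Marker('try', 8), Marker('if', 12)]
pop_until(stack, 'try')
print(stack)  # [<Marker('if', 4)>]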