import re
import regex
from point import Point


def escape(s):
    return re.escape(s)


class Token(object):
    def __init__(self, name, rule, y, x, s, color=None, parent=None,
                 matchd={}, link=None):
        self.name = name
        self.rule = rule
        self.y = y
        self.x = x
        self.string = s
        self.color = color
        self.parent = parent
        self.matchd = matchd
        self.link = link
        self._debug = False
        assert not parent or parent.name

    def isa(self, *names):
        return self.name in names

    def match(self, name, string):
        return self.name == name and self.string == string

    def matchs(self, name, strings):
        return self.name == name and self.string in strings

    def matchp(self, pairs):
        for (name, string) in pairs:
            if self.match(name, string):
                return True
        return False

    def fqisa(self, *names):
        return self.fqname() in names

    def fqmatch(self, name, string):
        return self.fqname() == name and self.string == string

    def fqmatchs(self, name, strings):
        return self.fqname() == name and self.string in strings

    def fqmatchp(self, pairs):
        for (name, string) in pairs:
            if self.fqmatch(name, string):
                return True
        return False

    def parents(self):
        if self.parent is not None:
            parents = self.parent.parents()
            parents.append(self.parent)
            return parents
        else:
            return []

    def domain(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link and not self.link.startswith('middle'):
            names.append(self.rule.name)
        return names

    def fqlist(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link == 'start':
            names.append(self.rule.name)
        names.append(self.name)
        return names

    def fqname(self):
        names = self.fqlist()
        return '.'.join(names)

    def copy(self):
        return Token(self.name, self.rule, self.y, self.x, self.string,
                     self.color, self.parent, self.matchd, self.link)

    def add_to_string(self, s):
        self.string += s

    def end_x(self):
        return self.x + len(self.string)

    # NOTE: when parent is not None, this comparison recurses up the
    # parent chain.
    def __eq__(self, other):
        return (other is not None and
                self.y == other.y and self.x == other.x and
                self.name == other.name and
                self.string == other.string and
                self.parent == other.parent)

    def __repr__(self):
        if len(self.string) < 10:
            s = self.string
        else:
            s = self.string[:10] + '...'
        fields = (self.fqname(), self.rule, self.y, self.x, s)
        return "<Token(%r, %r, %d, %d, %r)>" % fields
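
# Illustrative sketch (not part of the original module): how Token names
# compose. A top-level token's fully-qualified name is just its name; a
# token inside a region gets its ancestors' rule names prepended.
#
#     t = Token('word', None, 0, 4, 'spam')
#     t.fqname()               # -> 'word'
#     t.isa('word', 'name')    # -> True
#     t.match('word', 'spam')  # -> True
#
# For a token whose parent is the 'start' token of a RegionRule named
# 'string' (a hypothetical rule), fqname() would yield 'string.word'.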


class Rule(object):
    reflags = 0

    def __init__(self, name):
        self.name = name

    def match(self, lexer, parent):
        raise NotImplementedError("match() must be implemented by subclasses")

    def lex(self, lexer, parent, match):
        raise NotImplementedError("lex() must be implemented by subclasses")

    def make_token(self, lexer, s, name, parent=None, matchd={}, link=None):
        t = Token(name, self, lexer.y, lexer.x, s, None, parent, matchd, link)
        t.color = lexer.get_color(t)
        lexer.x += len(s)
        if lexer.x > len(lexer.lines[lexer.y]):
            lexer.x = 0
            lexer.y += 1
        return t

    def get_line(self, lexer, y=None):
        if y is None:
            return lexer.lines[lexer.y] + '\n'
        else:
            return lexer.lines[y] + '\n'


class PatternRule(Rule):
    def __init__(self, name, pattern):
        Rule.__init__(self, name)
        self.pattern = pattern
        self.re = re.compile(self.pattern, self.reflags)

    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)

    def lex(self, lexer, parent, m):
        if m:
            yield self.make_token(lexer, m.group(0), self.name, parent,
                                  m.groupdict())
        raise StopIteration


class NocasePatternRule(PatternRule):
    reflags = re.IGNORECASE
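
# Illustrative sketch (not part of the original module): a couple of
# hypothetical PatternRule definitions. Each rule owns one compiled regex;
# match() anchors it at the lexer's current (y, x) position, and lex()
# emits a single token spanning the entire match.
#
#     number = PatternRule('number', r'[0-9]+')
#     kw     = NocasePatternRule('keyword', r'(?:if|else|while)\b')
#
# NocasePatternRule behaves identically, except the class-level reflags
# adds re.IGNORECASE when the pattern is compiled.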


class PatternMatchRule(PatternRule):
    reflags = 0

    def __init__(self, name, pattern, *names):
        PatternRule.__init__(self, name, pattern)
        self.names = names

    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)

    def lex(self, lexer, parent, m):
        if not m:
            raise StopIteration
        for group, name in zip(m.groups(), self.names):
            if not group:
                continue
            yield self.make_token(lexer, group, name, parent, m.groupdict())
        raise StopIteration


class NocasePatternMatchRule(PatternMatchRule):
    reflags = re.IGNORECASE
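
# Illustrative sketch (not part of the original module): one pattern, one
# token per capture group. The group/name pairing is positional, so the
# hypothetical rule below would emit a 'key' token, then an 'eq' token,
# then a 'value' token for input such as "foo=bar".
#
#     assign = PatternMatchRule('assign', r'(\w+)(=)(\S+)',
#                               'key', 'eq', 'value')
#
# Groups that are empty or did not participate in the match are skipped,
# so optional groups are safe to use.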


class FakeWindow(object):
    def __init__(self, app, b):
        self.application = app
        self.buffer = b


class OverrideError(Exception):
    pass


class OverridePatternRule(PatternRule):
    def lex(self, lexer, parent, m):
        if m:
            d = m.groupdict()
            if lexer.action == 'lex':
                a = lexer.mode.window.application
                try:
                    b = lexer.mode.window.buffer
                    modecls = a.modes[d['mode']]
                    mode = modecls(FakeWindow(a, b))
                    if hasattr(mode, 'grammar') and hasattr(mode, 'colors'):
                        lexer.mode.gstack['%s' % d['token']] = mode
                    else:
                        raise OverrideError("mode %r has no grammar/colors" % mode)
                except (KeyError, AttributeError, OverrideError):
                    # overrides fail silently; uncomment this raise when
                    # debugging dynamic highlighting
                    #raise
                    pass
            yield self.make_token(lexer, m.group(0), self.name, parent, d)
        raise StopIteration


class ContextPatternRule(PatternRule):
    def __init__(self, name, pattern, fallback):
        Rule.__init__(self, name)
        self.pattern = pattern
        self.fallback_re = re.compile(fallback, self.reflags)

    def match(self, lexer, parent):
        try:
            r = re.compile(self.pattern % parent.matchd, self.reflags)
        except KeyError:
            r = self.fallback_re
        return r.match(self.get_line(lexer), lexer.x)


class NocaseContextPatternRule(ContextPatternRule):
    reflags = re.IGNORECASE
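
# Illustrative sketch (not part of the original module): the pattern may
# refer to named groups captured by the enclosing region's start token via
# parent.matchd. For a hypothetical heredoc-style region whose start rule
# captured (?P<tag>\w+), a context rule could match the same tag again:
#
#     tag_end = ContextPatternRule('tag_end', r'%(tag)s\b', r'\w+\b')
#
# If 'tag' is missing from parent.matchd, the interpolation raises
# KeyError and the precompiled fallback pattern is used instead.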


class LazyPatternRule(Rule):
    def __init__(self, name, pattern):
        Rule.__init__(self, name)
        self.pattern = pattern
        self._compile({})

    def _compile(self, d):
        try:
            self.re = re.compile(self.pattern % d, self.reflags)
        except Exception:
            # interpolation failed; fall back to the raw pattern
            self.re = re.compile(self.pattern, self.reflags)
        self.d = d

    def match(self, lexer, parent):
        if self.d != parent.matchd:
            self._compile(parent.matchd)
        return self.re.match(self.get_line(lexer), lexer.x)

    def lex(self, lexer, parent, m):
        if m:
            yield self.make_token(lexer, m.group(0), self.name, parent,
                                  m.groupdict())
        raise StopIteration
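
# Illustrative sketch (not part of the original module): unlike
# ContextPatternRule, which recompiles on every match() call,
# LazyPatternRule caches the compiled regex and only recompiles when the
# parent's match dictionary actually changes:
#
#     lazy = LazyPatternRule('tag_end', r'</%(tag)s>')
#
# The first match() against a parent whose matchd is {'tag': 'div'}
# compiles r'</div>' once and reuses it for subsequent calls.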


class PatternGroupRule(PatternRule):
    def __init__(self, name, *args):
        assert args and len(args) % 2 == 0
        i = 0
        pairs = []
        while i < len(args):
            tokname, pattern = args[i], args[i + 1]
            pairs.append((tokname, re.compile(pattern, self.reflags)))
            i += 2
        Rule.__init__(self, name)
        self.pairs = tuple(pairs)

    def match(self, lexer, parent):
        (x, y) = (lexer.x, lexer.y)
        matches = []
        for (tokname, tokre) in self.pairs:
            if y >= len(lexer.lines):
                return []
            line = self.get_line(lexer, y)
            m = tokre.match(line, x)
            if m:
                x += len(m.group(0))
                if x >= len(line):
                    x = 0
                    y += 1
                matches.append((tokname, m))
            else:
                return []
        assert len(matches) == len(self.pairs)
        return matches

    def lex(self, lexer, parent, matches):
        if matches:
            for (tokname, m) in matches:
                yield self.make_token(lexer, m.group(0), tokname, parent,
                                      m.groupdict())
        raise StopIteration


class NocasePatternGroupRule(PatternGroupRule):
    reflags = re.IGNORECASE
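
# Illustrative sketch (not part of the original module): all sub-patterns
# must match in sequence (possibly spilling onto following lines), or the
# whole rule fails and returns no matches. A hypothetical declaration
# rule:
#
#     decl = PatternGroupRule('decl',
#                             'keyword',    r'var\s+',
#                             'identifier', r'[a-zA-Z_]\w*')
#
# On "var x", match() returns two (tokname, match) pairs and lex() then
# emits a 'keyword' token followed by an 'identifier' token.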


class RegionRule(Rule):
    def __init__(self, name, *args):
        Rule.__init__(self, name)
        assert len(args) > 1
        args = list(args)
        self.pairs = []
        self.start_re = re.compile(args.pop(0), self.reflags)
        while len(args) > 1:
            grammar = args.pop(0)
            pattern = args.pop(0)
            #assert hasattr(grammar, 'rules'), repr(grammar)
            assert isinstance(pattern, str), repr(pattern)
            self.pairs.append((grammar, pattern))
        if len(args) == 1:
            # a trailing grammar with no stop pattern runs to end-of-input
            grammar = args.pop(0)
            self.pairs.append((grammar, None))

    def match(self, lexer, parent):
        return self.start_re.match(self.get_line(lexer), lexer.x)

    def lex(self, lexer, parent, m):
        assert m
        # since we had a match, create the start token, which will be the
        # ancestor of every other token matched in this region
        matchd = m.groupdict()
        for (key, val) in matchd.iteritems():
            if val is not None:
                # escape captured text so it can be safely interpolated
                # into the stop patterns below
                matchd[key] = escape(val)
        parent = self.make_token(lexer, m.group(0), 'start', parent, matchd,
                                 'start')
        yield parent

        # now loop over this region's grammar/stop-pattern pairs, yielding
        # the resulting tokens; we start at pair 0
        for tok in self._lex_loop(lexer, [parent], matchd, 0):
            yield tok
        raise StopIteration

    def resume(self, lexer, toresume):
        assert toresume, "can't resume without tokens to resume!"
        # figure out in which of this region's grammars we are resuming, by
        # computing i, a position in our list of grammar/stop-pattern pairs
        if toresume[0].link == 'start':
            i = 0
        else:
            m = regex.middle_token_name.match(toresume[0].link)
            assert m
            i = int(m.group(1)) + 1
            assert i > 0 and i < len(self.pairs)

        # if the resumed token carries no match data, inherit it from the
        # nearest ancestor that does
        matchd = toresume[0].matchd
        if not matchd and toresume[0].parent:
            p = toresume[0].parent
            while not p.matchd and p.parent:
                p = p.parent
            matchd = p.matchd

        # now loop over this region's grammar/stop-pattern pairs, yielding
        # the resulting tokens; we start at pair i
        for tok in self._lex_loop(lexer, toresume, matchd, i):
            yield tok
        raise StopIteration

    def _lex_loop(self, lexer, toresume, matchd, i):
        # loop over our grammar/stop-pattern pairs
        while i < len(self.pairs):
            # for each one, compile the stop-regex and figure out the name
            # of the stop token to be created if that stop-regex matches
            fqname = toresume[0].fqname()
            rname = toresume[0].rule.name
            p = Point(toresume[0].x, toresume[0].y)
            if fqname in lexer.mode.ghist and p in lexer.mode.ghist[fqname]:
                mode = lexer.mode.ghist[fqname][p]
                grammar = mode.grammar
            elif fqname in lexer.mode.gstack:
                mode = lexer.mode.gstack[fqname]
                grammar = mode.grammar
                lexer.mode.ghist.setdefault(fqname, {})
                lexer.mode.ghist[fqname][p] = mode
                del lexer.mode.gstack[fqname]
            elif rname in lexer.mode.ghist and p in lexer.mode.ghist[rname]:
                mode = lexer.mode.ghist[rname][p]
                grammar = mode.grammar
            elif rname in lexer.mode.gstack:
                mode = lexer.mode.gstack[rname]
                grammar = mode.grammar
                lexer.mode.ghist.setdefault(rname, {})
                lexer.mode.ghist[rname][p] = mode
                if i == len(self.pairs) - 1:
                    del lexer.mode.gstack[rname]
            else:
                mode = lexer.mode
                grammar = self.pairs[i][0]
                if grammar is None:
                    grammar = lexer.grammar
            lexer.mstack.append(mode)

            if self.pairs[i][1]:
                try:
                    stopre = re.compile(self.pairs[i][1] % matchd,
                                        self.reflags)
                except Exception:
                    raise Exception("bad stop pattern %r (matchd=%r, flags=%r)" %
                                    (self.pairs[i][1], matchd, self.reflags))
            else:
                stopre = None
            if i == len(self.pairs) - 1:
                tokname = 'end'
            else:
                tokname = 'middle%d' % i

            # now loop over all the tokens in the current grammar until the
            # stop-token (if any) is found, yielding each result as we get
            # it
            tok = None
            for tok in self._lex(lexer, toresume, tokname, stopre, grammar):
                yield tok

            # either we found the stop-token, and have a new parent for
            # future tokens (if any), or we are done
            if tok is not None and tok.name == tokname:
                toresume = [tok]
                matchd.update(tok.matchd)
            else:
                raise StopIteration
            # _lex should already have popped the mode stack here
            #lexer.mstack.pop(-1)
            i += 1
        # we made it through all our grammars and found the end-token, so
        # signal that we are done
        raise StopIteration

    def _lex(self, lexer, toresume, stopname, stopre, grammar):
        assert toresume
        parent = toresume[0]
        reenter = len(toresume) > 1
        null_t = None

        # there are only two ways we want to exit this loop: either we lex
        # the whole document, or we encounter the stop-token
        done = False
        while not done and lexer.y < len(lexer.lines):
            line = self.get_line(lexer)
            old_y = lexer.y
            while not done and lexer.y == old_y and lexer.x < len(line):
                # reenter gets priority, since the current input might be
                # intended for a nested grammar; handle it here
                if reenter:
                    reenter = False
                    for t in toresume[1].rule.resume(lexer, toresume[1:]):
                        yield t
                    # since we might have changed our x/y coordinates, do
                    # some checks here, and maybe finish or update them
                    if lexer.y >= len(lexer.lines):
                        raise StopIteration
                    elif lexer.x >= len(line):
                        lexer.y += 1
                        lexer.x = 0

                # get the *now* current line
                line = self.get_line(lexer)

                if stopre:
                    # if we are looking for a stop-token, check for it now
                    m = stopre.match(line, lexer.x)
                    if m:
                        if null_t:
                            # if we have a null token waiting, return it
                            # first
                            yield null_t
                            null_t = None
                        # now return the stop-token, and signal that we are
                        # done and no more input is to be consumed
                        lexer.mstack.pop(-1)
                        yield self.make_token(lexer, m.group(0), stopname,
                                              parent, m.groupdict(), stopname)
                        done = True
                        break

                m = None
                # check our rules to see if we can match the input
                for rule in grammar.rules:
                    m = rule.match(lexer, parent)
                    if m:
                        # we have a match; if we have a null token waiting,
                        # return it first
                        if null_t:
                            yield null_t
                            null_t = None
                        # now return every token this rule created, one by
                        # one
                        for t in rule.lex(lexer, parent, m):
                            yield t
                        break

                if not m:
                    # we didn't match any rule, so add this character to the
                    # current null token (creating a new one if necessary)
                    if not null_t:
                        null_t = Token('null', None, lexer.y, lexer.x, '',
                                       None, parent)
                        null_t.color = lexer.get_color(null_t)
                    null_t.add_to_string(line[lexer.x])
                    lexer.x += 1

            # we are at the end of a line of input, so if we have a null
            # token waiting, now is the time to return it
            if null_t:
                yield null_t
                null_t = None
            if not done and old_y == lexer.y:
                lexer.y += 1
                lexer.x = 0
        raise StopIteration


class NocaseRegionRule(RegionRule):
    reflags = re.IGNORECASE
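
# Illustrative sketch (not part of the original module): a hypothetical
# double-quoted string region. The start pattern opens the region, the
# nested grammar lexes its interior (here, backslash escapes), and the
# stop pattern closes it; tokens emitted inside the region are named
# 'string.start', 'string.escaped', 'string.null', 'string.end', etc.
#
#     class StringGrammar(Grammar):
#         rules = [PatternRule('escaped', r'\\.')]
#
#     string_rule = RegionRule('string', r'"', StringGrammar, r'"')
#
# Stop patterns may interpolate groups captured by the start pattern, e.g.
# RegionRule('heredoc', r'<<(?P<tag>\w+)', StringGrammar, r'^%(tag)s$'),
# which is why the start groups are escape()d before being stored.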


class OverrideRegionRule(RegionRule):
    def lex(self, lexer, parent, m):
        assert m
        d = m.groupdict()
        if 'grammar' in d:
            a = lexer.mode.window.application
            name = d['grammar'].lower()
            if name in a.modes:
                modecls = a.modes[name]
                b = lexer.mode.window.buffer
                fw = FakeWindow(a, b)
                mode = modecls(fw)
                assert hasattr(mode, 'grammar') and hasattr(mode, 'colors')

                if parent is None:
                    path = self.name
                else:
                    # domain() returns a list of names, so join it before
                    # appending our own name
                    path = '.'.join(parent.domain() + [self.name])
                lexer.mode.gstack[path] = mode
        return RegionRule.lex(self, lexer, parent, m)


class Grammar(object):
    rules = []
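
# Illustrative sketch (not part of the original module): a grammar is just
# an ordered list of rules; earlier rules win, so more specific patterns
# should come first. Names below are hypothetical.
#
#     class ToyGrammar(Grammar):
#         rules = [
#             PatternRule('comment', r'#.*'),
#             NocasePatternRule('keyword', r'(?:if|else|while)\b'),
#             PatternRule('number', r'[0-9]+'),
#         ]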


class Lexer(object):
    def __init__(self, mode, grammar):
        self.mode = mode
        self.mstack = []
        self.grammar = grammar
        self.y = 0
        self.x = 0
        self.lines = None
        assert self.grammar.rules

    def get_line(self):
        return self.lines[self.y] + '\n'

    def lex_all(self, lines):
        lextokens = [[] for _ in lines]
        for t in self.lex(lines):
            lextokens[t.y].append(t)
        return lextokens
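
    # Illustrative sketch (not part of the original module): given a mode
    # whose window/application provide token colors, lexing a small
    # document bucketed by line would look like this; 'mode' here is
    # hypothetical.
    #
    #     lexer = Lexer(mode, ToyGrammar)
    #     tokens_by_line = lexer.lex_all(['if x: # hmm', 'x = 12'])
    #     # tokens_by_line[0] holds the tokens whose t.y == 0, etc.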

    def lex(self, lines, y=0, x=0):
        self.action = 'lex'
        self.y = y
        self.x = x
        self.lines = lines
        self.mstack = []
        self.mode.ghist = {}
        self.mode.gstack = {}
        for t in self._lex():
            yield t
        del self.action
        raise StopIteration

    def resume(self, lines, y, x, token):
        self.action = 'resume'
        self.y = y
        self.x = x
        self.lines = lines
        self.mstack = []
        toresume = token.parents()

        # when a token resumes mid-region (a non-'start' link), the
        # ancestor before it is redundant, so drop it
        i = 1
        while i < len(toresume):
            if toresume[i].link and toresume[i].link != 'start':
                del toresume[i - 1]
            else:
                i += 1

        if toresume:
            for t in toresume[0].rule.resume(self, toresume):
                #t._debug = True
                yield t
        for t in self._lex():
            #t._debug = True
            yield t
        del self.action
        raise StopIteration

    def _lex(self):
        parent = None
        while self.y < len(self.lines):
            null_t = None
            line = self.get_line()
            while self.x < len(line):
                m = None
                for rule in self.grammar.rules:
                    m = rule.match(self, parent)
                    if m:
                        if null_t:
                            yield null_t
                            null_t = None
                        for t in rule.lex(self, parent, m):
                            yield t
                        break

                if self.y >= len(self.lines):
                    break
                line = self.get_line()
                if not m:
                    if self.x < len(line):
                        if null_t is None:
                            null_t = Token('null', None, self.y, self.x, '',
                                           None, parent)
                            null_t.color = self.get_color(null_t)
                        null_t.add_to_string(line[self.x])
                        self.x += 1
            if null_t:
                yield null_t
            self.y += 1
            self.x = 0
        raise StopIteration

    def get_color(self, token):
        app = self.mode.window.application
        name = '.'.join(token.fqlist())

        if name in app.token_colors:
            return app.token_colors[name]
        elif name in app.cached_colors:
            return app.cached_colors[name]

        # strip leading components one at a time, looking for the most
        # specific suffix that has a configured color
        name2 = name
        while name2:
            try:
                i = name2.index('.')
                name2 = name2[i + 1:]
            except ValueError:
                break
            if name2 in app.token_colors:
                c = app.token_colors[name2]
                app.cached_colors[name] = c
                return c

        app.cached_colors[name] = app.config['default_color']
        return app.config['default_color']
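
# Illustrative sketch (not part of the original module): color lookup falls
# back through suffixes of the fully-qualified token name. For a token
# named 'heredoc.string.escaped' (hypothetical), get_color() consults
# app.token_colors for:
#
#     'heredoc.string.escaped'
#     'string.escaped'
#     'escaped'
#
# and caches whichever answer it finds (or the configured default) under
# the full name, so later tokens with the same fqname skip the walk.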