pmacs3/lex3.py


import curses, re
import regex, util
from point2 import Point
class Token(object):
    def __init__(self, name, rule, y, x, s, color=None, parent=None, matchd={}, link=None):
        self.name = name
        self.rule = rule
        self.y = y
        self.x = x
        self.string = s
        self.color = color
        self.parent = parent
        self.matchd = matchd
        self.link = link
        assert parent is None or hasattr(parent, 'name'), 'oh no %r' % parent
    def parents(self):
        if self.parent is not None:
            parents = self.parent.parents()
            parents.append(self.parent)
            return parents
        else:
            return []
    def domain(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link and not self.link.startswith('middle'):
            names.append(self.rule.name)
        return names
    def fqlist(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link == 'start':
            names.append(self.rule.name)
        names.append(self.name)
        return names
    def fqname(self):
        names = self.fqlist()
        return '.'.join(names)
    def copy(self):
        return Token(self.name, self.rule, self.y, self.x, self.string,
                     self.color, self.parent, self.matchd, self.link)
    def add_to_string(self, s):
        self.string += s
    def end_x(self):
        return self.x + len(self.string)
    def __eq__(self, other):
        return (self.y == other.y and self.x == other.x
                and self.name == other.name and self.parent is other.parent and
                self.string == other.string)
    def __repr__(self):
        if len(self.string) < 10:
            s = self.string
        else:
            s = self.string[:10] + '...'
        fields = (self.fqname(), self.rule, self.y, self.x, s)
        return "<Token(%r, %r, %d, %d, %r)>" % fields
class Rule:
    reflags = 0
    def __init__(self, name):
        assert regex.valid_token_name.match(name), 'invalid name %r' % name
        assert not regex.reserved_token_names.match(name), \
               "rule name %r is reserved and thus illegal" % name
        self.name = name
    def match(self, lexer, parent):
        raise Exception, "not implemented"
    def lex(self, lexer, parent, match):
        raise Exception, "not implemented"
    def make_token(self, lexer, s, name, parent=None, matchd={}, link=None):
        t = Token(name, self, lexer.y, lexer.x, s, None, parent, matchd, link)
        t.color = lexer.get_color(t)
        lexer.x += len(s)
        return t
    def get_line(self, lexer):
        return lexer.lines[lexer.y] + '\n'

class PatternRule(Rule):
    def __init__(self, name, pattern):
        Rule.__init__(self, name)
        self.pattern = pattern
        self.re = re.compile(self.pattern, self.reflags)
    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        if m:
            yield self.make_token(lexer, m.group(0), self.name, parent, m.groupdict())
        raise StopIteration
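# A minimal usage sketch (the rule below is a hypothetical example, not part
# of this module): a PatternRule matches its regex at the lexer's current
# position and emits one token named after the rule:
#
#     comment = PatternRule('comment', r'#.*')
#
# When match() succeeds, lex() yields a single Token spanning the whole match,
# with the regex's groupdict stored as the token's matchd.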
class NocasePatternRule(PatternRule):
    reflags = re.IGNORECASE

class OverrideError(Exception):
    pass

class OverridePatternRule(PatternRule):
    def lex(self, lexer, parent, m):
        if m:
            d = m.groupdict()
            if lexer.action == 'lex':
                a = lexer.mode.window.application
                try:
                    names = d['mode'].split('.')
                    modecls = a.globals()[names.pop(0)]
                    for name in names:
                        modecls = getattr(modecls, name)
                    mode = modecls(None)
                    if hasattr(mode, 'grammar') and hasattr(mode, 'colors'):
                        lexer.mode.gstack['%s.start' % d['token']] = mode
                    else:
                        raise OverrideError, "argh: %r" % mode
                except (KeyError, AttributeError, OverrideError):
                    pass
            yield self.make_token(lexer, m.group(0), self.name, parent, d)
        raise StopIteration
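# Note: OverridePatternRule expects its pattern to define the named groups
# 'mode' and 'token'. During a full lex (lexer.action == 'lex'), the captured
# mode name is resolved to a mode class and pushed onto the lexer mode's
# gstack under '<token>.start', so that a later RegionRule can lex that region
# with the overriding mode's grammar; any failure silently falls back to
# normal tokenization.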
class ContextPatternRule(PatternRule):
    def __init__(self, name, pattern, fallback):
        Rule.__init__(self, name)
        self.pattern = pattern
        self.fallback_re = re.compile(fallback, self.reflags)
    def match(self, lexer, parent):
        try:
            r = re.compile(self.pattern % parent.matchd, self.reflags)
        except KeyError:
            r = self.fallback_re
        return r.match(self.get_line(lexer), lexer.x)

class NocaseContextPatternRule(ContextPatternRule):
    reflags = re.IGNORECASE
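# A hedged usage sketch (hypothetical patterns): ContextPatternRule
# %-interpolates named groups captured by the enclosing region's start token
# into its pattern:
#
#     endtag = ContextPatternRule('endtag', r'</%(tag)s>', r'</[a-zA-Z]+>')
#
# If the parent's matchd has no 'tag' key, the interpolation raises KeyError
# and the pre-compiled fallback pattern is used instead.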
class PatternGroupRule(PatternRule):
    def __init__(self, name, *args):
        assert args and len(args) % 2 == 0
        i = 0
        pairs = []
        while i < len(args):
            tokname, pattern = args[i], args[i+1]
            pairs.append((tokname, re.compile(pattern, self.reflags)))
            i += 2
        Rule.__init__(self, name)
        self.pairs = tuple(pairs)
    def match(self, lexer, parent):
        x = lexer.x
        matches = []
        line = self.get_line(lexer)
        for (tokname, tokre) in self.pairs:
            m = tokre.match(line, x)
            if m:
                x += len(m.group(0))
                matches.append((tokname, m))
            else:
                return []
        assert len(matches) == len(self.pairs)
        return matches
    def lex(self, lexer, parent, matches):
        if matches:
            for (tokname, m) in matches:
                yield self.make_token(lexer, m.group(0), tokname, parent, m.groupdict())
        raise StopIteration
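# A hedged usage sketch (hypothetical rule): PatternGroupRule takes
# alternating token-name/pattern arguments and matches only if every pattern
# matches in sequence:
#
#     assign = PatternGroupRule('assign', 'name', r'[a-z]+', 'eq', r'=',
#                               'value', r'[0-9]+')
#
# On success, lex() yields one token per pair, consuming the input in order.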
class RegionRule(Rule):
    def __init__(self, name, *args):
        Rule.__init__(self, name)
        assert len(args) > 1
        args = list(args)
        self.pairs = []
        self.start_re = re.compile(args.pop(0), self.reflags)
        while len(args) > 1:
            grammar = args.pop(0)
            pattern = args.pop(0)
            assert hasattr(grammar, 'rules'), repr(grammar)
            assert type(pattern) == type(''), repr(pattern)
            self.pairs.append((grammar, pattern))
        if len(args) == 1:
            self.pairs.append((args.pop(0), None))
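    # A hedged constructor sketch (the grammars and names are hypothetical):
    # a region is a start pattern followed by grammar/stop-pattern pairs, and
    # the final grammar may omit its stop pattern (the region then runs to
    # EOF):
    #
    #     RegionRule('string', r'"', string_grammar, r'"')
    #     RegionRule('heredoc', r'<<(?P<delim>[A-Z]+)', hd_grammar, r'^%(delim)s$')
    #
    # Stop patterns are %-interpolated against the start match's groupdict in
    # _lex_loop(), which is how the heredoc stop pattern above can refer back
    # to its captured delimiter.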
    def match(self, lexer, parent):
        return self.start_re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        assert m
        # ok, so since we had a match, we need to create our start token, who
        # will be the ancestor to all other tokens matched in this region
        matchd = m.groupdict()
        parent = self.make_token(lexer, m.group(0), 'start', parent, matchd, 'start')
        yield parent
        # now we will loop over the different pairs of grammars/stop-patterns in
        # this region, and return the resulting tokens; we start at 0
        for tok in self._lex_loop(lexer, [parent], matchd, 0):
            yield tok
        raise StopIteration
    def resume(self, lexer, toresume):
        assert toresume, "can't resume without tokens to resume!"
        # ok, so we need to figure out in which of the grammars of our region
        # we are resuming. to do this we calculate i, a position in our list
        # of grammar/stop-pattern pairs
        if toresume[0].link == 'start':
            i = 0
        else:
            m = regex.middle_token_name.match(toresume[0].link)
            assert m
            i = int(m.group(1)) + 1
            assert i > 0 and i < len(self.pairs)
        # now we will loop over the different pairs of grammars/stop-patterns in
        # this region, and return the resulting tokens; we start at i
        for tok in self._lex_loop(lexer, toresume, toresume[0].matchd, i):
            yield tok
        raise StopIteration
    def _lex_loop(self, lexer, toresume, matchd, i):
        # we need to loop over our grammar/stop-pattern pairs
        while i < len(self.pairs):
            # for each one, we will compile our stop-regex, and figure out the
            # name of the stop token to be created if this stop-regex matches.
            fqname = toresume[0].fqname()
            p = Point(toresume[0].x, toresume[0].y)
            if fqname in lexer.mode.ghist and p in lexer.mode.ghist[fqname]:
                mode = lexer.mode.ghist[fqname][p]
                grammar = mode.grammar
            elif fqname in lexer.mode.gstack:
                mode = lexer.mode.gstack[fqname]
                grammar = mode.grammar
                lexer.mode.ghist.setdefault(fqname, {})
                lexer.mode.ghist[fqname][p] = mode
                del lexer.mode.gstack[fqname]
            else:
                mode = lexer.mode
                grammar = self.pairs[i][0]
            lexer.mstack.append(mode)
            if self.pairs[i][1]:
                stopre = re.compile(self.pairs[i][1] % matchd, self.reflags)
            else:
                stopre = None
            if i == len(self.pairs) - 1:
                tokname = 'end'
            else:
                tokname = 'middle%d' % i
            # ok, so now loop over all the tokens in the current grammar, until
            # the stop-token (if any) is found, and return each result as we
            # get it.
            for tok in self._lex(lexer, toresume, tokname, stopre, grammar):
                yield tok
            # ok, so now either we found the stop-token, and have a new parent
            # for future tokens (if any), or we are done.
            if tok.name == tokname:
                toresume = [tok]
                matchd.update(tok.matchd)
            else:
                raise StopIteration
            # this should have already gotten done by _lex
            #lexer.mstack.pop(-1)
            i += 1
        # assuming we make it through all our grammars, and find the end-token,
        # then we need to signal that we are done.
        raise StopIteration
    def _lex(self, lexer, toresume, stopname, stopre, grammar):
        assert toresume
        parent = toresume[0]
        reenter = len(toresume) > 1
        null_t = None
        # ok, so there are only two ways we want to exit this loop: either we
        # lex the whole document, or we encounter the stop-token.
        done = False
        while not done and lexer.y < len(lexer.lines):
            line = self.get_line(lexer)
            old_y = lexer.y
            while not done and lexer.y == old_y and lexer.x < len(line):
                # ok, so reenter gets priority, since the current input might be
                # intended for the nested grammar. so handle it here
                if reenter:
                    reenter = False
                    for t in toresume[1].rule.resume(lexer, toresume[1:]):
                        yield t
                    # since we might have changed our x/y coordinates, we need
                    # to do some checks here, and maybe finish or change our
                    # coordinates
                    if lexer.y >= len(lexer.lines):
                        raise StopIteration
                    elif lexer.x >= len(line):
                        lexer.y += 1
                        lexer.x = 0
                        # ok, so get the *now* current line
                        line = self.get_line(lexer)
                if stopre:
                    # if we are looking for a stop-token, do that check now
                    m = stopre.match(line, lexer.x)
                    if m:
                        if null_t:
                            # if we have a null token waiting, return it first.
                            yield null_t
                            null_t = None
                        # ok, now return the stop-token, and signal that we are
                        # done and no more input is to be consumed
                        lexer.mstack.pop(-1)
                        yield self.make_token(lexer, m.group(0), stopname,
                                              parent, m.groupdict(), stopname)
                        done = True
                        break
                m = None
                # start checking our rules to see if we can match the input
                for rule in grammar.rules:
                    m = rule.match(lexer, parent)
                    if m:
                        # ok great, we have a match
                        if null_t:
                            # if we have a null token waiting, return it first.
                            yield null_t
                            null_t = None
                        # ok, now for every token this rule has created, we
                        # return them, one by one.
                        for t in rule.lex(lexer, parent, m):
                            yield t
                        break
                if not m:
                    # we didn't find a match on a rule, so add this character
                    # to the current null token (creating a new one if
                    # necessary)
                    if not null_t:
                        null_t = Token('null', None, lexer.y, lexer.x, '', None, parent)
                        null_t.color = lexer.get_color(null_t)
                    null_t.add_to_string(line[lexer.x])
                    lexer.x += 1
            # ok, we are at the end of a line of input. so, if we have a null
            # token waiting, now is the time to return it
            if null_t:
                yield null_t
                null_t = None
            if not done and old_y == lexer.y:
                lexer.y += 1
                lexer.x = 0
        raise StopIteration
class NocaseRegionRule(RegionRule):
    reflags = re.IGNORECASE

class Grammar:
    rules = []
grammar = Grammar()

class Lexer:
    def __init__(self, mode, grammar):
        self.mode = mode
        self.mstack = []
        self.grammar = grammar
        self.y = 0
        self.x = 0
        self.lines = None
        assert self.grammar.rules
    def get_line(self):
        return self.lines[self.y] + '\n'
    def lex(self, lines, y=0, x=0):
        self.action = 'lex'
        self.y = y
        self.x = x
        self.lines = lines
        self.mstack = []
        self.mode.ghist = {}
        self.mode.gstack = {}
        for t in self._lex():
            yield t
        del self.action
        raise StopIteration
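    # A hedged usage sketch (mode_obj is a hypothetical mode object providing
    # window, colors, default_color, ghist and gstack):
    #
    #     lexer = Lexer(mode_obj, my_grammar)
    #     for t in lexer.lex(['x = 1', 'y = 2']):
    #         print t.fqname(), repr(t.string)
    #
    # Tokens stream out in document order; characters no rule matches are
    # grouped into 'null' tokens rather than dropped.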
    def resume(self, lines, y, x, token):
        self.action = 'resume'
        self.y = y
        self.x = x
        self.lines = lines
        self.mstack = []
        toresume = token.parents()
        i = 1
        while i < len(toresume):
            if toresume[i].link and toresume[i].link != 'start':
                del toresume[i-1]
            else:
                i += 1
        if toresume:
            for t in toresume[0].rule.resume(self, toresume):
                yield t
        for t in self._lex():
            yield t
        del self.action
        raise StopIteration
    def _lex(self):
        parent = None
        while self.y < len(self.lines):
            null_t = None
            line = self.get_line()
            while self.x < len(line):
                m = None
                for rule in self.grammar.rules:
                    m = rule.match(self, parent)
                    if m:
                        if null_t:
                            yield null_t
                            null_t = None
                        for t in rule.lex(self, parent, m):
                            yield t
                        break
                if self.y >= len(self.lines):
                    break
                line = self.get_line()
                if not m:
                    if self.x < len(line):
                        if null_t is None:
                            null_t = Token('null', None, self.y, self.x, '', None, parent)
                            null_t.color = self.get_color(null_t)
                        null_t.add_to_string(line[self.x])
                    self.x += 1
            if null_t:
                yield null_t
            self.y += 1
            self.x = 0
        raise StopIteration
    def get_color(self, token):
        fqlist = token.fqlist()
        if self.mstack:
            mode = self.mstack[-1]
        else:
            mode = self.mode
        c = mode.default_color
        for j in range(0, len(fqlist)):
            name = '.'.join(fqlist[j:])
            if name in mode.colors:
                c = mode.colors[name]
                break
        #if DARK_BACKGROUND:
        if True:
            c |= curses.A_BOLD
        return c
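# A worked example of the lookup above (hypothetical color table): for a token
# whose fqlist() is ['string', 'escaped'], get_color() tries 'string.escaped'
# and then 'escaped' against mode.colors, taking the first (most specific)
# dotted suffix that is defined and falling back to mode.default_color.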