branch: pmacs2
commit 30dcf827aa (parent 90aab85f25)
moculus committed 2007-06-11 21:40:21 +00:00
3 changed files with 186 additions and 321 deletions


@@ -485,14 +485,8 @@ class Application:
             elif token.x >= x + slot.width:
                 break
-            c = w.mode.colors.get(token.name, w.mode.default_color)
-            # c = w.mode.default_color
-            # name_parts = token.name.split('.')
-            # for i in range(0, len(name_parts)):
-            #     name = '.'.join(name_parts[i:])
-            #     if name in w.mode.colors:
-            #         c = w.mode.colors[name]
-            #         break
+            name = token.fqname()
+            c = w.mode.colors.get(name, w.mode.default_color)
             if DARK_BACKGROUND:
                 c |= curses.A_BOLD
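The application-side change replaces the old suffix-scanning loop with a single lookup keyed on the token's fully qualified name. A minimal self-contained sketch of that lookup (the color table and names below are invented for illustration, not pmacs's real configuration):

default_color = 'white'
colors = {'string.start': 'green', 'string.null': 'green', 'comment': 'blue'}

def color_for(token_fqname):
    # one dict hit on the fully qualified name, falling back to the default;
    # this replaces the old loop over progressively shorter dotted suffixes
    return colors.get(token_fqname, default_color)

assert color_for('comment') == 'blue'
assert color_for('keyword') == 'white'  # unknown names use the default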


@@ -20,7 +20,6 @@ class Highlighter:
     def __init__(self, lexer):
         self.lexer = lexer
         self.tokens = []
-        self.line_contexts = {}

     def dump(self, fmt='(%3s, %2s) | %s'):
         print fmt % ('y', 'x', 'string')
@@ -50,20 +49,36 @@ class Highlighter:
                 sys.stdout.write(token.string)
             sys.stdout.write('\n')

+    def delete_token(self, y, i):
+        assert y < len(self.tokens), "%d < %d" % (y, len(self.tokens))
+        assert i < len(self.tokens[y]), "%d < %d" % (i, len(self.tokens[i]))
+        deleted = []
+        deleted.append(self.tokens[y].pop(i))
+        while y < len(self.tokens):
+            while i < len(self.tokens[y]):
+                while deleted and self.tokens[y][i].parent is not deleted[-1]:
+                    del deleted[-1]
+                if not deleted:
+                    return
+                elif self.tokens[y][i].parent is deleted[-1]:
+                    deleted.append(self.tokens[y].pop(i))
+                else:
+                    raise Exception, "huh?? %r %r" % (self.tokens[y][i].parent,
+                                                      deleted)
+            i = 0
+            y += 1
+
     def highlight(self, lines):
         self.tokens = [[] for l in lines]
         self.lexer.lex(lines, y=0, x=0)
         for token in self.lexer:
             self.tokens[token.y].append(token)
-        self.line_contexts = dict(self.lexer.line_contexts)

     # relexing
     # ======================
     def relex(self, lines, y1, x1, y2, x2):
         # start the relexing process
-        #self.lexer.lex(lines, y1, 0)
-        rulecontexts = self.line_contexts[y1]
-        self.lexer.resume(lines, y1, 0, rulecontexts)
+        token = self.tokens[y1][0]
+        self.lexer.resume(lines, y1, 0, token)

         # these keep track of the current y coordinate, the current token index
         # on line[y], and the current "new token", respectively.
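The new Highlighter.delete_token cascades: after the token at (y, i) is popped, any later token whose parent chain leads back to something already deleted is popped as well. A self-contained sketch of that parent-chain test (the Tok class is a stand-in for lex2.Token, not the real one):

class Tok(object):
    def __init__(self, name, parent=None):
        self.name, self.parent = name, parent

start = Tok('start')                # region opener that is being deleted
child = Tok('word', parent=start)   # token produced inside that region
other = Tok('text', parent=None)    # unrelated top-level token

deleted = [start]
# child hangs off the most recently deleted token, so it goes too;
# other has no deleted ancestor and survives the cascade
assert child.parent is deleted[-1]
assert other.parent is not deleted[-1]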

lex2.py

@@ -3,56 +3,66 @@ import re
 valid_name_re = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
 reserved_names = ['start', 'middle', 'end', 'null']

-class RuleContext:
-    # to be clear:
-    #   x, y: where the rule processing began
-    #   rule: the rule which began
-    #   flag: a signal to be used to resume the rule correctly
-    #   context: the previous rule namespace(s)
-    #   matchd: the dictionary returned by the rule's matching
-    def __init__(self, y, x, rule, flag, context, matchd):
-        self.y = y
-        self.x = x
-        self.rule = rule
-        self.flag = flag
-        self.context = context
-        self.matchd = matchd

 class Token(object):
-    def __init__(self, name, rule, y, x, s, **vargs):
+    def __init__(self, name, rule=None, y=0, x=0, s="", parent=None, matchd={}):
         self.name = name
         self.rule = rule
         self.y = y
         self.x = x
         self.string = s
-        self.vargs = vargs
+        self.parent = parent
+        self.matchd = matchd
+    def parents(self):
+        if self.parent is not None:
+            parents = self.parent.parents()
+            parents.append(self.parent)
+            return parents
+        else:
+            return []
+    def domain(self):
+        names = []
+        if self.parent is not None:
+            names.extend(self.parent.domain())
+        names.append(self.rule.name)
+        return names
+    def fqlist(self):
+        names = []
+        if self.parent is not None:
+            names.extend(self.parent.domain())
+        names.append(self.name)
+        return names
+    def fqname(self):
+        if self.name == 'start':
+            names = self.domain()
+            names.append(self.name)
+        else:
+            names = self.fqlist()
+        return '.'.join(names)
     def copy(self):
-        return Token(self.name, None, self.y, self.x, self.string, **self.vargs)
+        return Token(self.name, self.rule, self.y, self.x, self.string,
+                     self.parent, self.matchd)
     def add_to_string(self, s):
         self.string += s
     def end_x(self):
         return self.x + len(self.string)
     def __eq__(self, other):
-        return (self.y == other.y and
-                self.x == other.x and
-                self.string == other.string and
-                self.name == other.name and
-                self.vargs == other.vargs)
+        return (self.y == other.y and self.x == other.x
+                and self.name == other.name and self.parent is other.parent and
+                self.string == other.string)
     def __repr__(self):
         if len(self.string) < 10:
             s = self.string
         else:
             s = self.string[:10] + '...'
-        return "<Token(%r, %r, %d, %d, %r)>" % (self.name, self.rule, self.y, self.x, s)
-    def render(self):
-        return (self,)
+        fields = (self.fqname(), self.rule, self.y, self.x, s)
+        return "<Token(%r, %r, %d, %d, %r)>" % fields

 class Rule:
     name = 'abstract'
-    def match(self, lexer, context=[], d={}):
+    def match(self, lexer, parent):
         raise Exception, "%s rule cannot match!" % self.name
-    def make_token(self, lexer, s, name, **vargs):
-        return Token(name, self, lexer.y, lexer.x, s, **vargs)
+    def make_token(self, lexer, s, name, parent=None, matchd={}):
+        return Token(name, self, lexer.y, lexer.x, s, parent, matchd)

 class ConstantRule(Rule):
     def __init__(self, name, constant):
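Token names are no longer pre-joined from a context list; instead fqname() walks the parent chain on demand. A condensed, runnable copy of just the naming logic from the new Token (FakeRule stands in for a real Rule; only the name-related methods are kept):

class FakeRule(object):
    def __init__(self, name):
        self.name = name

class NameOnlyToken(object):
    def __init__(self, name, rule, parent=None):
        self.name, self.rule, self.parent = name, rule, parent
    def domain(self):
        names = []
        if self.parent is not None:
            names.extend(self.parent.domain())
        names.append(self.rule.name)
        return names
    def fqlist(self):
        names = []
        if self.parent is not None:
            names.extend(self.parent.domain())
        names.append(self.name)
        return names
    def fqname(self):
        if self.name == 'start':
            names = self.domain()
            names.append(self.name)
        else:
            names = self.fqlist()
        return '.'.join(names)

string_rule = FakeRule('string')
start = NameOnlyToken('start', string_rule)          # opens a string region
inner = NameOnlyToken('null', string_rule, start)    # plain text inside it
assert start.fqname() == 'string.start'
assert inner.fqname() == 'string.null'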
@@ -60,11 +70,12 @@ class ConstantRule(Rule):
         assert name not in reserved_names, "reserved rule name: %r" % name
         self.name = name
         self.constant = constant
-    def match(self, lexer, context=[], d={}):
+        self.length = len(self.constant)
+    def match(self, lexer, parent):
         if lexer.lines[lexer.y][lexer.x:].startswith(self.constant):
-            name = '.'.join(context + [self.name])
-            lexer.add_token(self.make_token(lexer, self.constant, name, grammar=lexer.grammar))
-            lexer.x += len(self.constant)
+            token = self.make_token(lexer, self.constant, self.name, parent)
+            lexer.add_token(token)
+            lexer.x += self.length
             return True
         else:
             return False
@@ -76,17 +87,20 @@ class PatternRule(Rule):
         self.name = name
         self.pattern = pattern
         self.re = re.compile(pattern)
-    def match(self, lexer, context=[], d={}):
+    def _match(self, lexer, parent, m):
+        s = m.group(0)
+        token = self.make_token(lexer, s, self.name, parent)
+        lexer.add_token(token)
+        lexer.x += len(s)
+    def match(self, lexer, parent):
         m = self.re.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            name = '.'.join(context + [self.name])
-            lexer.add_token(self.make_token(lexer, m.group(0), name, grammar=lexer.grammar))
-            lexer.x += len(m.group(0))
+            self._match(lexer, parent, m)
             return True
         else:
             return False

-class ContextPatternRule(Rule):
+class ContextPatternRule(PatternRule):
     def __init__(self, name, pattern, fallback):
         assert valid_name_re.match(name), 'invalid name %r' % name
         assert name not in reserved_names, "reserved rule name: %r" % name
@@ -94,16 +108,14 @@ class ContextPatternRule(Rule):
         self.pattern = pattern
         self.fallback = fallback
         self.fallback_re = re.compile(fallback)
-    def match(self, lexer, context=[], d={}):
+    def match(self, lexer, parent):
         try:
-            r = re.compile(self.pattern % d)
+            r = re.compile(self.pattern % parent.matchd)
         except KeyError:
             r = self.fallback_re
         m = r.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            name = '.'.join(context + [self.name])
-            lexer.add_token(self.make_token(lexer, m.group(0), name, grammar=lexer.grammar))
-            lexer.x += len(m.group(0))
+            self._match(lexer, parent, m)
             return True
         else:
             return False
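PatternRule now funnels its token creation through _match, and ContextPatternRule reuses it after interpolating the parent token's match dictionary into its pattern, falling back to a generic pattern when the key is missing. A self-contained sketch of that interpolate-or-fall-back step (the patterns here are made up):

import re

pattern  = r'%(tag)s'       # wants the group captured by the enclosing region's start
fallback = r'[a-zA-Z_]+'

def build_re(parent_matchd):
    try:
        return re.compile(pattern % parent_matchd)
    except KeyError:
        return re.compile(fallback)

assert build_re({'tag': 'div'}).match('div')   # specific pattern from the parent's groups
assert build_re({}).match('anything')          # no captured group, use the fallback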
@@ -117,42 +129,50 @@ class RegionRule(Rule):
         self.grammar = grammar
         self.end = end
         self.start_re = re.compile(start)
-    def _add_from_regex(self, context, name, lexer, m, grammar):
-        t_name = '.'.join(context + [self.name, name])
-        t = self.make_token(lexer, m.group(0), t_name, grammar=grammar)
-        lexer.add_token(t)
-        lexer.x += len(m.group(0))
-    def resume(self, lexer, context, flag, d, rulecontexts):
-        assert rulecontexts, "can't resume without rulecontexts!"
-        self._match(lexer, context, d, None, rulecontexts)
+    def resume(self, lexer, toresume):
+        assert toresume, "can't resume without tokens to resume!"
+        self._match(lexer, None, None, toresume)
         return True
-    def match(self, lexer, context=[], d={}):
-        # see if we can match our start token
+    def match(self, lexer, parent):
         m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
         if m:
-            # region was match, so let's do this
-            return self._match(lexer, context, m.groupdict(), m, [])
+            self._match(lexer, parent, m, [])
+            return True
         else:
-            # region was not matched; we never started. so return false
             return False
-    def _match(self, lexer, context, d, m, rulecontext=[]):
-        # if we have been given rulecontext, then we are going to "resume" a
-        # parse that can already be assumed to have started
-        reenter = len(rulecontext) > 0
-        assert m or reenter
-        # first let's do some bookkeeping
-        lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'start',
-                                         list(context), dict(d)))
-        if m is not None:
-            self._add_from_regex(context, 'start', lexer, m, lexer.grammar)
-        null_t_name = '.'.join(context + [self.name, 'null'])
+    def _add_from_regex(self, name, lexer, parent, m, matchd={}):
+        s = m.group(0)
+        token = self.make_token(lexer, s, name, parent, matchd)
+        lexer.add_token(token)
+        lexer.x += len(s)
+        return token
+    def _match(self, lexer, parent, m, toresume=[]):
+        # we either need a match object, or a token to resume
+        assert m or len(toresume) > 0
+        if m:
+            # if we had a match, then it becomes the parent, and we save its
+            # subgroup dict
+            d = m.groupdict()
+            parent = self._add_from_regex('start', lexer, parent, m, d)
+        else:
+            # otherwise, we should be resuming the start token, so let's pull
+            # the relevant info out of the token
+            parent = toresume[0]
+            d = parent.matchd
+            assert parent.name == 'start'
         null_t = None
-        # if we have an end token, then build it here. notice that it can
+        # this determines whether we are still reentering. if len(toresume) == 1
+        # then it means that we have been reentering but will not continue, so
+        # reenter will be false.
+        reenter = len(toresume) > 1
+        # if we have an end regex, then build it here. notice that it can
         # reference named groups from the start token. if we have no end,
         # well, then, we're never getting out of here alive!
         if self.end:
@@ -167,21 +187,19 @@ class RegionRule(Rule):
             # if this line is empty, then we skip it, but here we insert
             # an empty null token just so we have something
             if not reenter and len(lexer.lines[lexer.y]) == 0:
-                null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                 lexer.add_token(null_t)
                 null_t = None

             # ok, as long as we haven't found the end token, and have more
             # data on the current line to read, we will process tokens
-            while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
+            while (not done and lexer.y == old_y and
+                   lexer.x < len(lexer.lines[lexer.y])):
                 # if we are reentering mid-parse, then that takes precedence
                 if reenter:
                     reenter = False
-                    rule2 = rulecontext[0].rule
-                    context2 = rulecontext[0].context
-                    d2 = rulecontext[0].matchd
-                    assert rule2.resume(lexer, context2, d2, rulecontext[1:]), \
-                           "%r %r %r %r" % (lexer, context2, d2, rulecontext[1:])
+                    rule2 = toresume[1].rule
+                    rule2.resume(lexer, toresume[1:])
                     found = True
                     null_t = None
                     break
@@ -191,7 +209,7 @@ class RegionRule(Rule):
                 if self.end:
                     m = end_re.match(lexer.lines[lexer.y], lexer.x)
                     if m:
-                        self._add_from_regex(context, 'end', lexer, m, None)
+                        self._add_from_regex('end', lexer, parent, m, {})
                         done = True
                         break
@@ -199,7 +217,7 @@ class RegionRule(Rule):
                 # find a token, note that we found one and exit the loop
                 found = False
                 for rule in self.grammar.rules:
-                    if rule.match(lexer, context + [self.name], d):
+                    if rule.match(lexer, parent):
                         found = True
                         null_t = None
                         break
@@ -209,7 +227,7 @@ class RegionRule(Rule):
                 # create if it isn't set).
                 if not found:
                     if null_t is None:
-                        null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                        null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                         lexer.add_token(null_t)
                     null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
@@ -223,12 +241,9 @@ class RegionRule(Rule):
             # then that means we're finished with the line and should move
             # on to the next one here
             if not done and old_y == lexer.y:
-                lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0

-        # alright, we're finally done procesing the region, so return true
-        lexer.context.pop(-1)
         return True

 class DualRegionRule(Rule):
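Relexing now rides on the token tree instead of saved RuleContext lists: a token's parents() gives the enclosing 'start' tokens outermost-first, and RegionRule peels one entry off toresume before handing the rest to the nested region (the lexer-side wiring is still stubbed out in this commit, hence the 'aw damn' exceptions below). A small sketch of the chain itself (Tok stands in for lex2.Token):

class Tok(object):
    def __init__(self, name, parent=None):
        self.name, self.parent = name, parent
    def parents(self):
        if self.parent is not None:
            ps = self.parent.parents()
            ps.append(self.parent)
            return ps
        return []

outer = Tok('start')                 # e.g. a string region's start token
inner = Tok('start', parent=outer)   # a region nested inside it
leaf  = Tok('null',  parent=inner)   # the token relexing begins from

toresume = leaf.parents()            # outermost-first chain of start tokens
assert toresume == [outer, inner]
# the outer rule resumes first; because len(toresume) > 1 it is "reentering"
# and asks toresume[1].rule to resume with toresume[1:]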
@@ -242,52 +257,49 @@ class DualRegionRule(Rule):
         self.grammar2 = grammar2
         self.end = end
         self.start_re = re.compile(start)
-    def _add_from_regex(self, context, name, lexer, m, grammar=None):
-        t_name = '.'.join(context + [self.name, name])
-        t = self.make_token(lexer, m.group(0), t_name, grammar=grammar)
-        lexer.add_token(t)
-        lexer.x += len(m.group(0))
-    def resume(self, lexer, context, flag, d, rulecontexts):
-        if flag == 'start':
-            d2 = self._match_first(lexer, context, d, None, rulecontexts)
-            d3 = dict(d.items() + d2.items())
-            self._match_second(lexer, context, d3, None, rulecontexts)
-            return True
-        elif flag == 'middle':
-            self._match_second(lexer, context, flag, d, None, rulecontexts)
-            return True
+    def _add_from_regex(self, name, lexer, parent, m, matchd={}):
+        s = m.group(0)
+        token = self.make_token(lexer, s, name, parent, matchd)
+        lexer.add_token(token)
+        lexer.x += len(s)
+        return token
+    def resume(self, lexer, toresume):
+        assert toresume
+        token = toresume[0]
+        d = token.matchd
+        if token.name == 'start':
+            stoken = toresume[0]
+            mtoken = self._match_first(lexer, stoken, None, toresume)
+            self._match_second(lexer, mtoken, [])
+        elif token.name == 'middle':
+            d3 = token.matchd
+            self._match_second(lexer, token.parent, d3, toresume)
         else:
             raise Exception, "invalid flag %r" % flag
+        return True
-    def match(self, lexer, context=[], d={}):
+    def match(self, lexer, parent):
         # see if we can match our start token
         m = self.start_re.match(lexer.lines[lexer.y], lexer.x)
         if m:
             # region was match, so let's do this
             d1 = m.groupdict()
-            d2 = self._match_first(lexer, context, d1, m, [])
+            d2 = self._match_first(lexer, parent, m, [])
             d3 = dict(d1.items() + d2.items())
-            self._match_second(lexer, context, d3, None, [])
+            self._match_second(lexer, parent, d3, None, [])
             return True
         else:
             # region was not matched; we never started. so return false
             return False
-    def _match_first(self, lexer, context, d1, m1, rulecontext=[]):
+    def _match_first(self, lexer, context, d1, m1, toresume=[]):
         # if we have been given rulecontext, then we are going to "resume" a
         # parse that can already be assumed to have started
-        reenter = len(rulecontext) > 0
+        reenter = len(toresume) > 1
         assert m1 or reenter
-        # first let's do some bookkeeping
-        lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'start',
-                                         list(context), dict(d1)))
         # ok, so create our start token, and get ready to start reading data
         if m1 is not None:
-            self._add_from_regex(context, 'start', lexer, m1, lexer.grammar)
-        null_t_name = '.'.join(context + [self.name, 'null'])
+            self._add_from_regex('start', lexer, parent, m1, m1.groupdict())
         null_t = None
         middle_re = re.compile(self.middle % d1)
@@ -302,7 +314,7 @@ class DualRegionRule(Rule):
             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '')
                 lexer.add_token(null_t)
                 null_t = None
@@ -311,22 +323,22 @@ class DualRegionRule(Rule):
             while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
                 # if we are reentering mid-parse, then that takes precedence
                 if reenter:
-                    reenter = False
-                    xrule = rulecontext[0].rule
-                    xcontext = rulecontext[0].context
-                    xd = rulecontext[0].matchd
-                    assert rule2.resume(lexer, xcontext, xd, rulecontext[1:]), \
-                           "%r %r %r %r" % (lexer, xcontext, xd, rulecontext[1:])
-                    found = True
-                    null_t = None
-                    break
+                    raise Exception, "aw damn"
+                    #reenter = False
+                    #xrule = rulecontext[0].rule
+                    #xd = rulecontext[0].matchd
+                    #assert rule2.resume(lexer, xcontext, xd, rulecontext[1:]), \
+                    #       "%r %r %r %r" % (lexer, xcontext, xd, rulecontext[1:])
+                    #found = True
+                    #null_t = None
+                    #break

                 # see if we have found the middle token. if so, we can then
                 # proceed to "stage 2"
                 m2 = middle_re.match(lexer.lines[lexer.y], lexer.x)
                 if m2:
                     d2 = m2.groupdict()
-                    self._add_from_regex(context, 'middle', lexer, m2, None)
+                    self._add_from_regex('middle', lexer, parent, m2, {})
                     done = True
                     break
@@ -334,7 +346,7 @@ class DualRegionRule(Rule):
                 # find a token, note that we found one and exit the loop
                 found = False
                 for rule in self.grammar1.rules:
-                    if rule.match(lexer, context + [self.name], d1):
+                    if rule.match(lexer, parent):
                         found = True
                         null_t = None
                         break
@@ -344,7 +356,7 @@ class DualRegionRule(Rule):
                 # create if it isn't set).
                 if not found:
                     if null_t is None:
-                        null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                        null_t = Token('null', None, lexer.y, lexer.x, '')
                         lexer.add_token(null_t)
                     null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
@@ -361,21 +373,17 @@ class DualRegionRule(Rule):
                 lexer.save_context()
                 lexer.y += 1
                 lexer.x = 0
-        lexer.context.pop(-1)
         return d2

-    def _match_second(self, lexer, context, d3, m, rulecontext=[]):
+    def _match_second(self, lexer, context, d3, m, toresume=[]):
         # if we have been given rulecontext, then we are going to "resume" a
         # parse that can already be assumed to have started
-        reenter = len(rulecontext) > 0
+        reenter = len(toresume) > 1
         # ok stage 2 is like stage 1, only we are looking for end tokens
         # instead of middle tokens
-        null_t_name = '.'.join(context + [self.name, 'null'])
         null_t = None
         end_re = re.compile(self.end % d3)
-        lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'middle',
-                                         list(context), dict(d3)))

         # ok, so as long as we aren't done (we haven't found an end token),
         # keep reading input
@@ -385,20 +393,20 @@ class DualRegionRule(Rule):
             # if we are reentering mid-parse, then that takes precedence
             if reenter:
-                reenter = False
-                xrule = rulecontext[0].rule
-                xcontext = rulecontext[0].context
-                xd = rulecontext[0].matchd
-                assert rule2.resume(lexer, xcontext, xd, rulecontext[1:]), \
-                       "%r %r %r %r" % (lexer, xcontext, xd, rulecontext[1:])
-                found = True
-                null_t = None
-                break
+                raise Exception, "aw damn"
+                #reenter = False
+                #xrule = rulecontext[0].rule
+                #xd = rulecontext[0].matchd
+                #assert rule2.resume(lexer, xcontext, xd, rulecontext[1:]), \
+                #       "%r %r %r %r" % (lexer, xcontext, xd, rulecontext[1:])
+                #found = True
+                #null_t = None
+                #break

             # if this line is empty, then we will skip it, but here weinsert
             # an empty null token just so we have something
             if len(lexer.lines[lexer.y]) == 0:
-                null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                null_t = Token('null', None, lexer.y, lexer.x, '')
                 lexer.add_token(null_t)
                 null_t = None
@@ -409,7 +417,7 @@ class DualRegionRule(Rule):
                 # proceed to "stage 2"
                 m3 = end_re.match(lexer.lines[lexer.y], lexer.x)
                 if m3:
-                    self._add_from_regex(context, 'end', lexer, m3, None)
+                    self._add_from_regex('end', lexer, parent, m3, {})
                     done = True
                     break
@@ -417,7 +425,7 @@ class DualRegionRule(Rule):
                 # find a token, note that we found one and exit the loop
                 found = False
                 for rule in self.grammar2.rules:
-                    if rule.match(lexer, context + [self.name], d3):
+                    if rule.match(lexer, parent):
                         found = True
                         null_t = None
                         break
@@ -427,7 +435,7 @@ class DualRegionRule(Rule):
                 # create if it isn't set).
                 if not found:
                     if null_t is None:
-                        null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
+                        null_t = Token('null', None, lexer.y, lexer.x, '')
                         lexer.add_token(null_t)
                     null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
                     lexer.x += 1
@@ -446,150 +454,8 @@ class DualRegionRule(Rule):
                 lexer.x = 0

         # alright, we're finally done processing; return true
-        lexer.context.pop(-1)
         return True
# def matchOLD(self, lexer, context=[], d={}):
# m1 = self.start_re.match(lexer.lines[lexer.y], lexer.x)
# # see if we can match out start token
# if m1:
# # ok, so create our start token, and get ready to start reading data
# self._add_from_regex(context, 'start', lexer, m1, lexer.grammar)
# null_t_name = '.'.join(context + [self.name, 'null'])
# null_t = None
#
# d1 = m1.groupdict()
# lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'start',
# list(context), dict(d1)))
# d2 = {}
# middle_re = re.compile(self.middle % d1)
#
# # ok, so as long as we aren't done (we haven't found an end token),
# # keep reading input
# done = False
# while not done and lexer.y < len(lexer.lines):
# old_y = lexer.y
# # if this line is empty, then we will skip it, but here weinsert
# # an empty null token just so we have something
# if len(lexer.lines[lexer.y]) == 0:
# null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
# lexer.add_token(null_t)
# null_t = None
#
# # ok, as long as we haven't found the end token, and have more
# # data on the current line to read, we will process tokens
# while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
# # see if we have found the middle token. if so, we can then
# # proceed to "stage 2"
# m2 = middle_re.match(lexer.lines[lexer.y], lexer.x)
# if m2:
# d2 = m2.groupdict()
# self._add_from_regex(context, 'middle', lexer, m2, None)
# done = True
# break
#
# # ok, we need to check all our rules now, in order. if we
# # find a token, note that we found one and exit the loop
# found = False
# for rule in self.grammar1.rules:
# if rule.match(lexer, context + [self.name], d1):
# found = True
# null_t = None
# break
#
# # if we never found a token, then we need to add another
# # character to the current null token (which we should
# # create if it isn't set).
# if not found:
# if null_t is None:
# null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
# lexer.add_token(null_t)
# null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
# lexer.x += 1
#
# # ok, since we're soon going to be on a different line (or
# # already are), we want a new null token. so forget about the
# # current one.
# null_t = None
#
# # if we're still on the same line at this point (and not done)
# # then that means we're finished with the line and should move
# # on to the next one here
# if not done and old_y == lexer.y:
# lexer.save_context()
# lexer.y += 1
# lexer.x = 0
#
# # ok stage 2 is like stage 1, only we are looking for end tokens
# # instead of middle tokens
# d3 = dict(d1.items() + d2.items())
# end_re = re.compile(self.end % d3)
# lexer.context.pop(-1)
# lexer.context.append(RuleContext(lexer.y, lexer.x, self, 'middle',
# list(context), dict(d3)))
#
# # ok, so as long as we aren't done (we haven't found an end token),
# # keep reading input
# done = False
# while not done and lexer.y < len(lexer.lines):
# old_y = lexer.y
# # if this line is empty, then we will skip it, but here weinsert
# # an empty null token just so we have something
# if len(lexer.lines[lexer.y]) == 0:
# null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
# lexer.add_token(null_t)
# null_t = None
#
# # ok, as long as we haven't found the end token, and have more
# # data on the current line to read, we will process tokens
# while not done and lexer.y == old_y and lexer.x < len(lexer.lines[lexer.y]):
# # see if we have found the middle token. if so, we can then
# # proceed to "stage 2"
# m3 = end_re.match(lexer.lines[lexer.y], lexer.x)
# if m3:
# self._add_from_regex(context, 'end', lexer, m3, None)
# done = True
# break
#
# # ok, we need to check all our rules now, in order. if we
# # find a token, note that we found one and exit the loop
# found = False
# for rule in self.grammar2.rules:
# if rule.match(lexer, context + [self.name], d3):
# found = True
# null_t = None
# break
#
# # if we never found a token, then we need to add another
# # character to the current null token (which we should
# # create if it isn't set).
# if not found:
# if null_t is None:
# null_t = Token(null_t_name, None, lexer.y, lexer.x, '')
# lexer.add_token(null_t)
# null_t.add_to_string(lexer.lines[lexer.y][lexer.x])
# lexer.x += 1
#
# # ok, since we're soon going to be on a different line (or
# # already are), we want a new null token. so forget about the
# # current one.
# null_t = None
#
# # if we're still on the same line at this point (and not done)
# # then that means we're finished with the line and should move
# # on to the next one here
# if not done and old_y == lexer.y:
# lexer.save_context()
# lexer.y += 1
# lexer.x = 0
#
# # alright, we're finally done processing; return true
# lexer.context.pop(-1)
# return True
# else:
# # dual region was not matched; we never started. so return false
# return False
 class Grammar:
     rules = []
     def __init__(self):
@@ -606,9 +472,6 @@ class Lexer:
         self.lines = None
         self.tokens = []
-        self.context = []
-        self.line_contexts = {}

     def add_token(self, t):
         self.tokens.append(t)
@@ -618,30 +481,24 @@ class Lexer:
         self.lines = lines
         self.tokens = []
-        self.context = []
-        self.line_contexts = {}

-    def resume(self, lines, y=0, x=0, rulecontexts=[]):
-        if len(rulecontexts) == 0:
-            self.lex(lines, y, x)
-        else:
+    def resume(self, lines, y, x, token):
         self.y = y
         self.x = x
+        #self.x = 0
         self.lines = lines
         self.tokens = []
-        rc = rulecontexts[0]
-        rc.rule.resume(self, rc.context, rc.flag, rc.matchd, rulecontexts[1:])
+        if token:
+            toresume = token.parents()
+            if toresume:
+                raise Exception, "aw damn"

     def __iter__(self):
         if self.lines is None:
             raise Exception, "no lines to lex"
         return self

-    def save_context(self):
-        self.line_contexts[self.y] = list(self.context)
     def next(self):
-        null_t_name = 'null'
         null_t = None
         if self.tokens:
@@ -652,15 +509,14 @@ class Lexer:
             while self.x < len(line):
                 curr_t = None
                 for rule in self.grammar.rules:
-                    if rule.match(self):
+                    if rule.match(self, None):
                         assert self.tokens, "match rendered no tokens?"
                         return self.tokens.pop(0)
                 if null_t is None:
-                    null_t = Token(null_t_name, None, self.y, self.x, '')
+                    null_t = Token('null', None, self.y, self.x, '')
                     self.add_token(null_t)
                 null_t.add_to_string(line[self.x])
                 self.x += 1
-            self.save_context()
             null_t = None
             self.y += 1
             self.x = 0
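Finally, in Lexer.next() top-level rules are now called as rule.match(self, None) (no parent), and characters that no rule claims still collect into a plain 'null' token per unmatched stretch. A self-contained sketch of that fallback loop, with a single made-up word matcher standing in for a grammar:

import re

class Tok(object):
    def __init__(self, name, y, x, s=''):
        self.name, self.y, self.x, self.string = name, y, x, s
    def add_to_string(self, s):
        self.string += s

word_re = re.compile('[a-z]+')

def lex_line(line):
    x, out, null_t = 0, [], None
    while x < len(line):
        m = word_re.match(line, x)
        if m:
            out.append(Tok('word', 0, x, m.group(0)))
            x += len(m.group(0))
            null_t = None          # next unmatched stretch gets a fresh null token
        else:
            if null_t is None:
                null_t = Tok('null', 0, x)
                out.append(null_t)
            null_t.add_to_string(line[x])
            x += 1
    return out

toks = lex_line('foo  bar')
assert [(t.name, t.string) for t in toks] == [('word', 'foo'), ('null', '  '), ('word', 'bar')]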