import re

valid_name_re  = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*$')
full_name_re   = re.compile('^([a-zA-Z_]+)([0-9]*)$')
reserved_names = ['start', 'middle', 'end', 'null']

class Token(object):
    """A lexed token: its name, the rule that produced it, its (y, x)
    position in the buffer, its text, and (for region rules) the parent
    token and the match groups that produced it."""
    def __init__(self, name, rule=None, y=0, x=0, s="", parent=None, matchd=None):
        self.name    = name
        self.rule    = rule
        self.y       = y
        self.x       = x
        self.string  = s
        self.parent  = parent
        # default to None rather than {} so every token created without
        # an explicit matchd does not share one mutable dict
        self.matchd  = matchd if matchd is not None else {}
        assert parent is None or hasattr(parent, 'name'), \
            'invalid parent: %r' % parent
    def parents(self):
        """Return this token's ancestor tokens, outermost first."""
        if self.parent is not None:
            parents = self.parent.parents()
            parents.append(self.parent)
            return parents
        else:
            return []
    def domain(self):
        """Return the rule names enclosing this token, outermost first."""
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.name != 'middle':
            names.append(self.rule.name)
        return names
    def fqlist(self):
        """Return the components of this token's fully-qualified name."""
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.name == 'start':
            names.append(self.rule.name)
        names.append(self.name)
        return names
    def fqname(self):
        """Return the dot-delimited fully-qualified name of this token."""
        names = self.fqlist()
        return '.'.join(names)
    def copy(self):
        return Token(self.name, self.rule, self.y, self.x, self.string,
                     self.parent, self.matchd)
    def add_to_string(self, s):
        self.string += s
    def end_x(self):
        return self.x + len(self.string)
    def __eq__(self, other):
        return (self.y == other.y and self.x == other.x
                and self.name == other.name and self.parent is other.parent and
                self.string == other.string)
    def __repr__(self):
        if len(self.string) < 10:
            s = self.string
        else:
            s = self.string[:10] + '...'
        fields = (self.fqname(), self.rule, self.y, self.x, s)
        return "<Token(%r, %r, %d, %d, %r)>" % fields

class Rule(object):
    """Base class for grammar rules. A rule knows how to match itself
    at the lexer's current position and how to turn a match into
    tokens."""
    reflags = 0
    def __init__(self, name, group=None):
        assert valid_name_re.match(name), 'invalid name %r' % name
        assert name not in reserved_names, "reserved rule name: %r" % name
        self.name = name
        if group is None:
            self.group = name
        else:
            self.group = group
    def match(self, lexer, parent):
        raise NotImplementedError("match() must be implemented by subclasses")
    def lex(self, lexer, parent, match):
        raise NotImplementedError("lex() must be implemented by subclasses")
    def make_token(self, lexer, s, name, parent=None, matchd=None):
        t = Token(name, self, lexer.y, lexer.x, s, parent, matchd)
        lexer.x += len(s)
        return t
    def get_line(self, lexer):
        return lexer.lines[lexer.y] + '\n'

class PatternRule(Rule):
    """A rule that emits a single token wherever its regex matches."""
    def __init__(self, name, pattern, group=None):
        Rule.__init__(self, name, group)
        self.pattern = pattern
        self.re = re.compile(self.pattern, self.reflags)
    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        if m:
            yield self.make_token(lexer, m.group(0), self.name, parent, m.groupdict())

class NocasePatternRule(PatternRule):
    reflags = re.IGNORECASE
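
# Hedged usage sketch: PatternRule.match() runs its regex at the lexer's
# current position and lex() turns the match into a single token. The
# rule name, pattern, and stand-in lexer below are hypothetical.
def _pattern_rule_example():
    class _FakeLexer(object):
        def __init__(self, lines):
            self.lines, self.y, self.x = lines, 0, 0
    rule  = PatternRule('comment', '#.*$')
    lexer = _FakeLexer(['# hello'])
    m = rule.match(lexer, None)
    tokens = list(rule.lex(lexer, None, m))
    assert tokens[0].fqname() == 'comment'
    assert tokens[0].string == '# hello'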

class ContextPatternRule(PatternRule):
    """A pattern rule whose regex is built at match time by interpolating
    the parent token's match groups into the pattern; the static fallback
    regex is used when a referenced group is missing."""
    def __init__(self, name, pattern, fallback, group=None):
        Rule.__init__(self, name, group)
        self.pattern     = pattern
        self.fallback_re = re.compile(fallback, self.reflags)
    def match(self, lexer, parent):
        try:
            r = re.compile(self.pattern % parent.matchd, self.reflags)
        except KeyError:
            r = self.fallback_re
        return r.match(self.get_line(lexer), lexer.x)

class NocaseContextPatternRule(ContextPatternRule):
    reflags = re.IGNORECASE
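
# Hedged sketch (hypothetical patterns): ContextPatternRule interpolates
# the parent token's match groups into its pattern at match time, so a
# rule can require, say, a closing tag equal to the tag captured by the
# enclosing region's start regex; the fallback regex covers the case
# where the group is absent.
def _context_pattern_example():
    rule = ContextPatternRule('tag_end', r'</%(tag)s>', r'</[a-zA-Z]+>')
    parent = Token('start', None, 0, 0, '<b>', matchd={'tag': 'b'})
    assert rule.pattern % parent.matchd == '</b>'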

class RegionRule(Rule):
    """A rule that matches a start regex, then lexes the region's
    interior with a sub-grammar until an end regex is found. The end
    pattern is interpolated with the start match's groups, so it can
    refer back to text captured by the start regex."""
    def __init__(self, name, start, grammar, end, group=None):
        Rule.__init__(self, name, group)
        self.grammar  = grammar
        self.end      = end
        self.start_re = re.compile(start, self.reflags)
    def match(self, lexer, parent):
        return self.start_re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        t1 = self.make_token(lexer, m.group(0), 'start', parent, m.groupdict())
        yield t1
        if self.end:
            stopre = re.compile(self.end % t1.matchd, self.reflags)
        else:
            stopre = None
        for t2 in self._lex(lexer, [t1], 'end', stopre, self.grammar):
            yield t2
    def resume(self, lexer, toresume):
        """Continue lexing a region that was interrupted mid-buffer.
        The full resume chain is passed through so nested regions can be
        re-entered."""
        assert toresume
        t1 = toresume[0]
        if self.end:
            stopre = re.compile(self.end % t1.matchd, self.reflags)
        else:
            stopre = None
        for t2 in self._lex(lexer, toresume, 'end', stopre, self.grammar):
            yield t2

    def _lex(self, lexer, toresume, stopname, stopre, grammar):
        """Lex the interior of a region until stopre matches (yielding a
        token named stopname) or the buffer runs out."""
        assert toresume
        parent  = toresume[0]
        reenter = len(toresume) > 1
        null_t  = None

        done = False
        while not done and lexer.y < len(lexer.lines):
            old_y = lexer.y
            line = self.get_line(lexer)
            while not done and lexer.y == old_y and lexer.x < len(line):
                # when resuming, first re-enter the nested region rule
                if reenter:
                    reenter = False
                    for t in toresume[1].rule.resume(lexer, toresume[1:]):
                        yield t
                if lexer.y >= len(lexer.lines):
                    return
                elif lexer.x >= len(line):
                    lexer.y += 1
                    lexer.x = 0
                line = self.get_line(lexer)

                # does the region end here?
                if stopre:
                    m = stopre.match(line, lexer.x)
                    if m:
                        if null_t:
                            yield null_t
                            null_t = None
                        yield self.make_token(lexer, m.group(0), stopname, parent, m.groupdict())
                        done = True
                        break

                # otherwise, try each rule of the region's sub-grammar
                m = None
                for rule in grammar.rules:
                    m = rule.match(lexer, parent)
                    if m:
                        if null_t:
                            yield null_t
                            null_t = None
                        for t in rule.lex(lexer, parent, m):
                            yield t
                        break

                # nothing matched: fold one character into a null token
                if not m:
                    if lexer.x < len(line):
                        if not null_t:
                            null_t = Token('null', None, lexer.y, lexer.x, '', parent)
                        null_t.add_to_string(line[lexer.x])
                        lexer.x += 1
            if null_t:
                yield null_t
                null_t = None
            if not done and old_y == lexer.y:
                lexer.y += 1
                lexer.x = 0

class NocaseRegionRule(RegionRule):
    reflags = re.IGNORECASE
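
# Hedged sketch (hypothetical grammar): a RegionRule for double-quoted
# strings whose interior is lexed with a sub-grammar recognizing escape
# sequences. Tokens inside the region get fully-qualified names like
# 'string.start', 'string.escaped', 'string.null', and 'string.end'.
def _region_rule_example():
    class _EscapeGrammar(Grammar):
        rules = [PatternRule('escaped', r'\\.')]
    return RegionRule('string', r'"', _EscapeGrammar(), r'"')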

class DualRegionRule(RegionRule):
    """A region rule with two halves: start..middle is lexed with
    grammar1 and middle..end with grammar2 (e.g. the pattern and
    replacement halves of a sed-style s/../../ construct)."""
    def __init__(self, name, start, grammar1, middle, grammar2, end, group=None):
        Rule.__init__(self, name, group)
        self.start_re = re.compile(start, self.reflags)
        self.grammar1 = grammar1
        self.middle   = middle
        self.grammar2 = grammar2
        self.end      = end
    def match(self, lexer, parent):
        return self.start_re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        assert m
        t1 = self.make_token(lexer, m.group(0), 'start', parent, m.groupdict())
        yield t1

        # first half: lex with grammar1 until the middle delimiter
        t2 = None
        if self.middle:
            stopre = re.compile(self.middle % t1.matchd, self.reflags)
        else:
            stopre = None
        for t2 in self._lex(lexer, [t1], 'middle', stopre, self.grammar1):
            yield t2

        # second half: lex with grammar2 until the end delimiter
        if t2 is not None and t2.name == 'middle':
            if self.end:
                d = dict(t2.matchd)
                d.update(t1.matchd)
                stopre = re.compile(self.end % d, self.reflags)
            else:
                stopre = None
            for t3 in self._lex(lexer, [t2], 'end', stopre, self.grammar2):
                yield t3
    def resume(self, lexer, toresume):
        assert toresume, "can't resume without tokens to resume!"
        t1 = t2 = None
        if toresume[0].name == 'start':
            t1 = toresume[0]
        elif toresume[0].name == 'middle':
            t2 = toresume[0]
        else:
            raise Exception("invalid name %r" % toresume[0].name)

        if t1 is not None:
            # resume in the first half, looking for the middle delimiter
            if self.middle:
                stopre = re.compile(self.middle % t1.matchd, self.reflags)
            else:
                stopre = None
            for t2 in self._lex(lexer, toresume, 'middle', stopre, self.grammar1):
                yield t2
            toresume = [t2]
        if t2 is not None and t2.name == 'middle':
            # resume in the second half, looking for the end delimiter
            if self.end:
                d = dict(t2.matchd)
                if t1 is not None:
                    d.update(t1.matchd)
                stopre = re.compile(self.end % d, self.reflags)
            else:
                stopre = None
            for t3 in self._lex(lexer, toresume, 'end', stopre, self.grammar2):
                yield t3
class NocaseDualRegionRule(DualRegionRule):
    reflags = re.IGNORECASE
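
# Hedged sketch (hypothetical grammar): DualRegionRule models constructs
# with two delimited halves, like a sed-style s/pattern/replacement/ --
# start..middle is lexed with grammar1, middle..end with grammar2.
def _dual_region_example():
    class _Inner(Grammar):
        rules = [PatternRule('escaped', r'\\.')]
    inner = _Inner()
    return DualRegionRule('subst', r's/', inner, r'/', inner, r'/')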

class Grammar(object):
    """A set of rules. Region rules constructed with a sub-grammar of
    None are wired up here to recurse into the grammar that contains
    them."""
    rules = []
    def __init__(self):
        for rule in self.rules:
            if hasattr(rule, 'grammar') and rule.grammar is None:
                rule.grammar = self
            if hasattr(rule, 'grammar1') and rule.grammar1 is None:
                rule.grammar1 = self
            if hasattr(rule, 'grammar2') and rule.grammar2 is None:
                rule.grammar2 = self
grammar = Grammar()
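
# Hedged sketch: constructing a RegionRule with a sub-grammar of None
# makes Grammar.__init__ wire the rule back to its containing grammar,
# so the (hypothetical) rule below lexes nested parentheses recursively.
def _recursive_grammar_example():
    class _Parens(Grammar):
        rules = [RegionRule('paren', r'\(', None, r'\)')]
    return _Parens()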

class Lexer(object):
    """Drives a grammar over a list of lines, yielding tokens."""
    def __init__(self, name, grammar):
        self.name    = name
        self.grammar = grammar
        self.y       = 0
        self.x       = 0
        self.lines   = None
        assert self.grammar.rules
    def get_line(self):
        return self.lines[self.y] + '\n'
    def lex(self, lines, y=0, x=0):
        """Lex lines from scratch, starting at position (y, x)."""
        self.y      = y
        self.x      = x
        self.lines  = lines
        self.tokens = []
        for t in self._lex():
            yield t
    def resume(self, lines, y, x, token):
        """Resume lexing at (y, x), re-entering the region rules that
        enclose the given token."""
        self.y      = y
        self.x      = x
        self.lines  = lines
        self.tokens = []
        toresume = token.parents()

        # special case for dual region rules: once we are resuming from
        # the 'middle' half, the preceding 'start' ancestor is no longer
        # needed, so drop it from the chain
        i = 1
        while i < len(toresume):
            if toresume[i].name == 'middle' and toresume[i-1].name == 'start':
                del toresume[i-1]
            else:
                i += 1

        if toresume:
            for t in toresume[0].rule.resume(self, toresume):
                yield t
        for t in self._lex():
            yield t

    def _lex(self):
        parent = None
        while self.y < len(self.lines):
            null_t = None
            line = self.get_line()
            while self.x < len(line):
                # try each rule of the grammar at the current position
                m = None
                for rule in self.grammar.rules:
                    m = rule.match(self, parent)
                    if m:
                        # flush any accumulated unmatched text first
                        if null_t:
                            yield null_t
                            null_t = None
                        for t in rule.lex(self, parent, m):
                            yield t
                        break

                # a region rule may have consumed the rest of the buffer
                if self.y >= len(self.lines):
                    return
                # a rule may also have advanced y, so re-fetch the line
                line = self.get_line()
                if not m:
                    # nothing matched: fold one character into a null token
                    if self.x < len(line):
                        if null_t is None:
                            null_t = Token('null', None, self.y, self.x, '', parent)
                        null_t.add_to_string(line[self.x])
                        self.x += 1
            if null_t:
                yield null_t
            self.y += 1
            self.x = 0
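
# Hedged end-to-end sketch under a main guard; the grammar and input are
# hypothetical. Text that no rule matches is emitted as 'null' tokens.
if __name__ == '__main__':
    class _DemoGrammar(Grammar):
        rules = [
            PatternRule('word',   r'[a-zA-Z_]+'),
            PatternRule('number', r'[0-9]+'),
        ]
    demo = Lexer('demo', _DemoGrammar())
    for t in demo.lex(['abc 123', 'xyz']):
        print('%-8s %r' % (t.fqname(), t.string))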