import curses, re
import regex, util
from point import Point

def escape(s):
    return re.escape(s)

class Token(object):
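    """A single lexed token: a named, colored piece of text at (y, x).

    Tokens produced inside a RegionRule carry a parent token and a link
    ('start', 'middleN' or 'end') describing their role in that region;
    matchd holds the regex group dictionary from the match that created them.
    """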
    def __init__(self, name, rule, y, x, s, color=None, parent=None, matchd={}, link=None):
        self.name    = name
        self.rule    = rule
        self.y       = y
        self.x       = x
        self.string  = s
        self.color   = color
        self.parent  = parent
        self.matchd  = matchd
        self.link    = link
        self._debug  = False
        #self._fqlist = None
        #self._fqname = None
        #self._fqlist = self.mkfqlist()
        #self._fqname = self.mkfqname()
        assert parent is None or hasattr(parent, 'name'), 'invalid parent: %r' % parent

    def isa(self, *names):
        return self.name in names
    def match(self, name, string):
        return self.name == name and self.string == string
    def matchs(self, name, strings):
        return self.name == name and self.string in strings
    def matchp(self, pairs):
        for (name, string) in pairs:
            if self.match(name, string):
                return True
        return False

    def fqisa(self, *names):
        return self.fqname() in names
    def fqmatch(self, name, string):
        return self.fqname() == name and self.string == string
    def fqmatchs(self, name, strings):
        return self.fqname() == name and self.string in strings
    def fqmatchp(self, pairs):
        for (name, string) in pairs:
            if self.fqmatch(name, string):
                return True
        return False

    def parents(self):
        if self.parent is not None:
            parents = self.parent.parents()
            parents.append(self.parent)
            return parents
        else:
            return []
    def domain(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link and not self.link.startswith('middle'):
            names.append(self.rule.name)
        return names
    #def fqlist(self):
    #    if self._fqlist is None:
    #        self._fqlist = self.mkfqlist()
    #    return self._fqlist
    #def mkfqlist(self):
    def fqlist(self):
        if self.parent is not None:
            names = self.parent.domain()
        else:
            names = []
        if self.link == 'start':
            names.append(self.rule.name)
        names.append(self.name)
        return names
    #def fqname(self):
    #    if self._fqname is None:
    #        self._fqname = self.mkfqname()
    #    return self._fqname
    #def mkfqname(self):
    def fqname(self):
        names = self.fqlist()
        return '.'.join(names)
    def copy(self):
        return Token(self.name, self.rule, self.y, self.x, self.string,
                     self.color, self.parent, self.matchd, self.link)
    def add_to_string(self, s):
        self.string += s
    def end_x(self):
        return self.x + len(self.string)
    # note that if parent is not None, this comparison recurses up the parent chain
    def __eq__(self, other):
        return (other is not None and
                self.y == other.y and self.x == other.x and
                self.name == other.name and
                self.string == other.string and
                self.parent == other.parent)
    def __repr__(self):
        if len(self.string) < 10:
            s = self.string
        else:
            s = self.string[:10] + '...'
        fields = (self.fqname(), self.rule, self.y, self.x, s)
        return "<Token(%r, %r, %d, %d, %r)>" % fields

class Rule(object):
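    """Base class for lexer rules.

    Subclasses implement match(), returning a match object (or other truthy
    value) for the current lexer position, and lex(), a generator yielding
    the Token objects for that match.
    """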
    reflags = 0
    def __init__(self, name):
        self.name = name
    def match(self, lexer, parent):
        raise Exception("not implemented")
    def lex(self, lexer, parent, match):
        raise Exception("not implemented")
    def make_token(self, lexer, s, name, parent=None, matchd={}, link=None):
        t = Token(name, self, lexer.y, lexer.x, s, None, parent, matchd, link)
        t.color = lexer.get_color(t)
        lexer.x += len(s)
        if lexer.x > len(lexer.lines[lexer.y]):
            lexer.x = 0
            lexer.y += 1
        return t
    def get_line(self, lexer, y=None):
        if y is None:
            return lexer.lines[lexer.y] + '\n'
        else:
            return lexer.lines[y] + '\n'

class PatternRule(Rule):
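    """Match a single regex at the current position and yield one token."""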
    def __init__(self, name, pattern):
        Rule.__init__(self, name)
        self.pattern = pattern
        self.re = re.compile(self.pattern, self.reflags)
    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        if m:
            yield self.make_token(lexer, m.group(0), self.name, parent, m.groupdict())
        raise StopIteration
class NocasePatternRule(PatternRule):
    reflags = re.IGNORECASE

class PatternMatchRule(PatternRule):
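    """Like PatternRule, but yields one token per regex group, named by the
    corresponding entry in *names; empty groups are skipped."""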
    reflags = 0
    def __init__(self, name, pattern, *names):
        PatternRule.__init__(self, name, pattern)
        self.names = names
    def match(self, lexer, parent):
        return self.re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        if not m:
            raise StopIteration
        for group, name in zip(m.groups(), self.names):
            if not group:
                continue
            yield self.make_token(lexer, group, name, parent, m.groupdict())
        raise StopIteration
class NocasePatternMatchRule(PatternMatchRule):
    reflags = re.IGNORECASE

class FakeWindow(object):
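    """Minimal stand-in for a window, so a mode can be instantiated for its
    grammar and colors without a real window."""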
    def __init__(self, app, b):
        self.application = app
        self.buffer = b
class OverrideError(Exception):
    pass
class OverridePatternRule(PatternRule):
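    """During a full lex, look up the mode named by the 'mode' group and
    register it in the lexer's gstack under the 'token' group, so that the
    named region is later highlighted with that mode's grammar."""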
    def lex(self, lexer, parent, m):
        if m:
            d = m.groupdict()
            if lexer.action == 'lex':
                a = lexer.mode.window.application
                try:
                    b = lexer.mode.window.buffer
                    modecls = a.modes[d['mode']]
                    mode = modecls(FakeWindow(lexer.mode.window.application, b))
                    if hasattr(mode, 'grammar') and hasattr(mode, 'colors'):
                        lexer.mode.gstack['%s' % d['token']] = mode
                    else:
                        raise OverrideError("argh: %r" % mode)
                except (KeyError, AttributeError, OverrideError):
                    # uncomment raise to fix dynamic highlighting
                    #raise
                    pass
            yield self.make_token(lexer, m.group(0), self.name, parent, d)
        raise StopIteration

class ContextPatternRule(PatternRule):
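    """PatternRule whose pattern is %-interpolated with the parent token's
    match dictionary; falls back to a static pattern when a key is missing."""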
    def __init__(self, name, pattern, fallback):
        Rule.__init__(self, name)
        self.pattern     = pattern
        self.fallback_re = re.compile(fallback, self.reflags)
    def match(self, lexer, parent):
        try:
            r = re.compile(self.pattern % parent.matchd, self.reflags)
        except KeyError:
            r = self.fallback_re
        return r.match(self.get_line(lexer), lexer.x)
class NocaseContextPatternRule(ContextPatternRule):
    reflags = re.IGNORECASE

class PatternGroupRule(PatternRule):
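    """Match an ordered sequence of (token-name, pattern) pairs starting at
    the current position; either every pattern matches or the rule fails."""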
    def __init__(self, name, *args):
        assert args and len(args) % 2 == 0
        i = 0
        pairs = []
        while i < len(args):
            tokname, pattern = args[i], args[i+1]
            pairs.append((tokname, re.compile(pattern, self.reflags)))
            i += 2
        Rule.__init__(self, name)
        self.pairs = tuple(pairs)
    def match(self, lexer, parent):
        (x, y) = (lexer.x, lexer.y)
        matches = []
        for (tokname, tokre) in self.pairs:
            if y >= len(lexer.lines):
                return []
            line = self.get_line(lexer, y)
            m    = tokre.match(line, x)
            if m:
                x += len(m.group(0))
                if x >= len(line):
                    x = 0
                    y += 1
                matches.append((tokname, m))
            else:
                return []
        assert len(matches) == len(self.pairs)
        return matches
    def lex(self, lexer, parent, matches):
        if matches:
            for (tokname, m) in matches:
                yield self.make_token(lexer, m.group(0), tokname, parent, m.groupdict())
        raise StopIteration
class NocasePatternGroupRule(PatternGroupRule):
    reflags = re.IGNORECASE

class RegionRule(Rule):
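    """Lex a region delimited by a start pattern and one or more
    (grammar, stop-pattern) pairs.

    The start match yields a 'start' token; each grammar then lexes input
    until its stop pattern matches, yielding 'middleN' tokens between
    sub-regions and an 'end' token when the last stop pattern matches.  A
    final grammar given without a stop pattern lexes to the end of input.
    """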
    def __init__(self, name, *args):
        Rule.__init__(self, name)
        assert len(args) > 1
        args = list(args)
        self.pairs    = []
        self.start_re = re.compile(args.pop(0), self.reflags)
        while len(args) > 1:
            grammar = args.pop(0)
            pattern = args.pop(0)
            #assert hasattr(grammar, 'rules'), repr(grammar)
            assert type(pattern) == type(''), repr(pattern)
            self.pairs.append((grammar, pattern))
        if len(args) == 1:
            # trailing grammar with no stop pattern: it lexes to the end of input
            self.pairs.append((args.pop(0), None))

    def match(self, lexer, parent):
        return self.start_re.match(self.get_line(lexer), lexer.x)
    def lex(self, lexer, parent, m):
        assert m
        # ok, so since we had a match, we need to create our start token, which
        # will be the ancestor of all other tokens matched in this region
        matchd = m.groupdict()
        for (key, val) in matchd.iteritems():
            matchd[key] = escape(val)
        parent = self.make_token(lexer, m.group(0), 'start', parent, matchd, 'start')
        yield parent

        # now we will loop over the different pairs of grammars/stop-patterns in
        # this region, and yield the resulting tokens; we start at 0
        for tok in self._lex_loop(lexer, [parent], matchd, 0):
            yield tok
        raise StopIteration
    def resume(self, lexer, toresume):
        assert toresume, "can't resume without tokens to resume!"
        # ok, so we need to figure out in which of the grammars of our region
        # we are resuming. to do this we calculate i, a position in our list
        # of grammar/stop-pattern pairs
        if toresume[0].link == 'start':
            i = 0
        else:
            m = regex.middle_token_name.match(toresume[0].link)
            assert m
            i = int(m.group(1)) + 1
            assert i > 0 and i < len(self.pairs)

        # now we will loop over the different pairs of grammars/stop-patterns in
        # this region, and yield the resulting tokens; we start at i
        matchd = toresume[0].matchd
        if not matchd and toresume[0].parent:
            p = toresume[0].parent
            while not p.matchd and p.parent:
                p = p.parent
            matchd = p.matchd
        #for tok in self._lex_loop(lexer, toresume, toresume[0].matchd, i):
        for tok in self._lex_loop(lexer, toresume, matchd, i):
            yield tok
        raise StopIteration
    def _lex_loop(self, lexer, toresume, matchd, i):
        # we need to loop over our grammar/stop-pattern pairs
        while i < len(self.pairs):
            # for each one, we will compile our stop-regex, and figure out the
            # name of the stop token to be created if this stop-regex matches.
            fqname = toresume[0].fqname()
            rname = toresume[0].rule.name
            p = Point(toresume[0].x, toresume[0].y)
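            # dynamic grammar override: gstack holds modes registered by the
            # Override* rules (keyed by token path or rule name), and ghist
            # records which mode was used at which point so that a later
            # resume() sees the same grammar. otherwise fall back to the
            # grammar configured for this pair (or the lexer's own grammar).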
            if fqname in lexer.mode.ghist and p in lexer.mode.ghist[fqname]:
                mode = lexer.mode.ghist[fqname][p]
                grammar = mode.grammar
            elif fqname in lexer.mode.gstack:
                mode = lexer.mode.gstack[fqname]
                grammar = mode.grammar
                lexer.mode.ghist.setdefault(fqname, {})
                lexer.mode.ghist[fqname][p] = mode
                del lexer.mode.gstack[fqname]
            elif rname in lexer.mode.ghist and p in lexer.mode.ghist[rname]:
                mode = lexer.mode.ghist[rname][p]
                grammar = mode.grammar
            elif rname in lexer.mode.gstack:
                mode = lexer.mode.gstack[rname]
                grammar = mode.grammar
                lexer.mode.ghist.setdefault(rname, {})
                lexer.mode.ghist[rname][p] = mode
                if i == len(self.pairs) - 1:
                    del lexer.mode.gstack[rname]
            else:
                mode = lexer.mode
                grammar = self.pairs[i][0]
                if grammar is None:
                    grammar = lexer.grammar
            lexer.mstack.append(mode)

            if self.pairs[i][1]:
                try:
                    stopre = re.compile(self.pairs[i][1] % matchd, self.reflags)
                except:
                    raise Exception("%r\n%r\n%r" % (self.pairs[i][1], matchd, self.reflags))
            else:
                stopre = None
            if i == len(self.pairs) - 1:
                tokname = 'end'
            else:
                tokname = 'middle%d' % i

            # ok, so now loop over all the tokens in the current grammar, until
            # the stop-token (if any) is found, and return each result as we get
            # it.
            tok = None
            for tok in self._lex(lexer, toresume, tokname, stopre, grammar):
                yield tok

            # ok, so now either we found the stop-token, and have a new parent
            # for future tokens (if any), or we are done.
            if tok is not None and tok.name == tokname:
                toresume = [tok]
                matchd.update(tok.matchd)
            else:
                raise StopIteration
            # this should have already gotten done by _lex
            #lexer.mstack.pop(-1)
            i += 1
        # assuming we make it through all our grammars, and find the end-token,
        # then we need to signal that we are done.
        raise StopIteration
    def _lex(self, lexer, toresume, stopname, stopre, grammar):
        assert toresume
        parent  = toresume[0]
        reenter = len(toresume) > 1
        null_t  = None

        # ok, so there are only two ways we want to exit this loop: either we
        # lex the whole document, or we encounter the stop-token.
        done = False
        while not done and lexer.y < len(lexer.lines):
            line  = self.get_line(lexer)
            old_y = lexer.y
            while not done and lexer.y == old_y and lexer.x < len(line):
                # ok, so reenter gets priority, since the current input might be
                # intended for a nested grammar; so handle it here
                if reenter:
                    reenter = False
                    for t in toresume[1].rule.resume(lexer, toresume[1:]):
                        yield t
                # since we might have changed our x/y coordinates, we need to
                # do some checks here, and maybe finish or change our coordinates
                if lexer.y >= len(lexer.lines):
                    raise StopIteration
                elif lexer.x >= len(line):
                    lexer.y += 1
                    lexer.x = 0

                # ok, so get the *now* current line
                line = self.get_line(lexer)

                if stopre:
                    # if we are looking for a stop-token, do that check now
                    m = stopre.match(line, lexer.x)
                    if m:
                        if null_t:
                            # if we have a null token waiting, return it first.
                            yield null_t
                            null_t = None
                        # ok, now return the stop-token, and signal that we are
                        # done and no more input is to be consumed
                        lexer.mstack.pop(-1)
                        yield self.make_token(lexer, m.group(0), stopname,
                                              parent, m.groupdict(), stopname)
                        done = True
                        break

                m = None
                # start checking our rules to see if we can match the input
                for rule in grammar.rules:
                    m = rule.match(lexer, parent)
                    if m:
                        # ok great, we have a match
                        if null_t:
                            # if we have a null token waiting, return it first.
                            yield null_t
                            null_t = None
                        # ok, now for every token this rule has created, we
                        # yield them, one by one.
                        for t in rule.lex(lexer, parent, m):
                            yield t
                        break

                if not m:
                    # we didn't find a match on a rule, so add this character to
                    # the current null token (creating a new one if necessary);
                    if not null_t:
                        null_t = Token('null', None, lexer.y, lexer.x, '', None, parent)
                        null_t.color = lexer.get_color(null_t)
                    null_t.add_to_string(line[lexer.x])
                    lexer.x += 1

            # ok, we are at the end of a line of input. so, if we have a null
            # token waiting, now is the time to return it
            if null_t:
                yield null_t
                null_t = None
            if not done and old_y == lexer.y:
                lexer.y += 1
                lexer.x = 0
        raise StopIteration
class NocaseRegionRule(RegionRule):
    reflags = re.IGNORECASE

class OverrideRegionRule(RegionRule):
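    """RegionRule whose start pattern may capture a 'grammar' group naming a
    mode; if that mode exists, the region's body is lexed with the mode's
    grammar (via the lexer's gstack) instead of the configured one."""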
    def lex(self, lexer, parent, m):
        assert m
        d = m.groupdict()
        if 'grammar' in d:
            a = lexer.mode.window.application
            name = d['grammar'].lower()
            if name in a.modes:
                modecls = a.modes[name]
                b       = lexer.mode.window.buffer
                fw      = FakeWindow(lexer.mode.window.application, b)
                mode    = modecls(fw)
                assert hasattr(mode, 'grammar') and hasattr(mode, 'colors')

                if parent is None:
                    path = self.name
                else:
                    path = '.'.join(parent.domain() + [self.name])
                lexer.mode.gstack[path] = mode
        return RegionRule.lex(self, lexer, parent, m)

class Grammar(object):
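    """An ordered list of rules; the first rule to match at a position wins."""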
    rules = []
grammar = Grammar()

class Lexer(object):
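    """Drive lexing of a buffer's lines with a mode's grammar.

    lex() tokenizes from scratch; resume() restarts lexing from a previously
    emitted token so earlier, unchanged regions keep their state.
    """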
    def __init__(self, mode, grammar):
        self.mode    = mode
        self.mstack  = []
        self.grammar = grammar
        self.y       = 0
        self.x       = 0
        self.lines   = None
        assert self.grammar.rules
    def get_line(self):
        return self.lines[self.y] + '\n'
    def lex_all(self, lines):
        lextokens = [[] for l in lines]
        for t in self.lex(lines):
            lextokens[t.y].append(t)
        return lextokens

    def lex(self, lines, y=0, x=0):
        self.action      = 'lex'
        self.y           = y
        self.x           = x
        self.lines       = lines
        self.mstack      = []
        self.mode.ghist  = {}
        self.mode.gstack = {}
        for t in self._lex():
            yield t
        del self.action
        raise StopIteration
    def resume(self, lines, y, x, token):
        self.action = 'resume'
        self.y      = y
        self.x      = x
        self.lines  = lines
        self.mstack = []
        toresume    = token.parents()

        i = 1
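        # keep only the most recent start/middle/end token for each region: a
        # later middle or end token supersedes the tokens before it, since it
        # already identifies the sub-region to resume in.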
        while i < len(toresume):
            if toresume[i].link and toresume[i].link != 'start':
                del toresume[i-1]
            else:
                i += 1

        if toresume:
            for t in toresume[0].rule.resume(self, toresume):
                #t._debug = True
                yield t
        for t in self._lex():
            #t._debug = True
            yield t
        del self.action
        raise StopIteration

    def _lex(self):
        parent = None
        while self.y < len(self.lines):
            null_t = None
            line = self.get_line()
            while self.x < len(line):
                m = None
                for rule in self.grammar.rules:
                    m = rule.match(self, parent)
                    if m:
                        if null_t:
                            yield null_t
                            null_t = None
                        for t in rule.lex(self, parent, m):
                            yield t
                        break

                if self.y >= len(self.lines):
                    break
                line = self.get_line()
                if not m:
                    if self.x < len(line):
                        if null_t is None:
                            null_t = Token('null', None, self.y, self.x, '', None, parent)
                            null_t.color = self.get_color(null_t)
                        null_t.add_to_string(line[self.x])
                        self.x += 1
            if null_t:
                yield null_t
            self.y += 1
            self.x = 0
        raise StopIteration

    def get_color(self, token):
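        """Resolve a token's color from its fully-qualified name, falling back
        to successively shorter suffixes and finally to the application's
        default color; results are cached per name."""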
        app = self.mode.window.application
        name = '.'.join(token.fqlist())

        if name in app.token_colors:
            return app.token_colors[name]
        elif name in app.cached_colors:
            return app.cached_colors[name]

        name2 = name
        while name2:
            try:
                i = name2.index('.')
                name2 = name2[i + 1:]
            except ValueError:
                break
            if name2 in app.token_colors:
                c = app.token_colors[name2]
                app.cached_colors[name] = c
                return c

        app.cached_colors[name] = app.config['default_color']
        return app.config['default_color']
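
# A rough usage sketch (illustrative only: the grammar, rule names and
# patterns below are made up, and in practice a mode supplies the Grammar):
#
#     class StringGrammar(Grammar):
#         rules = [PatternRule('escaped', r'\\.')]
#
#     class ExampleGrammar(Grammar):
#         rules = [
#             PatternRule('comment', r'#.*$'),
#             RegionRule('string', r'"', StringGrammar(), r'"'),
#             PatternRule('word', r'[a-zA-Z_][a-zA-Z0-9_]*'),
#         ]
#
# A mode's highlighter would then create Lexer(mode, ExampleGrammar()) and
# iterate lexer.lex(lines) (or lexer.resume(...) after an edit) to get the
# stream of Token objects.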