import os, subprocess, re, tempfile
from subprocess import Popen, PIPE, STDOUT

import buffer, buffer.console
import completer, default, dirutil, regex, util, window
import mode.mini
from point import Point

from method import Method, Argument

class DumpContext(Method):
    '''Dump the current mode context (context debugging)'''
    def _execute(self, w, **vargs):
        lines = []
        if w.mode.context:
            for i in range(len(w.mode.context.namelines)):
                lines.append("LINE %d: %r" % (i + 1, w.mode.context.namelines[i]))
        else:
            lines.append("no context")
        output = "\n".join(lines)
        w.application.data_buffer("context-dump", output, switch_to=True)

class DumpRegions(Method):
    '''Dump highlighted regions (region highlighting debugging)'''
    def _execute(self, w, **vargs):
        lines = []
        for hr in w.application.highlighted_ranges:
            # unpack into a fresh name so we don't clobber the current window
            (w2, p1, p2, fg, bg) = hr
            lines.append("%r %s %s" % (w2, p1, p2))
        output = "\n".join(lines)
        w.application.data_buffer("region-dump", output, switch_to=True)

class DumpMarkers(Method):
    '''Dump all tab markers (tab debugging)'''
    def _execute(self, w, **vargs):
        lines = []
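        # the tabber caches per-line indentation data in .lines and .record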
        if w.mode.tabber:
            keys = sorted(w.mode.tabber.lines)
            for i in keys:
                line = w.mode.tabber.lines[i]
                lines.append("LINE %d: %r" % (i + 1, line))
                lines.append("    %s" % repr(w.mode.tabber.record[i]))
        else:
            lines.append("no tokens")
        output = "\n".join(lines)
        w.application.data_buffer("marker-dump", output, switch_to=True)

class DumpTokens(Method):
    '''Dump all lexical tokens (syntax highlighting debugging)'''
    def _execute(self, w, **vargs):
        lines = []
        if w.mode.name in w.buffer.highlights:
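            # the highlighter stores one list of tokens per buffer line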
            tokens = w.buffer.highlights[w.mode.name].tokens
            for i in range(len(tokens)):
                lines.append("LINE %d" % (i + 1))
                group = tokens[i]
                for token in group:
                    fqname = token.fqname()
                    p1 = Point(token.x, token.y)
                    if token.parent is None:
                        pcoord = ''
                    else:
                        pcoord = '[%d, %d]' % (token.parent.x, token.parent.y)
                    if fqname in w.mode.ghist and p1 in w.mode.ghist[fqname]:
                        g = '[' + w.mode.ghist[fqname][p1].name() + ']'
                    else:
                        g = ''
                    fields = (str(p1), pcoord, fqname, g, token.string)
                    lines.append('    %-10s %-10s %-20s %-10s %r' % fields)
        else:
            lines.append("no tokens")
        output = "\n".join(lines)
        w.application.data_buffer("token-dump", output, switch_to=True)

class DumpAggregateTokenData(Method):
    '''Dump all lexical tokens into an aggregated format'''
    def _execute(self, w, **vargs):
        lines = []
        if w.mode.name in w.buffer.highlights:
            tokens = w.buffer.highlights[w.mode.name].tokens
            for group in tokens:
                for token in group:
                    s1 = token.name
                    s2 = token.fqname()
                    n = len(token.string)
                    lines.append('%s %s %3d %s' % (s1, s2, n, token.string))
        output = "\n".join(lines)
        w.application.data_buffer("token-dump", output, switch_to=True)

class GetToken(Method):
    '''View type and data of the "current" token'''
    def _execute(self, w, **vargs):
        if w.mode.name not in w.buffer.highlights:
            w.set_error('No Token Data')
            return

        token = w.get_token()
        if token is None:
            w.set_error('No Token')
        else:
            # HACK: fix up our internal tab representation
            s = regex.internal_tab.sub('\t', token.string)
            w.set_error('Token: %s (%s)' % (repr(s)[1:], token.fqname()))

class TokenComplete(Method):
    '''Complete token names based on other tokens in the buffer'''
    _mini_prompt = 'Token Complete'
    _tabber      = completer.TokenCompleter(None)
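    # Dummy objects stand in for a window: the tabber is only given a
    # buffer.method attribute that points back at this Method instance.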
    class Dummy(object): pass
    def _complete(self, s):
        dw = self.Dummy()
        dw.buffer = self.Dummy()
        dw.buffer.method = self
        return self._tabber.tab_string(s, dw)
    def _execute(self, w, **vargs):
        self.old_window = w
        t = w.get_token2()
        if t is None:
            w.set_error("No token to complete!")
            return
        elif regex.reserved_token_names.match(t.name):
            w.set_error("Will not complete reserved token")
            return

        # _complete() builds the same dummy window the tabber expects
        (s2, exists, complete) = self._complete(t.string)

        p1 = Point(t.x, t.y)
        p2 = Point(t.end_x(), t.y)
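        # accepting a completion replaces the original token text (p1..p2)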
        def callback(s):
            w.buffer.delete(p1, p2)
            w.insert_string(p1, s)
            w.application.close_mini_buffer()

        if exists and complete:
            w.set_error("Unique completion: %r" % s2)
            callback(s2)
        else:
            w.application.open_mini_buffer("%s: " % self._mini_prompt, callback,
                                           method=self, tabber=self._tabber,
                                           startvalue=s2)

class OpenConsole(Method):
    '''Evaluate python expressions (for advanced use and debugging only)'''
    def execute(self, w, **vargs):
        a = w.application
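        # create the console buffer (and a window onto it) on first use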
        if not a.has_buffer_name('*Console*'):
            b = buffer.console.ConsoleBuffer()
            a.add_buffer(b)
            window.Window(b, a)
        b = a.bufferlist.get_buffer_by_name('*Console*')
        if a.window().buffer is not b:
            a.switch_buffer(b)
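        # no-op minibuffer callback; input is handled by the 'consolemini' mode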
        f = lambda x: None
        w.application.open_mini_buffer('>>> ', f, self, None, 'consolemini')