import commands, os.path, re, string, sys, traceback import color, completer, context, default, mode, method, regex, tab, method.introspect from point import Point from render import RenderString from lex import Grammar, PatternRule, RegionRule, OverridePatternRule from parse import Any, And, Or, Optional, Name, Match, Matchs from method import Method try: import bike has_bike = True except ImportError: has_bike = False class StringGrammar1(Grammar): rules = [ PatternRule(r'octal', r'\\[0-7]{3}'), PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'), PatternRule(r'escaped', r'\\.'), PatternRule(r'data', r"[^\\']+"), ] class StringGrammar2(Grammar): rules = [ PatternRule(r'octal', r'\\[0-7]{3}'), PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'), PatternRule(r'escaped', r'\\.'), PatternRule(r'data', r'[^\\"]+'), ] class StringGrammar3(Grammar): rules = [ PatternRule(r'octal', r'\\[0-7]{3}'), PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'), PatternRule(r'escaped', r'\\.'), PatternRule(r'data', r"(?:[^\\']|'(?!')|''(?!'))+"), ] class StringGrammar4(Grammar): rules = [ PatternRule(r'octal', r'\\[0-7]{3}'), PatternRule(r'hex', r'\\x[0-9a-fA-F]{2}'), PatternRule(r'escaped', r'\\.'), PatternRule(r'data', r'(?:[^\\"]|"(?!")|""(?!"))+'), ] class PythonGrammar(Grammar): rules = [ PatternRule(r'functionname', r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'), PatternRule(r'classname', r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'), PatternRule(r'python_reserved', r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'), PatternRule(r'python_keyword', r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'), PatternRule(r"python_builtin", r'(?>=|<<=|\*\*='), PatternRule(r"integer", r"(?|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"), OverridePatternRule(r'comment', r'#@@:(?P[.a-zA-Z0-9_]+):(?P[.a-zA-Z0-9_]+) *$'), PatternRule(r'comment', r'#.*$'), PatternRule(r'continuation', r'\\\n$'), PatternRule(r'spaces', r' +'), PatternRule(r'eol', r'\n$'), ] class PythonTabber(tab.StackTabber): # NOTE: yield might initially seem like an endlevel name, but it's not one. # NOTE: return should be an endlevel name but for now it can't b endlevel_names = ('pass', 'raise', 'break', 'continue') startlevel_names = ('if', 'try', 'class', 'def', 'for', 'while', 'try') def __init__(self, m): tab.StackTabber.__init__(self, m) self.base_level = 0 def is_base(self, y): if y == 0: # we always know that line 0 is indented at the 0 level return True tokens = self.get_tokens(y) t0 = tokens[0] if t0.name == 'python_keyword' and t0.string in self.startlevel_names: # if a line has no whitespace and beings with something like # 'while','class','def','if',etc. then we can start at it return True else: # otherwise, we can't be sure that its level is correct return False def get_level(self, y): self._calc_level(y) return self.lines.get(y) def _calc_level(self, y): # ok, so first remember where we are going, and find our starting point target = y y = max(0, y - 1) while not self.is_base(y) and y > 0: y -= 1 # ok, so clear out our stack and then loop over each line self.popped = False self.markers = [] while y <= target: self.continued = False self.last_popped = self.popped self.popped = False tokens = self.get_tokens(y) currlvl = self.get_curr_level() # if we were continuing, let's pop that previous continuation token # and note that we're continuing if self.markers and self.markers[-1].name == 'cont': self.continued = True self._pop() # if we haven't reached the target-line yet, we can detect how many # levels of unindention, if any, the user chose on previous lines if y < target and len(tokens) > 2: if self.token_is_space(y, 0): l = len(tokens[0].string) else: l = 0 while currlvl > l: self._pop() currlvl = self.get_curr_level() self.popped = True # ok, having done all that, we can now process each token on the line for i in range(0, len(tokens)): currlvl = self._handle_token(currlvl, y, i) # so let's store the level for this line, as well as some debugging self.lines[y] = currlvl self.record[y] = tuple(self.markers) y += 1 def _handle_close_token(self, currlvl, y, i): try: return tab.StackTabber._handle_close_token(self, currlvl, y, i) except: return currlvl def _handle_other_token(self, currlvl, y, i): w = self.mode.tabwidth token = self.get_token(y, i) fqname = token.fqname() if fqname == 'continuation': # we need to pop the indentation level over, unless last line was # also a continued line if self.continued: self._opt_append('cont', currlvl, y) else: self._opt_append('cont', currlvl + w, y) elif fqname == 'string.start': # while inside of a string, there is no indention leve self._opt_append('string', None, y) elif fqname == 'string.end': # since we're done with the string, resume our indentation level self._opt_pop('string') elif fqname == 'delimiter': # we only really care about a colon as part of a one-line statement, # i.e. "while ok: foo()" or "if True: print 3" if token.string == ':': if self.markers and self.markers[-1].name in ('[', '{', '('): pass elif self.is_rightmost_token(y, i): pass else: self._pop() elif fqname == 'python_keyword': if token.string in self.endlevel_names and self.is_leftmost_token(y, i): # we know we'll unindent at least once self._pop() self.popped = True elif token.string in self.startlevel_names and self.is_leftmost_token(y, i): # we know we will indent exactly once self._append(token.string, currlvl + w, y) elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first if/elif if not self.popped and not self.last_popped and self._peek_until('if', 'elif'): self._pop_until('if', 'elif') currlvl = self.get_curr_level() self._append(token.string, currlvl + w, y) elif token.string == 'except' and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first try if not self.popped and not self.last_popped: self._pop_until('try') currlvl = self.get_curr_level() self._append(token.string, currlvl + w, y) elif token.string == 'finally' and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first try/except if not self.popped and not self.last_popped: self._pop_until('try', 'except') currlvl = self.get_curr_level() self._append(token.string, currlvl + w, y) return currlvl class PythonCheckSyntax(method.Method): '''Check the syntax of the current python file''' def _execute(self, w, **vargs): pythonlib = w.application.config.get('python.lib') if pythonlib: sys.path.insert(0, pythonlib) source = w.buffer.make_string() try: code = compile(source, w.buffer.path, 'exec') w.set_error("Syntax OK") except Exception, e: output = traceback.format_exc() w.application.data_buffer("*PythonSyntax*", output, switch_to=True, modename='error') del sys.path[0] class PythonDictCleanup(method.Method): '''Align assignment blocks and literal dictionaries''' def _execute(self, w, **vargs): cursor = w.logical_cursor() b = w.buffer # so this is where we will store the groups that we find groups_by_line = {} # the regex we will try regexes = [regex.python_dict_cleanup, regex.python_assign_cleanup] # if we aren't in a hash, inform the user and exit line = b.lines[cursor.y] myregex = None for r in regexes: if r.match(line): myregex = r if myregex is None: raise Exception, "Not a python dict line" groups_by_line[cursor.y] = myregex.match(line).groups() # find the beginning of this hash block start = 0 i = cursor.y - 1 while i >= 0: line = b.lines[i] m = myregex.match(line) if not m: start = i + 1 break else: groups_by_line[i] = m.groups() i -= 1 # find the end of this hash block end = len(b.lines) - 1 i = cursor.y + 1 while i < len(b.lines): line = b.lines[i] m = myregex.match(line) if not m: end = i - 1 break else: groups_by_line[i] = m.groups() i += 1 # assume that the least indented line is correct indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line]) # find the longest hash key to base all the other padding on key_w = max([len(groups_by_line[k][1]) for k in groups_by_line]) # for each line, format it correctly keys = groups_by_line.keys() keys.sort() data = '' for i in keys: indent_pad = ' ' * indent_w key = groups_by_line[i][1] sep = groups_by_line[i][3] value = groups_by_line[i][5] key_pad = ' ' * (key_w - len(key)) if sep == '=': data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n' else: data += indent_pad + key + sep + ' ' + key_pad + value + '\n' # remove the old text and add the new start_p = Point(0, start) if end + 1 < len(w.buffer.lines): end_p = Point(0, end + 1) else: end_p = Point(len(w.buffer.lines[-1]), len(w.buffer.lines) - 1) w.delete(start_p, end_p) w.insert_string(start_p, data) class PythonInsertTripleSquotes(method.Method): '''Insert a triple-quoted string using single-quotes''' _q = "'''" def _execute(self, w, **vargs): w.insert_string_at_cursor('%s%s' % (self._q, self._q)) for i in range(0, 3): w.backward() class PythonInsertTripleDquotes(PythonInsertTripleSquotes): '''Insert a triple-quoted string using double-quotes''' _q = '"""' class PythonInitNames(method.Method): '''Jump to a function defined in this module''' def _execute(self, w, **vargs): w.mode.context.build_name_map() w.application.set_error("Initialized name maps") class PythonSemanticComplete(method.introspect.TokenComplete): _mini_prompt = 'Semantic Complete' def _min_completion(self, w, t): a = w.application a.methods['ipython-path-start'].execute(w, switch=False) name = buffer.IperlBuffer.create_name(w.buffer) b = a.get_buffer_by_name(name) line = w.buffer.lines[t.y] (x1, x2) = (t.x, t.end_x()) candidates = [t.string + s for s in b.completions(line[x1:x2])] minlen = None for candidate in candidates: if minlen is None: minlen = len(candidate) else: minlen = min(minlen, len(candidate)) return self._prune_candidates(t, minlen, candidates) class PythonGotoName(method.Method): '''Jump to a class or function defined in this module''' args = [method.Argument("name", type(""), "pythonname", "Goto Name: ")] title = 'Name' def _get_dict(self, w): return w.mode.context.get_names() def _execute(self, w, **vargs): name = vargs['name'] d = self._get_dict(w) if name in d: w.goto(Point(0, d[name])) else: w.application.set_error("%r %r was not found" % (title, name)) class PythonGotoFunction(PythonGotoName): '''Jump to a function defined in this module''' args = [method.Argument("name", type(""), "pythonfunction", "Goto Function: ")] title = 'Function' def _get_dict(self, w): return w.mode.context.get_functions() class PythonGotoClass(method.Method): '''Jump to a class defined in this module''' args = [method.Argument("name", type(""), "pythonclass", "Goto Class: ")] title = 'Class' def _get_dict(self, w): return w.mode.context.get_classes() class PythonListNames(method.Method): '''Show the user all functions defined in this module''' def _execute(self, w, **vargs): names = w.mode.context.get_names() output = '\n'.join(sorted(names)) + "\n" w.application.data_buffer("*Python-List-Names*", output, switch_to=True) class PythonBrmFindReferences(method.Method): def _execute(self, w, **vargs): if w.mode.brm is None: w.set_error('bicycle repairman not installed') return path = w.buffer.path cursor = w.logical_cursor() y, x = cursor.yx() refs = w.mode.brm.findReferencesByCoordinates(path, y, x) lines = [] n = 0 for r in refs: f, n, c = r.filename, r.lineno, r.confidence s = '%s:%d: %3d%% confidence' % (f, n, c) lines.append(s) n += 1 if n == 0: w.set_error('no references found') return data = '\n'.join(lines) w.application.data_buffer("*References*", data, switch_to=True) if n == 1: w.set_error('1 reference found') else: w.set_error('%d references found' % n) class PythonNameCompleter(completer.Completer): def _get_dict(self, w): return w.buffer.method.old_window.mode.context.get_names() def get_candidates(self, s, w=None): return [n for n in self._get_dict(w) if n.startswith(s)] class PythonFunctionCompleter(PythonNameCompleter): def _get_dict(self, w): return w.buffer.method.old_window.mode.context.get_functions() class PythonClassCompleter(completer.Completer): def _get_dict(self, w): return w.buffer.method.old_window.mode.context.get_classes() CLASS_MATCH = And(Optional(Name('spaces')), Matchs('python_keyword', ('public', 'protected', 'private')), Name('spaces'), Match('keyword', 'class'), Name('spaces'), Name('identifier')) CLASS_OFFSET = 1 class PythonContext(context.Context): empty_match = And(Optional(Name('spaces')), Name('eol')) class_match = And(Optional(Name('spaces')), Match('python_keyword', 'class'), Name('spaces'), Name('classname')) func_match = And(Optional(Name('spaces')), Match('python_keyword', 'def'), Name('spaces'), Name('functionname')) def __init__(self, mode): self.mode = mode self.names = None self.namelines = None self.classes = None self.functions = None # new object methods def get_functions(self): if self.functions is None: self.build_name_map() return self.functions def get_classes(self): if self.classes is None: self.build_name_map() return self.classes def get_function_list(self): return self._ordered_dict(self.get_functions()) def get_class_list(self): return self._ordered_dict(self.get_classes()) # overridden object methods def _init_name_map(self): self.names = {} self.classes = {} self.functions = {} self.namelines = [(None, None)] * len(self.mode.window.buffer.lines) def _del_name(self, y, name): if name: if name in self.names: del self.names[name] if name in self.classes: del self.classes[name] if name in self.functions: del self.functions[name] self.namelines[y] = (None, None) def _build_name_map(self, y1, y2, last, curr, stack): blen = len(self.mode.window.buffer.lines) highlights = self.mode.window.get_highlighter() i = y1 while i < y2: tokens = highlights.tokens[i] g = highlights.tokens[i] if self.empty_match.match(tokens): #if (len(g) == 1 and g[0].name == 'eol' or # len(g) == 2 and g[0].name == 'spaces' and g[1].name == 'eol'): if last is None: last = i i += 1 #if i == y2 and y2 < blen: # y2 += 1 continue if g[0].name == 'spaces': j, lvl = 1, len(g[0].string) else: j, lvl = 0, 0 while stack and lvl <= stack[-1][0]: stack.pop(-1) if last is not None: curr = '.'.join([x[1] for x in stack]) if curr: for k in range(last, i): self.namelines[k] = (curr, None) last = None if len(g[j:]) > 3: d, found = None, False if g[j].name == 'python_keyword' and g[j].string == 'class': d, found = self.classes, True elif g[j].name == 'python_keyword' and g[j].string == 'def': d, found = self.functions, True if found: stack.append([lvl, g[j+2].string]) curr = '.'.join([x[1] for x in stack]) d[curr] = i self.names[curr] = i else: curr = '.'.join([x[1] for x in stack]) if i == y2 - 1 and curr != self.namelines[i][0] and y2 < blen: y2 += 1 if curr: self.namelines[i] = (curr, None) i += 1 if last is not None and y2 < len(self.namelines): if self.namelines[y2] and self.namelines[y2][0]: n = len(self.namelines[y2][0].split('.')) curr = '.'.join([x[1] for x in stack[:n]]) if curr: for k in range(last, y2): self.namelines[k] = (curr, None) class Python(mode.Fundamental): modename = 'Python' extensions = ['.py'] detection = ['python'] tabbercls = PythonTabber grammar = PythonGrammar opentokens = ('delimiter',) opentags = {'(': ')', '[': ']', '{': '}'} closetokens = ('delimiter',) closetags = {')': '(', ']': '[', '}': '{'} commentc = '#' colors = { 'python_keyword': ('cyan', 'default', 'bold'), 'python_reserved': ('magenta', 'default', 'bold'), 'python_builtin': ('cyan', 'default', 'bold'), 'functionname': ('blue', 'default', 'bold'), 'classname': ('green', 'default', 'bold'), 'rawstring.start': ('green', 'default', 'bold'), 'rawstring.data': ('green', 'default', 'bold'), 'rawstring.null': ('green', 'default', 'bold'), 'rawstring.escaped': ('magenta', 'default', 'bold'), 'rawstring.end': ('green', 'default', 'bold'), 'system_identifier': ('cyan', 'default', 'bold'), } config = { 'python.lib': '.', } lconfig = { 'ignore-suffix': ['.pyc', '.pyo'], } actions = [PythonInitNames, PythonListNames, PythonGotoName, PythonGotoFunction, PythonGotoClass, PythonCheckSyntax, PythonDictCleanup, PythonSemanticComplete, PythonBrmFindReferences, PythonInsertTripleSquotes, PythonInsertTripleDquotes] completers = { "pythonname": PythonNameCompleter(None), "pythonfunction": PythonFunctionCompleter(None), "pythonclass": PythonClassCompleter(None), } format = "%(flag)s %(bname)-18s (%(mname)s) %(indent)s %(cursor)s/%(mark)s %(perc)s [%(name)s]" header_size = 3 def get_status_names(self): names = mode.Fundamental.get_status_names(self) c = self.window.logical_cursor() names['name'] = self.context.get_line_name(c.y) #names['first'] = self.window.first.xy() return names # xyz def get_header(self): fg, bg = "default", "red" if self.tabber is None: s = "Header support is not available for this mode" hs = [RenderString(s=s, attrs=color.build(fg, bg))] while len(hs) < 3: hs.insert(0, RenderString(s='', attrs=color.build(fg, bg))) return hs w = self.window y = self.window.first.y if self.window.first.x > 0: y += 1 lvl = self.tabber.get_level(y) markers = self.tabber.record[y] if w.buffer.is_whitespace(y): ws = None else: ws = w.buffer.count_leading_whitespace(y) hs = [] i = len(markers) - 1 while i >= 0 and len(hs) < 3: marker = markers[i] i -= 1 if marker.y == y: continue if ws and marker.level > ws: continue s = w.buffer.lines[marker.y][:w.width - 1] hs.insert(0, RenderString(s=s, attrs=color.build(fg, bg))) while len(hs) < 3: hs.insert(0, RenderString(s='', attrs=color.build(fg, bg))) return hs def __init__(self, w): mode.Fundamental.__init__(self, w) self.add_bindings('close-paren', (')',)) self.add_bindings('close-brace', ('}',)) self.add_bindings('close-bracket', (']',)) self.add_bindings('python-goto-name', ('C-c M-g',)) self.add_bindings('python-goto-function', ('C-c M-f',)) self.add_bindings('python-goto-class', ('C-c M-c',)) self.add_bindings('python-check-syntax', ('C-c s',)) self.add_bindings('python-dict-cleanup', ('C-c h',)) self.add_bindings('python-insert-triple-squotes', ('C-c M-\'',)) self.add_bindings('python-insert-triple-dquotes', ('C-c M-"',)) self.add_bindings('python-semantic-complete', ('C-c TAB',)) self.context = PythonContext(self) # bicycle repairman! if has_bike: self.brm = bike.init() else: self.brm = None install = Python.install