# NOTE(review): this physical line is the product of newline-stripping
# corruption.  It merges the module imports, StringGrammar, PythonGrammar,
# and the first half of a tab2.StackTabber subclass onto one line, and a
# chunk of source text is missing outright: the r"builtin" pattern body is
# gone, the r"integer" pattern is cut off mid-string (leaving an
# unterminated literal), and the text then jumps into the middle of an
# indentation-recalculation method whose `def` line — and whose enclosing
# `class ...Tabber` header — no longer exist.  Because tokens are missing
# (not merely reflowed), this region cannot be reconstructed from what is
# visible here; it is preserved byte-for-byte below and flagged for manual
# recovery from version control.  The trailing dangling `def` belongs to
# the `_handle_close_token` method that continues on the next line.
import commands, os.path, sets, string import color, completer, default, mode2, method, regex, tab2 import ctag_python from point2 import Point from lex3 import Grammar, PatternRule, RegionRule class StringGrammar(Grammar): rules = [ PatternRule(r'octal', r'\\[0-7]{3}'), PatternRule(r'escaped', r'\\.'), ] class PythonGrammar(Grammar): rules = [ PatternRule(r'functionname', r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'), PatternRule(r'classname', r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'), PatternRule(r'reserved', r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'), PatternRule(r'keyword', r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'), PatternRule(r"builtin", r'(?>=|<<=|\*\*='), PatternRule(r"operator", r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"), PatternRule(r"integer", r"(? 0: y -= 1 # ok, so clear out our stack and then loop over each line self.popped = False self.markers = [] while y <= target: self.continued = False self.last_popped = self.popped self.popped = False tokens = self.get_tokens(y) currlvl = self.get_curr_level() # if we were continuing, let's pop that previous continuation token # and note that we're continuing if self.markers and self.markers[-1].name == 'cont': self.continued = True self._pop() # if we haven't reached the target-line yet, we can detect how many # levels of unindention, if any, the user chose on previous lines if y < target and len(tokens) > 2: if self.token_is_space(y, 0): l = len(tokens[0].string) else: l = 0 while currlvl > l: self._pop() currlvl = self.get_curr_level() self.popped = True # ok, having done all that, we can now process each token on the line for i in range(0, len(tokens)): currlvl = self._handle_token(currlvl, y, i) # so let's store the level for this line, as well as some debugging self.lines[y] = currlvl self.record[y] = tuple(self.markers) y += 1 def
_handle_close_token(self, currlvl, y, i): try: return tab2.StackTabber._handle_close_token(self, currlvl, y, i) except: return currlvl def _handle_other_token(self, currlvl, y, i): token = self.get_token(y, i) fqname = token.fqname() if fqname == 'continuation': # we need to pop the indentation level over, unless last line was # also a continued line if self.continued: self._opt_append('cont', currlvl) else: self._opt_append('cont', currlvl + 4) elif fqname == 'string.start': # while inside of a string, there is no indention leve self._opt_append('string', None) elif fqname == 'string.end': # since we're done with the string, resume our indentation level self._opt_pop('string') elif fqname == 'delimiter': # we only really care about a colon as part of a one-line statement, # i.e. "while ok: foo()" or "if True: print 3" if token.string == ':': if self.markers and self.markers[-1].name in ('[', '{'): pass elif self.is_rightmost_token(y, i): pass else: self._pop() elif fqname == 'keyword': if token.string in self.endlevel_names: # we know we'll unindent at least once self._pop() self.popped = True elif token.string in self.startlevel_names and self.is_leftmost_token(y, i): # we know we will indent exactly once self._append(token.string, currlvl + 4) elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first if/elif if not self.popped and not self.last_popped: self._pop_until('if', 'elif') currlvl = self.get_curr_level() self._append(token.string, currlvl + 4) elif token.string == 'except' and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first try if not self.popped and not self.last_popped: self._pop_until('try') currlvl = self.get_curr_level() self._append(token.string, currlvl + 4) elif token.string == 'finally' and self.is_leftmost_token(y, i): # we know we'll unindent at least to the first try/except if not self.popped and not self.last_popped: self._pop_until('try', 'except') 
currlvl = self.get_curr_level() self._append(token.string, currlvl + 4) return currlvl class Python(mode2.Fundamental): tabbercls = PythonTabber grammar = PythonGrammar opentokens = ('delimiter',) opentags = {'(': ')', '[': ']', '{': '}'} closetokens = ('delimiter',) closetags = {')': '(', ']': '[', '}': '{'} def __init__(self, w): mode2.Fundamental.__init__(self, w) # tag matching self.add_bindings('close-paren', (')',)) self.add_bindings('close-brace', ('}',)) self.add_bindings('close-bracket', (']',)) # add python-specific methods self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',)) self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',)) #self.add_action_and_bindings(PythonUpdateTags(), ('C-c t',)) #self.add_action_and_bindings(PythonTagComplete(), ('C-c k',)) # highlighting self.colors = { 'keyword': color.build('cyan', 'default'), 'reserved': color.build('magenta', 'default'), 'builtin': color.build('cyan', 'default'), 'functionname': color.build('blue', 'default'), 'classname': color.build('green', 'default'), 'string.start': color.build('green', 'default'), 'string.null': color.build('green', 'default'), 'string.octal': color.build('magenta', 'default'), 'string.escaped': color.build('magenta', 'default'), 'string.format': color.build('yellow', 'default'), 'string.end': color.build('green', 'default'), 'integer': color.build('default', 'default'), 'float': color.build('default', 'default'), 'imaginary': color.build('default', 'default'), 'comment': color.build('red', 'default'), 'continuation': color.build('red', 'default'), 'system_identifier': color.build('cyan', 'default'), } self.pythonlib = "." 
def name(self): return "Python" class PythonSetLib(method.Method): '''Set the path(s) to find perl modules''' args = [method.Argument("lib", type=type(""), prompt="Python Path: ", default=default.build_constant("."))] def _execute(self, w, **vargs): w.mode.pythonlib = vargs['lib'] class PythonCheckSyntax(method.Method): '''Check the syntax of the current python file''' def _execute(self, w, **vargs): mod = os.path.splitext(os.path.basename(w.buffer.path))[0] cmd = "PYTHONPATH=%s python -c 'import %s'" % (w.mode.pythonlib, mod) (status, output) = commands.getstatusoutput(cmd) if status == 0: w.application.set_error("Syntax OK") w.application.data_buffer("python-syntax", output, switch_to=False) else: output = output + "\ncommand exit status: %d" % (status) w.application.data_buffer("python-syntax", output, switch_to=True) #class PythonUpdateTags(method.Method): # '''Update the CTag data associated with a python buffer''' # args = [method.Argument("lib", prompt="Module Base: ", datatype='path', # default=default.build_constant("."))] # def _execute(self, w, **vargs): # w.mode.ctagger = ctag_python.PythonCTagger() # w.mode.ctagger.process_paths([vargs['lib']]) # w.application.set_error('Tag data updated') # #class PythonTagComplete(method.Method): # '''Complete a symbol using tag data''' # def _execute(self, w, **vargs): # if not w.mode.ctagger.packages: # w.application.methods['python-update-tags'].execute(w) # return # # cursor = w.logical_cursor() # b = w.buffer # line = b.lines[cursor.y] # end = cursor.x # start = cursor.x # # word_chars = string.letters + string.digits + '_' # if start == 0: # w.application.set_error('walrus 1') # return # # c = line[start - 1] # if c == '(': # w.application.set_error('goldfinch 1') # return # elif c not in word_chars: # w.application.set_error('walrus 2') # return # # while start > 0 and line[start - 1] in word_chars: # start -= 1 # if start == end: # w.application.set_error('walrus 3') # return # word = line[start:end] # # 
candidates = [] # seen = sets.Set() # for p in w.mode.ctagger.packages.iterkeys(): # if p.startswith(word): # if p in seen: # continue # candidates.append(p) # seen.add(p) # for e in w.mode.ctagger.entries.itervalues(): # if e.symbol.startswith(word): # if e.symbol in seen: # continue # candidates.append(e.symbol) # seen.add(e.symbol) # if len(candidates) == 0: # w.application.set_error('No match: %r' % word) # return # elif len(candidates) == 1: # newword = candidates[0] # if word == newword: # w.application.set_error('Already completed!') # return # else: # w.application.set_error('Unique match!') # else: # newword = completer.find_common_string(candidates) # w.application.set_error('Ambiguous match: %r' % (candidates)) # b.delete_string(Point(start, cursor.y), Point(end, cursor.y)) # b.insert_string(Point(start, cursor.y), newword) class PythonDictCleanup(method.Method): '''Align assignment blocks and literal dictionaries''' def _execute(self, w, **vargs): cursor = w.logical_cursor() b = w.buffer # so this is where we will store the groups that we find groups_by_line = {} # the regex we will try regexes = [regex.python_dict_cleanup, regex.python_assign_cleanup] # if we aren't in a hash, inform the user and exit line = b.lines[cursor.y] myregex = None for r in regexes: if r.match(line): myregex = r if myregex is None: raise Exception, "Not a python dict line" groups_by_line[cursor.y] = myregex.match(line).groups() # find the beginning of this hash block start = 0 i = cursor.y - 1 while i >= 0: line = b.lines[i] m = myregex.match(line) if not m: start = i + 1 break else: groups_by_line[i] = m.groups() i -= 1 # find the end of this hash block end = len(b.lines) - 1 i = cursor.y + 1 while i < len(b.lines): line = b.lines[i] m = myregex.match(line) if not m: end = i - 1 break else: groups_by_line[i] = m.groups() i += 1 # assume that the least indented line is correct indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line]) # find the longest hash key to 
base all the other padding on key_w = max([len(groups_by_line[k][1]) for k in groups_by_line]) # for each line, format it correctly keys = groups_by_line.keys() keys.sort() data = '' for i in keys: indent_pad = ' ' * indent_w key = groups_by_line[i][1] sep = groups_by_line[i][3] value = groups_by_line[i][5] key_pad = ' ' * (key_w - len(key)) if sep == '=': data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n' else: data += indent_pad + key + sep + ' ' + key_pad + value + '\n' # remove the old text and add the new start_p = Point(0, start) end_p = Point(0, end + 1) w.kill(start_p, end_p) w.insert_string(start_p, data)