branch: pmacs2
This commit is contained in:
moculus 2007-07-21 15:41:07 +00:00
parent 069ad23737
commit dc59cd567d
24 changed files with 0 additions and 2802 deletions


@@ -1,62 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
from mode_perl import PerlGrammar, StringGrammar
from mode_xml import TagGrammar
class BDSGrammar(Grammar):
rules = [
RegionRule(r'comment', r'<!--', Grammar, r'-->'),
RegionRule(r'tag', r'< */?', TagGrammar, r'/?>'),
PatternRule(r'delimiter', r'[\[\]\{\}\(\),\?:]'),
PatternRule(r'derived', r'(?:FM|CD|FS|TA)[0-9]{3}-[0-9]{3}-[0-9]{3}'),
PatternRule(r'question', r'GQ[0-9]{3}-[0-9]{3}-[0-9]{3}:MQ[0-9]{3}-[0-9]{3}-[0-9]{3}'),
PatternRule(r'bdsfunc', r'[A-Z_][A-Z0-9_]+(?= *\()'),
PatternRule(r'perlfunc', r'[a-zA-Z_][a-zA-Z0-9_]+(?= *\()'),
PatternRule(r'misquoted', r"'[A-Z]{2}[0-9]{3}-[0-9]{3}-[0-9]{3}(?::[A-Z]{2}[0-9]{3}-[0-9]{3}-[0-9]{3})?'"),
PatternRule(r'misquoted', r'"[A-Z]{2}[0-9]{3}-[0-9]{3}-[0-9]{3}(?::[A-Z]{2}[0-9]{3}-[0-9]{3}-[0-9]{3})?"'),
RegionRule(r'string', '"', StringGrammar, '"'),
RegionRule(r'string', "'", Grammar, "'"),
PatternRule(r'operator', r'(?:&gt;=|&lt;=|&gt;|&lt;|==|&amp;&amp;|\|\||eq|ne)'),
]
class BDS(mode2.Fundamental):
grammar = BDSGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'tag.start': ('default', 'default'),
'tag.namespace': ('magenta', 'default'),
'tag.name': ('blue', 'default'),
'tag.attrname': ('blue', 'default'),
'tag.string.start': ('cyan', 'default'),
'tag.string.null': ('cyan', 'default'),
'tag.string.end': ('cyan', 'default'),
'tag.end': ('default', 'default'),
'string.start': ('green', 'default'),
'string.octal': ('magenta', 'default'),
'string.escaped': ('magenta', 'default'),
'string.null': ('green', 'default'),
'string.end': ('green', 'default'),
'derived': ('yellow', 'default'),
'question': ('yellow', 'default'),
'misquoted': ('yellow', 'red'),
'bdsfunc': ('magenta', 'default'),
'perlfunc': ('magenta', 'default'),
'operator': ('magenta', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "BDS"


@@ -1,25 +0,0 @@
import color, mode2
from point2 import Point
from lex3 import Grammar, PatternRule, RegionRule
class MetadataGrammar(Grammar):
rules = [
PatternRule(r'username', r'[a-zA-Z0-9_]+'),
]
class BlameGrammar(Grammar):
rules = [
RegionRule(r'metadata', r'^[0-9\.]+', MetadataGrammar, r'[0-9]{4}-[0-9]{2}-[0-9]{2}'),
PatternRule(r'data', r'.+$'),
]
class Blame(mode2.Fundamental):
grammar = BlameGrammar
colors = {
'metadata.start': ('blue', 'default', 'bold'),
'metadata.username': ('cyan', 'default', 'bold'),
'metadata.end': ('green', 'default', 'bold'),
}
def name(self):
return "Blame"

mode_c.py

@@ -1,228 +0,0 @@
import color, mode2, tab2
from lex3 import Grammar, PatternRule, RegionRule
from mode_python import StringGrammar
# this might not be complete...
# see http://gcc.gnu.org/onlinedocs/gcc-2.95.3/cpp_3.html#SEC44
class MacroGrammar(Grammar):
rules = [
PatternRule('name', r'(?:(?<=#define )) *[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r"unop", r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
PatternRule(r'binop', r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
PatternRule(r"delimiter", r"->|\.|\(|\)|\[|\]|{|}|@|,|:|`|;|=|\?"),
PatternRule(r"integer", r"-?(?:0(?![x0-9])|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?"),
PatternRule(r"float", r"-?(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)"),
RegionRule(r'string', '"', StringGrammar, '"'),
PatternRule(r'char', r"'.'|'\\.'|'\\[0-7]{3}'"),
PatternRule(r"continued", r"\\\n$"),
]
class CGrammar(Grammar):
rules = [
PatternRule(r'include', r'#include(?![a-zA-Z0-9_])'),
PatternRule(r'header', r'<[-A-Za-z/0-9_\.]+>|"[-A-Za-z/0-9_\.]+"'),
RegionRule(r'macrocomment', r'#if +(?:0|NULL|FALSE)', Grammar, r'#endif'),
RegionRule(r'macro', r'#(?:assert|cpu|define|elif|else|endif|error|ident|ifdef|ifndef|if|import|include_next|line|machine|pragma|pragma_once|system|unassert|undef|warning)(?![a-zA-Z0-9_])', MacroGrammar, r'\n$'),
RegionRule(r'comment', '/\*', Grammar, '\*/'),
PatternRule(r'comment', r'//.*$'),
PatternRule(r'keyword', r"(?:auto|break|case|char|const|continue|default|double|do|else|enum|extern|float|for|goto|if|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)(?![a-zA-Z_])"),
PatternRule(r'label', r'[a-zA-Z_][a-zA-Z0-9_]*(?=:)'),
PatternRule(r'structname', r'(?<=struct ) *[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r'enumname', r'(?<=enum ) *[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r'function', r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
PatternRule(r'builtin', r"(?:NULL|TRUE|FALSE)"),
PatternRule(r'identifier', r"[a-zA-Z_][a-zA-Z0-9_]*"),
PatternRule(r"unop", r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
PatternRule(r'binop', r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
# this is sketchy as hell
PatternRule(r"delimiter", r"->|\.|\(|\)|\[|\]|{|}|@|,|:|`|;|=|\?"),
PatternRule(r"integer", r"(?:0(?![x0-9])|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?"),
PatternRule(r"float", r"[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+"),
RegionRule(r'string', '"', StringGrammar, '"'),
PatternRule(r'char', r"'.'|'\\.'|'\\[0-7]{3}'"),
PatternRule(r"eol", r"\n$"),
]
class CTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
# this assumes that people aren't gonna use these macros inside of
# blocks, which is probably ok.
t0 = highlighter.tokens[y][0]
if t0.name == 'macro.start' and t0.string in ('#define', '#include'):
return True
# detecting function declarations is annoying; this assumes that people
# won't put a variable type and name on different lines, but that they
# might do that for function return type and name.
#
# unfortunately, valid function return types might include any of the
# four types of tokens below
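#
# e.g. a line like "static int main(int argc, char **argv)" should
# begin with keyword tokens followed by a 'function' token, so it is
# treated as the start of a declaration.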
decl = False
for t in highlighter.tokens[y]:
if t.name in ('keyword', 'identifier', 'structname', 'enumname'):
decl = True
continue
if decl and t.name == 'function':
break
else:
decl = False
break
if decl:
return True
return False
def _handle_open_token(self, currlvl, y, i):
self._opt_pop('cont')
token = self.get_token(y, i)
if token.string == '{':
self._opt_pop('cond')
currlvl = tab2.StackTabber._handle_open_token(self, currlvl, y, i)
return currlvl
def _handle_close_token(self, currlvl, y, i):
self._opt_pop('cont')
currlvl = tab2.StackTabber._handle_close_token(self, currlvl, y, i)
token = self.get_token(y, i)
if self.is_rightmost_token(y, i):
if token.string == '}':
self._opt_pop('cond')
self._opt_pop('cont')
elif self._peek_name() == 'cond':
pass
else:
self._opt_append('cont', currlvl + 4)
return currlvl
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'delimiter' and token.string == ';':
self._opt_pop('cond')
self._opt_pop('cont')
self._opt_pop('cond')
elif fqname == 'keyword':
if token.string in ('do', 'else', 'for', 'if', 'while'):
self._append('cond', currlvl + 4)
elif token.string == 'break':
self._opt_pop('case', 'while', 'for')
elif token.string == 'continue':
self._opt_pop('while', 'for')
elif token.string == 'case':
self._opt_pop('case')
currlvl = self.get_curr_level()
self._opt_append('case', currlvl + 4)
elif fqname == 'string.start':
self._opt_append('string', None)
elif fqname == 'string.end':
self._opt_pop('string')
if self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + 4)
# TODO: this could be a lot better
elif fqname == 'macro':
currlvl = 0
elif fqname.startswith('macro.start'):
self._opt_append('macro', None)
currlvl = 0
elif fqname.startswith('macro.end'):
self._opt_pop('macro', None)
elif fqname.startswith('macroblock.start'):
self._opt_append('macroblock', None)
currlvl = 0
elif fqname.startswith('macroblock.end'):
self._opt_pop('macroblock', None)
if self.is_rightmost_token(y, i):
if self._has_markers() and self._peek_name() == 'cond':
pass
elif (not fqname.startswith('string') and
not fqname.startswith('comment') and
not fqname.startswith('macro') and
not fqname == 'delimiter' and
not fqname == 'header' and
not fqname == 'null' and
not fqname == 'eol' and
token.string not in ('}', ';', '(', '{', '[', ',')):
self._opt_append('cont', currlvl + 4)
return currlvl
class C(mode2.Fundamental):
tabbercls = CTabber
grammar = CGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
'macrocomment.start': ('red', 'default'),
'macrocomment.null': ('red', 'default'),
'macrocomment.end': ('red', 'default'),
'comment': ('red', 'default'),
'comment.start': ('red', 'default'),
'comment.end': ('red', 'default'),
'comment.null': ('red', 'default'),
'include': ('blue', 'default'),
'header': ('green', 'default'),
'macro': ('blue', 'default'),
'macro.start': ('blue', 'default'),
'macro.name': ('yellow', 'default'),
'macro.null': ('magenta', 'default'),
#'macro.null': ('default', 'default'),
'macro.continued': ('red', 'default'),
'macro.delimiter': ('default', 'default'),
'macro.integer': ('green', 'default'),
'macro.float': ('green', 'default'),
'macro.char': ('green', 'default'),
'macro.string.start': ('green', 'default'),
'macro.string.escaped': ('magenta', 'default'),
'macro.string.octal': ('magenta', 'default'),
#'macro.string.escaped': ('default', 'default'),
#'macro.string.octal': ('default', 'default'),
'macro.string.null': ('green', 'default'),
'macro.string.end': ('green', 'default'),
'macro.end': ('magenta', 'default'),
#'macro.end': ('default', 'default'),
'label': ('magenta', 'default'),
'keyword': ('cyan', 'default'),
'function': ('blue', 'default'),
'builtin': ('magenta', 'default'),
'structname': ('yellow', 'default'),
'enumname': ('yellow', 'default'),
'char': ('green', 'default'),
'string.start': ('green', 'default'),
'string.octal': ('green', 'default'),
'string.escaped': ('green', 'default'),
'string.null': ('green', 'default'),
'string.end': ('green', 'default'),
'integer': ('green', 'default'),
'float': ('green', 'default'),
'bizzaro': ('magenta', 'green'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "C"


@@ -1,30 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
from mode_python import StringGrammar
class ConsoleGrammar(Grammar):
rules = [
PatternRule(r'mesg', r'^[A-Za-z].*$'),
PatternRule(r'input', r'^>>>.*$'),
PatternRule(r'input', r'^-->.*$'),
#PatternRule(r'output', r'^ .*$'),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", StringGrammar, r"'"),
PatternRule(r'bareword', r'[a-zA-Z_][a-zA-Z0-9_]*'),
]
class Console(mode2.Fundamental):
grammar = ConsoleGrammar()
colors = {
'mesg': ('blue', 'default'),
'input': ('cyan', 'default'),
'output': ('default', 'default'),
'string.start': ('green', 'default'),
'string.octal': ('magenta', 'default'),
'string.escaped': ('magenta', 'default'),
'string.null': ('green', 'default'),
'string.end': ('green', 'default'),
}
def name(self):
return "Console"


@@ -1,183 +0,0 @@
import code, string, StringIO, sys, traceback
import color, completer, method, mode2
from lex3 import Grammar, PatternRule
from point2 import Point
class Console(mode2.Fundamental):
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.bindings = {}
self.globals = dict(w.application.globals())
self.locals = dict(w.application.locals())
self.saved_input = ""
self.history = ['']
self.hindex = 0
self.add_bindings('start-of-line', ('C-a', 'HOME',))
self.add_bindings('end-of-line', ('C-e', 'END',))
self.add_bindings('backward', ('C-b', 'L_ARROW',))
self.add_bindings('forward', ('C-f', 'R_ARROW',))
self.add_bindings('delete-left', ('DELETE', 'BACKSPACE',))
self.add_bindings('delete-left-word', ('M-DELETE', 'M-BACKSPACE',))
self.add_bindings('delete-right', ('C-d',))
self.add_bindings('delete-right-word', ('M-d',))
self.add_bindings('kill-region', ('C-w',))
self.add_bindings('copy-region', ('M-w',))
self.add_bindings('kill', ('C-k',))
self.add_bindings('copy', ('M-k',))
self.add_bindings('yank', ('C-y',))
self.add_bindings('pop-kill', ('M-y',))
self.add_bindings('right-word', ('M-f',))
self.add_bindings('left-word', ('M-b',))
self.add_bindings('set-mark', ('C-@',))
self.add_bindings('switch-mark', ('C-x C-x',))
self.add_bindings('undo', ('C-/', 'C-x u',))
self.add_bindings('redo', ('M-/', 'M-_', 'C-x r',))
self.add_bindings('toggle-margins', ('M-m',))
self.add_bindings('transpose-words', ('M-t',))
self.add_bindings('delete-left-whitespace', ('C-c DELETE', 'C-c BACKSPACE',))
self.add_bindings('delete-right-whitespace', ('C-c d',))
self.add_bindings('insert-space', ('SPACE',))
self.add_bindings('insert-tab', ('TAB',))
self.add_action_and_bindings(ConsoleExec(), ('RETURN',))
self.add_action_and_bindings(ConsoleClear(), ('C-l',))
self.add_action_and_bindings(ConsoleCancel(), ('C-]',))
self.add_action_and_bindings(ConsoleHistoryPrev(), ('C-p',))
self.add_action_and_bindings(ConsoleHistoryNext(), ('C-n',))
#self.add_action_and_bindings(ConsoleTab(), ('TAB',))
for c in string.letters + string.digits + string.punctuation:
self.add_binding('insert-string-%s' % c, c)
def name(self):
return "Console"
class ConsoleExec(method.Method):
def _execute(self, w, **vargs):
s = w.buffer.make_string()
w.mode.history[-1] = s
w.mode.history.append('')
w.buffer.set_data('')
w.mode.hindex = len(w.mode.history) - 1
a = w.application
if not a.has_buffer_name('*Console*'):
raise Exception, "No console found!"
b = a.bufferlist.get_buffer_by_name('*Console*')
if a.window().buffer is not b:
a.switch_buffer(b)
p = a.get_mini_buffer_prompt()
b.insert_string(b.get_buffer_end(), p + s + '\n', force=True)
if w.mode.saved_input:
s = w.mode.saved_input + '\n' + s
try:
code_obj = code.compile_command(s)
if code_obj is None:
w.mode.saved_input = s
a.set_mini_buffer_prompt('--> ')
output = None
else:
w.mode.saved_input = ''
a.set_mini_buffer_prompt('>>> ')
sys.stdout = code_out = StringIO.StringIO()
sys.stderr = code_err = StringIO.StringIO()
ok = True
try:
exec code_obj in w.mode.globals, w.mode.locals
except Exception, e:
ok = False
output = str(e) + '\n'
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
if ok:
output = code_out.getvalue()
code_out.close()
code_err.close()
except (SyntaxError, OverflowError, ValueError), e:
a.set_mini_buffer_prompt('>>> ')
output = str(e) + traceback.format_exc()
if output:
newlines = [' %s' % x for x in output.split('\n')]
assert newlines[-1] == ' '
newlines[-1] = ''
b.insert_lines(b.get_buffer_end(), newlines, force=True)
for w2 in b.windows:
w2.goto_end()
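# Illustration (not part of the original module): ConsoleExec relies on
# the stdlib's code.compile_command(), which returns a code object for a
# complete statement, None for an incomplete one (hence the '--> '
# continuation prompt above), and raises SyntaxError on invalid input.
def _demo_compile_command():
    import code
    assert code.compile_command('1 + 1') is not None  # complete
    assert code.compile_command('if True:') is None   # incomplete
    try:
        code.compile_command('x ===')                 # invalid
        assert False, 'expected SyntaxError'
    except SyntaxError:
        pass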
class ConsoleCancel(method.Method):
def execute(self, w, **vargs):
w.application.close_mini_buffer()
class ConsoleClear(method.Method):
def execute(self, w, **vargs):
a = w.application
if not a.has_buffer_name('*Console*'):
raise Exception, "No console found!"
b = a.bufferlist.get_buffer_by_name('*Console*')
b.clear()
class ConsoleHistoryPrev(method.Method):
def execute(self, w, **vargs):
if w.mode.hindex <= 0:
w.mode.hindex = 0
return
elif w.mode.hindex == len(w.mode.history) - 1:
w.mode.history[-1] = w.buffer.make_string()
w.mode.hindex -= 1
w.buffer.set_data(w.mode.history[w.mode.hindex])
class ConsoleHistoryNext(method.Method):
def execute(self, w, **vargs):
if w.mode.hindex == len(w.mode.history) - 1:
return
w.mode.hindex += 1
w.buffer.set_data(w.mode.history[w.mode.hindex])
#class ConsoleTab(method.Method):
# def execute(self, w, **vargs):
# a = w.application
# s = w.buffer.make_string()
#
# if '"' in s or "'" in s or "(" in s or ")" in s or "[" in s or "]" in s:
# return
#
# parts = s.split(".")
# if len(parts) == 0:
# return
#
# v = a.globals()
# v.update(a.locals())
# obj = None
# for part in parts[:-1]:
# if obj is None:
# if part in v:
# obj = v[part]
# else:
# return
# else:
# if hasattr(obj, part):
# obj = getattr(obj, part)
# else:
# return
#
# if obj is None:
# pool = v.keys()
# else:
# pool = dir(obj)
# candidates = [x for x in pool if x.startswith(parts[-1])]
#
# if len(candidates) == 0:
# return
#
# common = completer.find_common_string(candidates)
# s2 = '.'.join(parts[:-1]) + '.' + common
#
# w.buffer.set_data(s2)
#
# if len(candidates) > 1:
# if not a.has_buffer_name('*Console*'):
# a.add_buffer(buffer.ConsoleBuffer())
# b = a.bufferlist.get_buffer_by_name('*Console*')
# b.insert_string(b.get_buffer_end(), repr(candidates) + '\n', force=True)


@@ -1,93 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, NocasePatternRule, RegionRule, NocaseRegionRule
from point2 import Point
class StringGrammar(Grammar):
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
]
class KeywordGrammar(Grammar):
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
RegionRule('string', "'", StringGrammar, r"'"),
RegionRule('string', '"', StringGrammar, r'"'),
]
class CSSGrammar(Grammar):
rules = [
RegionRule(r'comment', '/\*', Grammar, '\*/'),
RegionRule(r'htmlcomment', '<!--', Grammar, '-->'),
NocasePatternRule(r'dimension', r'[+-]?(?:[0-9]+|[0-9]*\.[0-9]+)[-a-z_][-a-z0-9_]*'),
NocasePatternRule(r'percentage', r'[+-]?(?:[0-9]+|[0-9]*\.[0-9]+)%%'),
NocasePatternRule(r'length', r'[+-]?(?:[0-9]+|[0-9]*\.[0-9]+)(?:em|ex|px|in|cm|mm|pt|pc)'),
NocasePatternRule(r'hash', r'#[-a-z0-9_]+'),
NocasePatternRule(r'real', r'[+-]?[0-9]*\.[0-9]+'),
NocasePatternRule(r'int', r'[+-]?[0-9]+'),
NocasePatternRule(r'rule', r'@(?:page|media|import)'),
NocasePatternRule(r'color', r'(?:aqua|black|blue|fuchsia|gray|green|lime|maroon|navy|olive|orange|purple|red|silver|teal|white|yellow|#[0-9a-f]{6}|#[0-9a-f]{3})'),
NocasePatternRule(r'keyword', r'(?:url|rgb|counter)'),
NocaseRegionRule(r'keyword', '(?:(?<=url)|(?<=rgb)|(?<=counter))\(', KeywordGrammar, '\)'),
NocasePatternRule(r'label', r"\.?[-a-zA-Z0-9_]+(?= *{)"),
NocasePatternRule(r'ident', r"-?[a-z_][-a-z0-9_]*"),
NocasePatternRule(r'name', r"[-a-z0-9_]+"),
NocasePatternRule(r'delimiter', r'[:;,{}()\[\]]|~=|\|=|='),
RegionRule(r'string', "'", StringGrammar, r"'"),
RegionRule(r'string', '"', StringGrammar, r'"'),
]
class CSS(mode2.Fundamental):
grammar = CSSGrammar
colors = {
'comment': ('red', 'default'),
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'htmlcomment': ('red', 'default'),
'htmlcomment.start': ('red', 'default'),
'htmlcomment.null': ('red', 'default'),
'htmlcomment.end': ('red', 'default'),
'dimension': ('magenta', 'default'),
'percentage': ('magenta', 'default'),
'length': ('magenta', 'default'),
'real': ('magenta', 'default'),
'int': ('magenta', 'default'),
'color': ('magenta', 'default'),
'hash': ('cyan', 'default'),
'label': ('cyan', 'default'),
'rule': ('cyan', 'default'),
'keyword': ('cyan', 'default'),
'ident': ('default', 'default'),
'name': ('default', 'default'),
'delimiter': ('default', 'default'),
'keyword.start': ('default', 'default'),
'keyword.null': ('cyan', 'default'),
'keyword.octal': ('magenta', 'default'),
'keyword.escaped': ('magenta', 'default'),
'keyword.end': ('default', 'default'),
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.octal': ('magenta', 'default'),
'string.escaped': ('magenta', 'default'),
'string.end': ('green', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "Javascript"


@@ -1,50 +0,0 @@
import color, method, mode2, re
from lex3 import Grammar, PatternRule, RegionRule
class DiffGrammar(Grammar):
rules = [
PatternRule(name=r'left', pattern=r"^\-.*$"),
PatternRule(name=r'right', pattern=r"^\+.*$"),
PatternRule(name=r'metadata', pattern=r'^[A-Za-z].*$'),
PatternRule(name=r'separator', pattern=r'^={67}$'),
PatternRule(name=r'location', pattern=r"(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)"),
PatternRule(name=r'common', pattern=r"(?:^|(?<=\n)).*(?:$|\n)"),
]
class Diff(mode2.Fundamental):
grammar = DiffGrammar()
colors = {
'left': ('red', 'default', 'bold'),
'right': ('blue', 'default', 'bold'),
'separator': ('magenta', 'default', 'bold'),
'metadata': ('magenta', 'default', 'bold'),
'location': ('magenta', 'default', 'bold'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
#self.add_action_and_bindings(DiffNextSection(), ('M-n', 'M-D_ARROW',))
#self.add_action_and_bindings(DiffPreviousSection(), ('M-p', 'M-U_ARROW',))
def name(self):
return "Diff"
class DiffNextSection(method.Method):
re = re.compile("(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y + 1
while i < len(w.buffer.lines):
if self.re.match(w.buffer.lines[i]):
w.goto_line(i)
return
i += 1
class DiffPreviousSection(method.Method):
re = re.compile("(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y - 1
while i >= 0:
if self.re.match(w.buffer.lines[i]):
w.goto_line(i)
return
i -= 1
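# Quick sanity check (illustrative; not part of the original file) that
# the hunk pattern shared by both methods matches a typical unified-diff
# location line:
def _demo_hunk_regex():
    r = re.compile(r"(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")
    assert r.match("@@ -1,5 +1,7 @@\n")
    assert not r.match("+++ b/somefile.py\n")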


@@ -1,184 +0,0 @@
import commands, dirutil, grp, method, mode2, os.path, pwd, re
from lex3 import Grammar, PatternRule, RegionRule, PatternGroupRule
from point2 import Point
from method import Method, Argument
class PermGrammar(Grammar):
rules = [
PatternRule(r'sticky', r'[tT]'),
PatternRule(r'setid', r'[sS]'),
PatternRule(r'read', r'r'),
PatternRule(r'write', r'w'),
PatternRule(r'exec', r'x'),
]
class PathGrammar(Grammar):
rules = [
RegionRule(r'perm', r'(?<=^.)', PermGrammar, r' '),
PatternGroupRule(r'fields', r'owner', r'[^ ]+ +', r'group', r'[^ ]+ +',
r'size', r'[^ ]+ +',
r'mtime', r'[A-Za-z]{3} [ 0-9]{2} [0-9]{2}:[0-9]{2} +',
r'name', r'[^\n]*'),
]
class DirGrammar(Grammar):
rules = [
RegionRule(r'file', r'^-', PathGrammar, r'\n'),
RegionRule(r'blk', r'^b', PathGrammar, r'\n'),
RegionRule(r'chr', r'^c', PathGrammar, r'\n'),
RegionRule(r'dir', r'^d', PathGrammar, r'\n'),
RegionRule(r'lnk', r'^l', PathGrammar, r'\n'),
RegionRule(r'fifo', r'^p', PathGrammar, r'\n'),
RegionRule(r'sock', r'^s', PathGrammar, r'\n'),
RegionRule(r'unk', r'^\?', PathGrammar, r'\n'),
]
class Dir(mode2.Fundamental):
grammar = DirGrammar()
colors = {
'blk.start': ('cyan', 'default'),
'blk.name': ('cyan', 'default'),
'chr.start': ('yellow', 'default'),
'chr.name': ('yellow', 'default'),
'dir.start': ('blue', 'default'),
'dir.name': ('blue', 'default'),
'lnk.start': ('green', 'default'),
'lnk.name': ('green', 'default'),
'fifo.start': ('red', 'default'),
'fifo.name': ('red', 'default'),
'sock.start': ('red', 'default'),
'sock.name': ('red', 'default'),
'unk.start': ('magenta', 'default'),
'unk.name': ('magenta', 'default'),
'perm.setid': ('yellow', 'default'),
'perm.sticky': ('yellow', 'default'),
'perm.read': ('magenta', 'default'),
'perm.write': ('magenta', 'default'),
'perm.exec': ('magenta', 'default'),
'owner': ('cyan', 'default'),
'group': ('cyan', 'default'),
'size': ('yellow', 'default'),
'mtime': ('green', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(RefreshView(), ('C-c r',))
self.add_action_and_bindings(OpenPath(), ('RETURN',))
self.add_action_and_bindings(Chmod(), ('C-c m',))
self.add_action_and_bindings(Chown(), ('C-c o',))
self.add_action_and_bindings(Chgrp(), ('C-c g',))
self.add_action_and_bindings(TouchPath(), ('C-c t',))
self.add_action_and_bindings(RemovePath(), ('DELETE', 'BACKSPACE', 'C-d'))
def name(self):
return "Dir"
class RefreshView(Method):
def _execute(self, w, **vargs):
t = dirutil.resolve_token(w)
s = t.string
w.buffer.reload()
dirutil.find_name(w, s)
class OpenPath(Method):
def _execute(self, w, **vargs):
path = dirutil.resolve_path(w)
w.set_error("opening %r" % path)
w.application.methods['open-file'].execute(w, filename=path)
class DirCmd(Method):
def _make_cmd(self, w, path, **vargs):
return ''
def _run(self, w, **vargs):
basename = dirutil.resolve_name(w)
path = os.path.join(w.buffer.path, basename)
cmd = self._make_cmd(w, path, **vargs)
(status, output) = commands.getstatusoutput(cmd)
if status != 0:
w.set_error("%s failed (exit %d)" % (self.name, status))
w.application.methods['refresh-view'].execute(w, filename=path)
dirutil.find_name(w, basename)
class Chmod(DirCmd):
args = [Argument('mode', type=type(''), prompt="New Mode: ")]
octal_re = re.compile('^[0-7]{1,4}$')
symbolic_re = re.compile('(?:[ugoa]*(?:[-+=](?:[rwxXst]*|[ugo]))+ *,?)+')
def _make_cmd(self, w, path, **vargs):
return 'chmod %r %r' % (vargs['mode'], path)
def _execute(self, w, **vargs):
if self.octal_re.match(vargs['mode']):
pass
elif self.symbolic_re.match(vargs['mode']):
pass
else:
w.set_error("Not a valid mode: %r" % vargs['mode'])
return
self._run(w, **vargs)
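# Illustrative check (not part of the original file) of the two mode
# syntaxes validated above: octal strings like '644' and symbolic
# clauses like 'u+rwx,go-w'.
def _demo_chmod_patterns():
    assert Chmod.octal_re.match('644')
    assert Chmod.symbolic_re.match('u+rwx,go-w')
    assert not Chmod.octal_re.match('888')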
class Chown(DirCmd):
args = [Argument('owner', type=type(''), prompt="New Owner: ")]
def _make_cmd(self, w, path, **vargs):
return 'chown %r %r' % (vargs['owner'], path)
def _execute(self, w, **vargs):
fields = vargs['owner'].split(':')
if len(fields) == 1:
(owner, group) = (fields[0], None)
elif len(fields) == 2:
(owner, group) = fields
else:
w.set_error("Malformed 'owner' argument: %r" % vargs['owner'])
return
if not dirutil.valid_owner(owner):
w.set_error('User %r does not exist' % owner)
return
if group is not None and not dirutil.valid_group(group):
w.set_error('Group %r does not exist' % group)
return
self._run(w, **vargs)
class Chgrp(DirCmd):
args = [Argument('group', type=type(''), prompt="New Group: ")]
def _make_cmd(self, w, path, **vargs):
return 'chgrp %r %r' % (vargs['group'], path)
def _execute(self, w, **vargs):
if not dirutil.valid_group(vargs['group']):
w.set_error('Group %r does not exist' % vargs['group'])
return
self._run(w, **vargs)
class TouchPath(Method):
args = [Argument('filename', datatype="path", prompt="Touch File: ")]
def _execute(self, w, **vargs):
basename = vargs['filename']
path = os.path.join(w.buffer.path, basename)
retval = os.system('touch %r' % path)
w.application.methods['refresh-view'].execute(w, filename=path)
dirutil.find_name(w, basename)
if retval != 0:
w.set_error("touch %r failed (exit %d)" % (path, retval))
class RemovePath(Method):
def _execute(self, w, **vargs):
self._old_window = w
self._old_path = dirutil.resolve_path(w)
basename = os.path.basename(self._old_path)
self._prompt = "Do you want to delete %r? " % basename
w.application.open_mini_buffer(self._prompt, self._callback)
def _callback(self, v):
a = self._old_window.application
if v == 'yes':
self._doit()
a.close_mini_buffer()
return
if v == 'no':
a.close_mini_buffer()
return
a.open_mini_buffer(self._prompt, self._callback)
a.set_error('Please type "yes" or "no"')
def _doit(self):
w = self._old_window
path = self._old_path
try:
w.application.methods['previous-line'].execute(w)
os.remove(path)
w.set_error("deleted %r " % path)
w.application.methods['refresh-view'].execute(w, filename=path)
except:
w.set_error("failed to delete %r" % path)


@@ -1,52 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
from mode_xml import TagGrammar
from mode_javascript import JavascriptGrammar, Javascript
class HTMLGrammar(Grammar):
rules = [
# TODO: how does cdata work again?
RegionRule(r'comment', r'<!--', Grammar, r'-->'),
# BUG: not all scripts are javascript... but, dynamically choosing a
# grammar based on the 'type' attribute (which may be on a different
# line) could be pretty hairy.
RegionRule(r'script', r'<(?=script[^a-zA-Z0-9_])', TagGrammar, r'>', JavascriptGrammar, r'</(?=script>)', TagGrammar, r'>'),
RegionRule(r'tag', r'</?', TagGrammar, r'/?>'),
]
class HTML(mode2.Fundamental):
grammar = HTMLGrammar
colors = {
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'script.start': ('default', 'default'),
'script.namespace': ('magenta', 'default'),
'script.name': ('blue', 'default'),
'script.attrname': ('cyan', 'default'),
'script.string.start': ('green', 'default'),
'script.string.null': ('green', 'default'),
'script.string.end': ('green', 'default'),
'script.end': ('default', 'default'),
'tag.start': ('default', 'default'),
'tag.namespace': ('magenta', 'default'),
'tag.name': ('blue', 'default'),
'tag.attrname': ('cyan', 'default'),
'tag.string.start': ('green', 'default'),
'tag.string.null': ('green', 'default'),
'tag.string.end': ('green', 'default'),
'tag.end': ('default', 'default'),
}
js = Javascript(None)
for name in js.colors:
colors['script.%s' % name] = js.colors[name]
del js
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "HTML"


@@ -1,97 +0,0 @@
import color, mode2, tab2
from lex3 import Grammar, PatternRule, RegionRule
from point2 import Point
from mode_python import StringGrammar
class JavascriptGrammar(Grammar):
rules = [
PatternRule(r'comment', r'//.*$'),
RegionRule(r'comment', '/\*', Grammar, '\*/'),
PatternRule(r'continuation', r'\\(?= *$)'),
PatternRule(r'function', r"(?<=function )[a-zA-Z_][a-zA-Z0-9_]*"),
PatternRule(r'class', r"(?<=class )[a-zA-Z_][a-zA-Z0-9_]*"),
PatternRule(r'reserved', r'(?:as|break|case|catch|class|const|continue|default|delete|do|else|export|extends|false|finally|for|function|if|import|in|instanceof|is|namespace|new|null|package|private|public|return|super|switch|this|throw|true|try|typeof|use|var|void|while|with)(?![a-zA-Z0-9_])'),
PatternRule(r'reserved', r'(?:abstract|debugger|enum|goto|implements|interface|native|protected|synchronized|throws|transient|volatile)(?![a-zA-Z0-9_])'),
PatternRule(r'nonreserved', r'(?:get|include|set)(?![a-zA-Z0-9_])'),
PatternRule(r"method", r"(?<=\.)[a-zA-Z_][a-zA-Z0-9_]*(?= *\()"),
PatternRule(r'identifier', r"[a-zA-Z_][a-zA-Z0-9_]*"),
PatternRule(r'integer', r"(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?"),
PatternRule(r'float', r"[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+"),
# fucking javascript!
# their lexer grammar requires one-token look-behind in order to know
# whether a "/" starts a literal regex, or is part of a mathematical
# expression/assignment. so for now we will require either a space or $
# after / in order to *not* treat it as a regex. dammit!
PatternRule(r'delimiter', r'%=|&&=|&=|\(|\)|\*=|\+=|,|-=|\.{3}|\.|/=(?= |$)|::|:|;|<<=|>>=|>>>=|\?|\[|\]|\^=|\^\^=|\{|\}|\|=|\|\|='),
PatternRule(r'operator', r'!==|!=|!|%|&&|&|\*|\+\+|\+|--|-|/(?= |$)|<<=|<<|<=|<|===|==|=|>>>=|>>>|>>=|>>|>=|>|\\'),
RegionRule('regex', "/", StringGrammar, "/"),
RegionRule('string', "'", StringGrammar, "'"),
RegionRule('string', '"', StringGrammar, '"'),
]
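# Illustrative note (not part of the original file): the operator rule
# above only treats '/' as division when a space or end-of-line follows,
# so a '/' opening a regex literal falls through to the 'regex' region
# rule instead.
def _demo_slash_disambiguation():
    import re
    op = re.compile(r'/(?= |$)')
    assert op.search('x = a / b')         # division: '/' before a space
    assert not op.match('/foo/.test(s)')  # regex literal: '/' before 'f'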
class JavascriptTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'reserved' and t.string == 'function'
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if token.name == 'operator' and token.string == '=':
self._opt_append("cont", currlvl + 4)
elif token.name == 'delimiter' and token.string == ";":
self._opt_pop("cont")
return currlvl
class Javascript(mode2.Fundamental):
grammar = JavascriptGrammar
tabbercls = JavascriptTabber
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
'comment': ('red', 'default'),
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'continuation': ('red', 'default'),
'function': ('blue', 'default'),
'class': ('green', 'default'),
'reserved': ('cyan', 'default'),
'nonreserved': ('cyan', 'default'),
'delimiter': ('default', 'default'),
'operator': ('default', 'default'),
'integer': ('default', 'default'),
'float': ('default', 'default'),
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.octal': ('magenta', 'default'),
'string.escaped': ('magenta', 'default'),
'string.end': ('green', 'default'),
'regex.start': ('cyan', 'default'),
'regex.null': ('cyan', 'default'),
'regex.octal': ('magenta', 'default'),
'regex.escaped': ('magenta', 'default'),
'regex.end': ('cyan', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "Javascript"


@@ -1,84 +0,0 @@
import re, sets, string, sys
import color, commands, default, method, mode2, regex, tab2
from point2 import Point
class Life(mode2.Fundamental):
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action(LifeShiftLeft())
self.add_action(LifeShiftRight())
self.add_action_and_bindings(LifeDoTurn(), ('M-RETURN',))
for c in string.letters + string.digits + string.punctuation:
self.add_action_and_bindings(LifeInsertString(c), c)
self.token = 'o'
w.buffer.set_lines(self.normalize_board(), force=True)
w.goto_beginning()
def name(self):
return "Life"
def normalize_board(self):
lines = self.window.buffer.lines
s = ' ' * (self.window.width-1)
newlines = [s] * (self.window.height-1)
for i in range(0, min(len(lines), (self.window.height-1))):
chars = [' '] * (self.window.width-1)
for j in range(0, min(len(lines[i]), (self.window.width-1))):
if lines[i][j] != ' ':
chars[j] = self.token
newlines[i] = ''.join(chars)
return newlines
class LifeShiftLeft(method.Method):
def _execute(self, w, **vargs):
newlines = list(w.buffer.lines)
for i in range(0, len(newlines)):
newlines[i] = newlines[i][10:] + newlines[i][:10]
w.buffer.set_lines(newlines)
w.goto_beginning()
class LifeShiftRight(method.Method):
def _execute(self, w, **vargs):
newlines = list(w.buffer.lines)
for i in range(0, len(newlines)):
newlines[i] = newlines[i][-10:] + newlines[i][:-10]
w.buffer.set_lines(newlines)
w.goto_beginning()
# insert text
class LifeInsertString(method.Method):
_is_method = False
def __init__(self, s):
self.name = "life-insert-string-%s" % (s)
self.args = []
self.help = "Insert %r into the current buffer." % s
self.string = s
def _execute(self, w, **vargs):
if w.cursor_char() == '\n':
return
w.right_delete()
w.insert_string_at_cursor(w.mode.token)
class LifeDoTurn(method.Method):
'''Run a turn of life on the current buffer'''
def _execute(self, w, **vargs):
lines = w.mode.normalize_board()
newlines = list(lines)
w.buffer.set_lines(lines)
w.goto_beginning()
for y in range(0, (w.height-1)):
line = list(newlines[y])
for x in range(0, (w.width-1)):
on = lines[y][x] != ' '
count = 0
for (i,j) in ((-1, -1), (-1, 0), (-1, 1), (0, 1),
(0, -1), (1, -1), (1, 0), (1, 1)):
y2 = (y + i) % (w.height-1)
x2 = (x + j) % (w.width-1)
if lines[y2][x2] != ' ':
count += 1
if count == 3 or (count == 2 and on):
line[x] = w.mode.token
else:
line[x] = ' '
newlines[y] = ''.join(line)
w.buffer.set_lines(newlines)
w.goto_beginning()
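# Standalone restatement (illustrative; not part of the original file)
# of the update rule LifeDoTurn applies: a cell is alive next turn iff
# it has exactly three live neighbors, or two live neighbors and is
# alive now; the modulo arithmetic wraps the board toroidally.
def _life_step(lines, token='o'):
    h, w = len(lines), len(lines[0])
    out = []
    for y in range(h):
        row = []
        for x in range(w):
            on = lines[y][x] != ' '
            count = 0
            for i, j in ((-1, -1), (-1, 0), (-1, 1), (0, -1),
                         (0, 1), (1, -1), (1, 0), (1, 1)):
                if lines[(y + i) % h][(x + j) % w] != ' ':
                    count += 1
            row.append(token if count == 3 or (count == 2 and on) else ' ')
        out.append(''.join(row))
    return out
# e.g. a blinker oscillates between a row and a column:
# _life_step(['     ', ' ooo ', '     ']) == ['  o  ', '  o  ', '  o  ']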


@@ -1,41 +0,0 @@
import method, mode2
class Mini(mode2.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
# delete actions relating to multiple lines
self.del_action('center-view')
self.del_action('next-line')
self.del_action('previous-line')
self.del_action('page-down')
self.del_action('page-up')
self.del_action('goto-beginning')
self.del_action('goto-end')
self.del_action('switch-buffer')
# add some new actions for the minibuffer
self.add_action_and_bindings(MiniCallback(), ('RETURN',))
self.add_action_and_bindings(MiniTabComplete(), ('TAB',))
#self.add_action_and_bindings(MiniCancel(), ('C-]',))
def name(self):
return "Mini"
class MiniCallback(method.Method):
def execute(self, window, **vargs):
window.buffer.do_callback()
class MiniTabComplete(method.Method):
def __init__(self):
self.name = "tab-complete"
self.args = []
def execute(self, window, **vargs):
b = window.buffer
if b.tabber is None:
window.application.set_error("No tab completion")
return
s1 = b.make_string()
s2, exists, complete = b.tabber.tab_string(s1, window)
b.set_data(s2)


@@ -1,50 +0,0 @@
import color, mode2, method, mode_text
from lex3 import Grammar, PatternRule
class MuttGrammar(Grammar):
rules = [
PatternRule(name=r'header', pattern=r'^(?:From|To|Cc|Bcc|Subject|Reply-To|In-Reply-To|Delivered-To|Date):'),
PatternRule(name=r'quoteb', pattern=r'^ *(?:(?: *>){3})*(?: *>){2}.*$'),
PatternRule(name=r'quotea', pattern=r'^ *(?:(?: *>){3})*(?: *>){1}.*$'),
PatternRule(name=r'quotec', pattern=r'^ *(?:(?: *>){3})*(?: *>){3}.*$'),
PatternRule(name=r'email', pattern=r'(?:^|(?<=[ :]))<?[^<>@\n ]+@(?:[^<>@\.\n ]+\.)*[^<>@\.\n ]+>?'),
PatternRule(name=r'url', pattern=r'(?:^|(?<= ))(?:http|https|ftp|sftp|file|smtp|smtps|torrent|news|jabber|irc|telnet)://(?:[^\.\n ]+\.)*[^\.\n ]+'),
mode_text.ContinuedRule(),
mode_text.WordRule(),
PatternRule(name=r'punct', pattern=r'[^a-zA-Z0-9_]'),
PatternRule(name=r'stuff', pattern=r'[a-zA-Z0-9_]+'),
]
class Mutt(mode2.Fundamental):
grammar = MuttGrammar()
colors = {
'header': ('green', 'default', 'bold'),
'email': ('cyan', 'default', 'bold'),
'url': ('cyan', 'default', 'bold'),
'quotea': ('yellow', 'default', 'bold'),
'quoteb': ('cyan', 'default', 'bold'),
'quotec': ('magenta', 'default', 'bold'),
'misspelled': ('red', 'default'),
'cont.start': ('default', 'default'),
'cont.end': ('default', 'default'),
'word': ('default', 'default'),
'punct': ('default', 'default'),
'stuff': ('default', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(mode_text.LearnWord(), ('C-c l',))
self.add_action_and_bindings(MuttWrapParagraph(), ('M-q',))
self.add_action_and_bindings(MuttInsertSpace(), ('SPACE',))
def name(self):
return "Mutt"
class MuttWrapLine(method.WrapLine):
limit = 72
class MuttWrapParagraph(method.WrapParagraph):
wrapper = MuttWrapLine
class MuttInsertSpace(mode_text.TextInsertSpace):
limit = 72
wrapper = MuttWrapParagraph


@@ -1,45 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
class StringGrammar(Grammar):
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
]
class NasmGrammar(Grammar):
rules = [
PatternRule(r'keyword', r"(?:section|global|extern)(?![a-zA-Z_])"),
PatternRule(r'macros', r"%(?:define|undef|assign|strlen|macro|endmacro|if|elif|else|endif|ifdef|ifndef|include|push|pop|stacksize)(?![a-zA-Z_])"),
PatternRule(r'instructions', r"(?:jeq|jne|ja|jmp|push|pushad|pushfd|call|ret|sub|add|pop|popa|popad|popfd|call|and|cwd|cdq|cmp|cmpxchg|cpuid|div|divpd|enter|leave|fadd|fld|fmul|fsqrt|fsub|hlt|imul|inc|int|int3|lea|mov|movd|mul|neg|not|nop|or|sal|sar|shl|shr|shld|shrd|syscall|sysenter|sysexit|test|xchg|xadd|xor)(?![a-zA-Z_])"),
PatternRule(r'registers', r"(?:eax|ax|ah|al|ebx|bx|bh|bl|ecx|cx|ch|cl|esi|edi|esp|ebp)(?![a-zA-Z_])"),
PatternRule(r'prefix', r"(?:dword|word|lock)(?![a-zA-Z_])"),
PatternRule(r'label', r"[a-zA-Z_.][a-zA-Z0-9_.]*:"),
PatternRule(r"identifier", r"[a-zA-Z_][a-zA-Z0-9_]*"),
PatternRule(r"integer", r"(0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?"),
PatternRule(r"float", r"[0-9]+\.[0-9]*|\.[0-9]+|([0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+"),
RegionRule(r'string', r'"""', StringGrammar, r'"""'),
RegionRule(r'string', r"'''", StringGrammar, r"'''"),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", StringGrammar, r"'"),
PatternRule(r'comment', r';.*$'),
]
class Nasm(mode2.Fundamental):
grammar = NasmGrammar
colors = {
'keyword': ('cyan', 'default', 'bold'),
'macros': ('blue', 'default', 'bold'),
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.end': ('green', 'default'),
'comment': ('red', 'default'),
'registers': ('yellow', 'default'),
'instructions': ('magenta', 'default'),
'label': ('blue', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
def name(self):
return "Nasm"


@@ -1,576 +0,0 @@
import re, sets, string, sys
import color, commands, default, method, mode2, regex, tab2
from point2 import Point
from lex3 import Grammar, PatternRule, ContextPatternRule, RegionRule, OverridePatternRule
from method import Argument, Method
class PodGrammar(Grammar):
rules = [
RegionRule(r'entry', r'(?<=^=head[1-4]) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=over) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=item) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?:(?<=^=begin)|(?<=^=end)) +.*$', Grammar, '^\n$'),
RegionRule(r'entry', r'(?<=^=encoding) +.*$', Grammar, '^\n$'),
]
class StringGrammar(Grammar):
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
PatternRule(r'deref', r"\$+[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?:->{\$?(?:[a-zA-Z_][a-zA-Z_0-9]*|'(?:\\.|[^'\\])*'|\"(\\.|[^\\\"])*\")}|->\[\$?[0-9a-zA-Z_]+\])+"),
PatternRule(r'length', r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
ContextPatternRule(r'scalar', r"\$[^A-Za-z0-9 %(delim)s](?![A-Za-z0-9_])", r"\$[^A-Za-z0-9 ](?![A-Za-z0-9_])"),
PatternRule(r'scalar', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'cast', r"[\$\@\%\&]{.*?}"),
PatternRule(r'array', r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
]
class PerlGrammar(Grammar):
rules = [
RegionRule(r'heredoc', r"<<(?P<heredoc>[a-zA-Z0-9_]+) *;", StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'heredoc', r'<< *"(?P<heredoc>[a-zA-Z0-9_]+)" *;', StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'heredoc', r"<< *'(?P<heredoc>[a-zA-Z0-9_]+)' *;", Grammar, r'^%(heredoc)s$'),
RegionRule(r'evaldoc', r"<< *`(?P<heredoc>[a-zA-Z0-9_]+)` *;", StringGrammar, r'^%(heredoc)s$'),
RegionRule(r'endblock', r"^(?:__END__|__DATA__) *$", Grammar, r''),
RegionRule(r'pod', r'^=[a-zA-Z0-9_]+', PodGrammar, r'^=cut'),
OverridePatternRule(r'comment', r'#@@:(?P<token>[.a-zA-Z0-9_]+):(?P<mode>[.a-zA-Z0-9_]+) *$'),
PatternRule(r'comment', r'#.*$'),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", Grammar, r"'"),
RegionRule(r'evalstring', r"`", StringGrammar, r"`"),
PatternRule(r'number', r'0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?'),
PatternRule(r'keyword', r"(?<!->)(?:STDIN|STDERR|STDOUT|continue|do|else|elsif|eval|foreach|for|if|last|my|next|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z0-9_])"),
PatternRule(r'hash_key', r'(?<={)[A-Za-z0-9_]+(?=})'),
PatternRule(r'hash_key', r'[A-Za-z0-9_]+(?= *=>)'),
PatternRule(r'length', r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'cast', r'[\$\@\%\^\&](?= *{)'),
PatternRule(r'scalar', r"\$[\[\]<>ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])"),
PatternRule(r'array', r"@_"),
PatternRule(r'function', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*(?=-> *\()"),
PatternRule(r'scalar', r"\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'array', r"@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'hash', r"%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*"),
PatternRule(r'deref', r"[@%\$&\*](?={)"),
# match regexes
RegionRule(r'match', r'(?:(?<==~)|(?<=!~)|(?<=\()|(?<=split)) *(?P<delim>/)', StringGrammar, r'/[a-z]*'),
RegionRule(r'match', r'm *(?P<delim>[^ #a-zA-Z0-9_])', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'match', r'm(?P<delim>#)', StringGrammar, r'#[a-z]*'),
# replace regexes
RegionRule(r'replace', r's *(?P<delim>[^ a-zA-Z0-9_])', StringGrammar, r'%(delim)s', StringGrammar, r'%(delim)s[a-z]*'),
RegionRule(r'replace', r's(?P<delim>#)', StringGrammar, r'#', StringGrammar, r'#[a-z]*'),
# translate operator
RegionRule(r'translate', r'(?:y|tr) *(?P<delim>[^ a-zA-Z0-9_])', Grammar, r'%(delim)s', Grammar, r'%(delim)s[a-z]*'),
RegionRule(r'translate', r'(?:y|tr)#', Grammar, r'#', Grammar, r'#[a-z]*'),
# some more basic stuff
PatternRule(r'package', r"(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'sub', r"(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'use', r"(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'require', r"(?<=require )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'label', r'[a-zA-Z_][a-zA-Z0-9_]*:(?!:)'),
PatternRule(r'method', r"(?<=->)[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'function', r"&\$*(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*"),
PatternRule(r'builtin', r"(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])"),
# quote operator
RegionRule(r'quoted', r'q[rqwx]? *\(', Grammar, r'\)'),
RegionRule(r'quoted', r'q[rqwx]? *{', Grammar, r'}'),
RegionRule(r'quoted', r'q[rqwx]? *<', Grammar, r'>'),
RegionRule(r'quoted', r'q[rqwx]? *\[', Grammar, r'\]'),
RegionRule(r'quoted', r'q[rqwx]? *(?P<delim>[^ #])', Grammar, r'%(delim)s'),
RegionRule(r'quoted', r'q[rqwx]?#', Grammar, r'#'),
PatternRule(r'function', r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?= *\()"),
PatternRule(r'class', r"(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=->)"),
# some basic stuff
PatternRule(r'delimiter', r"[,;=\?(){}\[\]]|->|=>|(?<!:):(?!=:)"),
PatternRule(r'operator', r"\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*="),
PatternRule(r'operator', r"\+\+|\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|\*|&&|&|\|\||\||/|\^|==|//|~|=~|!~|!=|%|!|\.|x(?![a-zA-Z_])"),
PatternRule(r'noperator', r"(?:xor|or|not|ne|lt|le|gt|ge|eq|cmp|and)(?![a-zA-Z_])"),
PatternRule(r'bareword', r'(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*'),
PatternRule(r"eol", r"\n$"),
]
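# Illustrative guess (not part of the original file) at how the
# %(heredoc)s and %(delim)s placeholders above behave: lex3 presumably
# builds a region's end pattern by %-interpolating the named groups
# captured by its start pattern, so '<<EOT;' is closed by a line
# matching '^EOT$'.
def _demo_heredoc_end():
    import re
    m = re.match(r"<<(?P<heredoc>[a-zA-Z0-9_]+) *;", "<<EOT;")
    end = r'^%(heredoc)s$' % m.groupdict()
    assert end == '^EOT$'
    assert re.match(end, 'EOT')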
class PerlTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'keyword' and t.string == 'sub'
def _handle_open_token(self, currlvl, y, i):
currlvl = tab2.StackTabber._handle_open_token(self, currlvl, y, i)
return currlvl
def _handle_close_token(self, currlvl, y, i):
self._opt_pop('cont')
currlvl = tab2.StackTabber._handle_close_token(self, currlvl, y, i)
token = self.get_token(y, i)
if self.is_rightmost_token(y, i):
if token.string == '}':
self._opt_pop('cont')
else:
self._opt_append('cont', currlvl + 4)
return currlvl
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'delimiter' and token.string == ';':
self._opt_pop('cont')
elif fqname == 'heredoc.start':
self._opt_append('heredoc', None)
elif fqname == 'heredoc.end':
self._opt_pop('heredoc')
self._opt_pop('cont')
elif fqname == 'evaldoc.start':
self._opt_append('evaldoc', None)
elif fqname == 'evaldoc.end':
self._opt_pop('evaldoc')
self._opt_pop('cont')
elif fqname == 'pod.start':
self._opt_append('pod', None)
elif fqname == 'pod.end':
self._opt_pop('pod')
currlvl = 0
elif fqname == 'string.start':
self._opt_append('string', None)
elif fqname == 'string.end':
self._opt_pop('string')
if self.is_rightmost_token(y, i):
self._opt_append('cont', currlvl + 4)
if self.is_rightmost_token(y, i):
if (not fqname.startswith('pod') and
not fqname.startswith('heredoc') and
not fqname.startswith('string') and
not fqname.startswith('endblock') and
not fqname == 'eol' and
not fqname == 'comment' and
not fqname == 'null' and
token.string not in ('}', ';', '(', '{', '[', ',')):
self._opt_append('cont', currlvl + 4)
return currlvl
class Perl(mode2.Fundamental):
tabbercls = PerlTabber
grammar = PerlGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
# basic stuff
'escaped': ('magenta', 'default'),
'null': ('default', 'default'),
'delimiter': ('default', 'default'),
'sub': ('cyan', 'default'),
'number': ('default', 'default'),
'operator': ('default', 'default'),
'noperator': ('magenta', 'default'),
'endblock': ('red', 'default'),
'keyword': ('magenta', 'default'),
'cast': ('yellow', 'default'),
'scalar': ('yellow', 'default'),
'array': ('yellow', 'default'),
'deref': ('yellow', 'default'),
'hash': ('yellow', 'default'),
'hash_key': ('green', 'default'),
'comment': ('red', 'default'),
'function': ('cyan', 'default'),
'builtin': ('magenta', 'default'),
'method': ('cyan', 'default'),
'bareword': ('default', 'default'),
'label': ('cyan', 'default'),
'package': ('cyan', 'default'),
'class': ('cyan', 'default'),
'use': ('cyan', 'default'),
'require': ('cyan', 'default'),
# heredoc/evaldoc
'heredoc.start': ('green', 'default'),
'heredoc.null': ('green', 'default'),
'heredoc.end': ('green', 'default'),
'evaldoc.start': ('cyan', 'default'),
'evaldoc.null': ('cyan', 'default'),
'evaldoc.end': ('cyan', 'default'),
# pod
'pod.start': ('red', 'default'),
'pod.null': ('red', 'default'),
'pod.entry.start': ('magenta', 'default'),
'pod.entry.null': ('magenta', 'default'),
'pod.entry.end': ('magenta', 'default'),
'pod.end': ('red', 'default'),
# strings
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.escaped': ('magenta', 'default'),
'string.deref': ('yellow', 'default'),
'string.end': ('green', 'default'),
# `` strings
'evalstring.start': ('cyan', 'default'),
'evalstring.null': ('cyan', 'default'),
'evalstring.escaped': ('magenta', 'default'),
'evalstring.deref': ('yellow', 'default'),
'evalstring.end': ('cyan', 'default'),
# quoted region
'quoted': ('cyan', 'default'),
'quoted.start': ('cyan', 'default'),
'quoted.null': ('cyan', 'default'),
'quoted.end': ('cyan', 'default'),
# match regex
'match.start': ('cyan', 'default'),
'match.end': ('cyan', 'default'),
'match.null': ('cyan', 'default'),
# replace regex
'replace.start': ('cyan', 'default'),
'replace.middle0': ('cyan', 'default'),
'replace.end': ('cyan', 'default'),
'replace.null': ('cyan', 'default'),
'replace.escaped': ('magenta', 'default'),
'replace.deref': ('yellow', 'default'),
'replace.length': ('yellow', 'default'),
'replace.scalar': ('yellow', 'default'),
'replace.hash': ('yellow', 'default'),
'replace.cast': ('yellow', 'default'),
# translate regex
'translate.start': ('magenta', 'default'),
'translate.middle0': ('magenta', 'default'),
'translate.end': ('magenta', 'default'),
'translate.null': ('magenta', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(PerlSetLib(), ('C-c l',))
self.add_action_and_bindings(PerlCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PerlHashCleanup(), ('C-c h',))
#self.add_action_and_bindings(PerlHashCleanup2(), ('C-c h',))
self.add_action_and_bindings(PerlViewModulePerldoc(), ('C-c v',))
self.add_action_and_bindings(PerlViewWordPerldoc(), ('C-c p',))
self.add_action_and_bindings(PerlWrapLine(), ('M-q',))
self.add_action_and_bindings(PerlGotoFunction(), ('C-c M-g',))
self.add_action_and_bindings(PerlWhichFunction(), ('C-c w',))
self.add_action_and_bindings(PerlListFunctions(), ('C-c W',))
# visual tag matching
self.add_bindings('close-paren', (')',))
self.add_bindings('close-bracket', (']',))
self.add_bindings('close-brace', ('}',))
# perl-specific
self.functions = None
self.perllib = 'lib'
def name(self):
return "Perl"
def build_function_map(self):
b = self.window.buffer
self.functions = {}
for i in range(0, len(b.lines)):
m = regex.perl_function.match(b.lines[i])
if m:
self.functions[m.group(1)] = i
def get_functions(self):
if self.functions is None:
self.build_function_map()
return self.functions
def get_function_names(self):
functions = self.get_functions()
pairs = [[functions[key], key] for key in functions]
pairs.sort()
names = [x[1] for x in pairs]
return names
class PerlSetLib(Method):
'''Set the path(s) to find perl modules'''
args = [Argument("lib", type=type(""), prompt="Location of lib: ",
default=default.build_constant("."))]
def _execute(self, w, **vargs):
w.mode.perllib = vargs['lib']
class PerlCheckSyntax(Method):
'''Check the syntax of a perl file'''
def _execute(self, w, **vargs):
app = w.application
cmd = "perl -c -I '%s' '%s'" % (w.mode.perllib, w.buffer.path)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
app.set_error("Syntax OK")
app.data_buffer("*Perl-Check-Syntax*", output, switch_to=False)
else:
app.data_buffer("*Perl-Check-Syntax*", output)
class PerlViewModulePerldoc(Method):
'''View documentation about this file using perldoc'''
def _execute(self, w, **vargs):
cmd = "perldoc -t -T '%s'" % w.buffer.path
(status, output) = commands.getstatusoutput(cmd)
w.application.data_buffer("*Perldoc*", output, switch_to=True)
class PerlViewWordPerldoc(Method):
'''View documentation about a package or function using perldoc'''
def _try(self, w, word, asfunc=False):
if asfunc:
cmd = "PERL5LIB=%r perldoc -t -T -f '%s'" % (w.mode.perllib, word)
else:
cmd = "PERL5LIB=%r perldoc -t -T '%s'" % (w.mode.perllib, word)
(status, data) = commands.getstatusoutput(cmd)
if status == 0:
return data
else:
return None
def _show(self, w, data, word):
w.application.data_buffer("*Perldoc*", data, switch_to=True)
w.application.set_error('displaying documentation for %r' % word)
def _execute(self, w, **vargs):
token = w.get_token()
#word = w.get_word(wl=string.letters + string.digits + '_:')
word = token.string
# make sure that the name is (mostly) valid
if word is None:
w.application.set_error('no word selected')
return
elif ':' in word and '::' not in word:
w.application.set_error('invalid word: %r' % word)
return
# first try it as a package
parts = word.split('::')
while len(parts) > 0:
newword = '::'.join(parts)
data = self._try(w, newword, asfunc=False)
if data:
self._show(w, data, newword)
return
parts.pop(-1)
# then try it as a function
data = self._try(w, word, asfunc=True)
if data:
self._show(w, data, word)
else:
w.application.set_error('nothing found for %r' % word)
class PerlGotoFunction(Method):
'''Jump to a function defined in this module'''
args = [Argument("name", type(""), "perlfunction", "Goto Function: ")]
def _execute(self, w, **vargs):
name = vargs['name']
functions = w.mode.get_functions()
if name in functions:
w.goto(Point(0, functions[name]))
else:
w.application.set_error("Function %r was not found" % name)
class PerlListFunctions(Method):
'''Show the user all functions defined in this module'''
def _execute(self, w, **vargs):
names = w.mode.get_function_names()
output = "\n".join(names) + "\n"
w.application.data_buffer("*Perl-List-Functions*", output, switch_to=True)
class PerlWhichFunction(Method):
'''Show the user what function they are in'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y
name = None
while i >= 0 and name is None:
line = w.buffer.lines[i]
m = regex.perl_function.match(line)
if m:
name = m.group(1)
else:
i -= 1
if name is None:
w.application.set_error("None");
else:
w.application.set_error("line %d: %s" % (i, name))
class PerlHashCleanup(Method):
'''Correctly align assignment blocks and literal hashes'''
def _execute(self, window, **vargs):
cursor = window.logical_cursor()
b = window.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.perl_hash_cleanup,
regex.perl_assign_cleanup]
# if we aren't in a hash, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this hash block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this hash block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
# remove the old text and add the new
start_p = Point(0, start)
end_p = Point(0, end + 1)
window.kill(start_p, end_p)
window.insert_string(start_p, data)
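# A minimal standalone sketch of the alignment pass above, assuming a
# capture layout like regex.perl_hash_cleanup's (indent, key, _, sep, _,
# value); the pattern here is an illustration, not the real one.
import re
hash_line = re.compile(r'^( *)([^ ]+)( *)(=>)( *)(.*)$')
def align_hash(lines):
    groups = [hash_line.match(l).groups() for l in lines]
    indent = ' ' * min([len(g[0]) for g in groups])
    key_w = max([len(g[1]) for g in groups])
    return [indent + g[1] + ' ' * (key_w - len(g[1])) + ' ' + g[3] + ' ' + g[5]
            for g in groups]
# align_hash(['  foo => 1,', '    barbaz => 2,']) ->
# ['  foo    => 1,', '  barbaz => 2,']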
class PerlWrapLine(Method):
'''Wrap Comments and POD'''
margin = 80
comment_re = re.compile('(#+)( *)(.*)')
def _is_newline(self, t):
return t.name == 'eol'
def _is_space(self, t):
return t.name == 'null' and regex.space.match(t.string)
def _detect_line_type(self, w, y):
highlighter = w.buffer.highlights[w.mode.name()]
ltype = None
for t in highlighter.tokens[y]:
if self._is_space(t):
pass
elif t.name == 'comment':
if ltype:
return None
else:
ltype = 'comment'
elif t.name == 'eol':
return ltype
else:
return None
def _execute(self, w, **vargs):
c = w.logical_cursor()
ltype = self._detect_line_type(w, c.y)
if ltype == 'comment':
return self._fix_comments(c, w)
elif ltype == 'pod':
return self._fix_pod(c, w)
else:
w.set_error("did not detect comment or pod lines")
return
def _fix_comments(self, c, w):
w.set_error("comment!")
def _fix_pod(self, c, w):
pass
#class PerlWrapLine(Method):
# '''Wrap lines, comments, POD'''
# margin = 80
# comment_re = re.compile('^( *)(#+)( *)([^ ].*)$')
# def _execute(self, w, **vargs):
# pcursor = w.physical_cursor()
# r = w.get_region(pcursor)
# if r is None:
# return
#
# t = r[4]
# if t == 'pod':
# assert False, 'POD: %s' % repr(r)
# elif t == 'comment':
# self._wrap_comment(w)
# else:
# return
#
# def _wrap_comment(self, w):
# l = w.logical_cursor()
# m = self.comment_re.match(w.buffer.lines[l.y])
# if not m:
# assert False, 'no match oh geez'
#
# pad = m.group(1) + m.group(2) + m.group(3)
# data = m.group(4) + ' '
#
# start = l.y
# end = l.y + 1
#
# while end < len(w.buffer.lines):
# m = self.comment_re.match(w.buffer.lines[end])
# if m:
# data += m.group(4) + ' '
# end += 1
# else:
# break
#
# words = [word for word in data.split() if word]
#
# lines = [pad]
# for word in words:
# if len(lines[-1]) == len(pad):
# lines[-1] += word
# elif len(lines[-1]) + 1 + len(word) <= self.margin:
# lines[-1] += ' ' + word
# else:
# lines.append(pad + word)
#
# # remove the old text and add the new
# start_p = Point(0, start)
# end_p = Point(len(w.buffer.lines[end-1]), end-1)
# w.kill(start_p, end_p)
# w.insert(start_p, '\n'.join(lines))
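# The commented-out wrapper above is a greedy fill; the same idea as a
# compact, runnable function (pad and margin as in the original):
def greedy_wrap(words, pad, margin=80):
    lines = [pad]
    for word in words:
        if len(lines[-1]) == len(pad):
            lines[-1] += word
        elif len(lines[-1]) + 1 + len(word) <= margin:
            lines[-1] += ' ' + word
        else:
            lines.append(pad + word)
    return lines
# greedy_wrap(['a', 'bb', 'ccc'], '# ', margin=8) -> ['# a bb', '# ccc']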


@ -1,301 +0,0 @@
import commands, os.path, sets, string
import color, completer, default, mode2, method, regex, tab2
from point2 import Point
from lex3 import Grammar, PatternRule, RegionRule, OverridePatternRule
class StringGrammar(Grammar):
rules = [
PatternRule(r'octal', r'\\[0-7]{3}'),
PatternRule(r'escaped', r'\\.'),
]
class PythonGrammar(Grammar):
rules = [
PatternRule(r'functionname', r'(?<=def )[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r'classname', r'(?<=class )[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r'reserved', r'(?:True|None|False|Exception|self)(?![a-zA-Z0-9_])'),
PatternRule(r'keyword', r'(?:yield|while|try|return|raise|print|pass|or|not|lambda|is|in|import|if|global|from|for|finally|exec|except|else|elif|del|def|continue|class|break|assert|as|and)(?![a-zA-Z0-9_])'),
PatternRule(r"builtin", r'(?<!\.)(?:zip|xrange|vars|unicode|unichr|type|tuple|super|sum|str|staticmethod|sorted|slice|setattr|set|round|repr|reduce|raw_input|range|property|pow|ord|open|oct|object|max|min|map|long|locals|list|len|iter|issubclass|isinstance|int|input|id|hex|hash|hasattr|globals|getattr|frozenset|float|filter|file|execfile|eval|enumerate|divmod|dir|dict|delattr|complex|compile|coerce|cmp|classmethod|chr|callable|bool)(?![a-zA-Z0-9_])'),
PatternRule(r'methodcall', r'(?<=\. )[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
PatternRule(r'functioncall', r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\()'),
PatternRule(r'system_identifier', r'__[a-zA-Z0-9_]+__'),
PatternRule(r'private_identifier', r'__[a-zA-Z0-9_]*'),
PatternRule(r'hidden_identifier', r'_[a-zA-Z0-9_]*'),
PatternRule(r'identifier', r'[a-zA-Z_][a-zA-Z0-9_]*'),
PatternRule(r'delimiter', r'\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*='),
PatternRule(r"operator", r"\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%"),
PatternRule(r"integer", r"(?<![\.0-9a-zA-Z_])(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?(?![\.0-9a-zA-Z_])"),
PatternRule(r"float", r"(?<![\.0-9a-zA-Z_])(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)(?![\.0-9a-zA-Z_])"),
PatternRule(r"imaginary", r"(?<![\.0-9a-zA-Z_])(?:[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ])(?![\.0-9a-zA-Z_])"),
RegionRule(r'string', r'"""', StringGrammar, r'"""'),
RegionRule(r'string', r"'''", StringGrammar, r"'''"),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", StringGrammar, r"'"),
OverridePatternRule(r'comment', r'#@@:(?P<token>[.a-zA-Z0-9_]+):(?P<mode>[.a-zA-Z0-9_]+) *$'),
PatternRule(r'comment', r'#.*$'),
PatternRule(r'continuation', r'\\\n$'),
PatternRule(r'eol', r'\n$'),
]
class PythonTabber(tab2.StackTabber):
# NOTE: yield might initially seem like an endlevel name, but it's not one.
endlevel_names = ('pass', 'return', 'raise', 'break', 'continue')
startlevel_names = ('if', 'try', 'class', 'def', 'for', 'while')
def __init__(self, m):
tab2.StackTabber.__init__(self, m)
self.base_level = 0
def is_base(self, y):
if y == 0:
# we always know that line 0 is indented at the 0 level
return True
tokens = self.get_tokens(y)
t0 = tokens[0]
if t0.name == 'keyword' and t0.string in self.startlevel_names:
# if a line has no leading whitespace and begins with something like
# 'while', 'class', 'def', 'if', etc. then we can start at it
return True
else:
# otherwise, we can't be sure that its level is correct
return False
def get_level(self, y):
self._calc_level(y)
return self.lines.get(y)
def _calc_level(self, y):
# ok, so first remember where we are going, and find our starting point
target = y
while not self.is_base(y) and y > 0:
y -= 1
# ok, so clear out our stack and then loop over each line
self.popped = False
self.markers = []
while y <= target:
self.continued = False
self.last_popped = self.popped
self.popped = False
tokens = self.get_tokens(y)
currlvl = self.get_curr_level()
# if we were continuing, let's pop that previous continuation token
# and note that we're continuing
if self.markers and self.markers[-1].name == 'cont':
self.continued = True
self._pop()
# if we haven't reached the target-line yet, we can detect how many
# levels of unindention, if any, the user chose on previous lines
if y < target and len(tokens) > 2:
if self.token_is_space(y, 0):
l = len(tokens[0].string)
else:
l = 0
while currlvl > l:
self._pop()
currlvl = self.get_curr_level()
self.popped = True
# ok, having done all that, we can now process each token on the line
for i in range(0, len(tokens)):
currlvl = self._handle_token(currlvl, y, i)
# so let's store the level for this line, as well as some debugging
self.lines[y] = currlvl
self.record[y] = tuple(self.markers)
y += 1
def _handle_close_token(self, currlvl, y, i):
try:
return tab2.StackTabber._handle_close_token(self, currlvl, y, i)
except:
return currlvl
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if fqname == 'continuation':
# we need to pop the indentation level over, unless last line was
# also a continued line
if self.continued:
self._opt_append('cont', currlvl)
else:
self._opt_append('cont', currlvl + 4)
elif fqname == 'string.start':
# while inside of a string, there is no indentation level
self._opt_append('string', None)
elif fqname == 'string.end':
# since we're done with the string, resume our indentation level
self._opt_pop('string')
elif fqname == 'delimiter':
# we only really care about a colon as part of a one-line statement,
# i.e. "while ok: foo()" or "if True: print 3"
if token.string == ':':
if self.markers and self.markers[-1].name in ('[', '{'):
pass
elif self.is_rightmost_token(y, i):
pass
else:
self._pop()
elif fqname == 'keyword':
if token.string in self.endlevel_names:
# we know we'll unindent at least once
self._pop()
self.popped = True
elif token.string in self.startlevel_names and self.is_leftmost_token(y, i):
# we know we will indent exactly once
self._append(token.string, currlvl + 4)
elif token.string in ('elif', 'else') and self.is_leftmost_token(y, i):
# we know we'll unindent at least to the first if/elif
if not self.popped and not self.last_popped:
self._pop_until('if', 'elif')
currlvl = self.get_curr_level()
self._append(token.string, currlvl + 4)
elif token.string == 'except' and self.is_leftmost_token(y, i):
# we know we'll unindent at least to the first try
if not self.popped and not self.last_popped:
self._pop_until('try')
currlvl = self.get_curr_level()
self._append(token.string, currlvl + 4)
elif token.string == 'finally' and self.is_leftmost_token(y, i):
# we know we'll unindent at least to the first try/except
if not self.popped and not self.last_popped:
self._pop_until('try', 'except')
currlvl = self.get_curr_level()
self._append(token.string, currlvl + 4)
return currlvl
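# A toy, self-contained version of the marker-stack idea (the names here
# are stand-ins; tab2.StackTabber's real API differs): the indentation
# level is whatever the top marker carries.
class ToyStack(object):
    def __init__(self):
        self.markers = []              # list of (name, level) pairs
    def level(self):
        if self.markers:
            return self.markers[-1][1]
        return 0
    def open_block(self, name):        # 'if', 'def', 'while', ...
        self.markers.append((name, self.level() + 4))
    def close_block(self):             # 'return', 'pass', 'break', ...
        if self.markers:
            self.markers.pop()
# s = ToyStack(); s.open_block('def'); s.open_block('if')  -> s.level() == 8
# s.close_block()                                          -> s.level() == 4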
class Python(mode2.Fundamental):
tabbercls = PythonTabber
grammar = PythonGrammar
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
'keyword': ('cyan', 'default'),
'reserved': ('magenta', 'default'),
'builtin': ('cyan', 'default'),
'functionname': ('blue', 'default'),
'classname': ('green', 'default'),
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.octal': ('magenta', 'default'),
'string.escaped': ('magenta', 'default'),
'string.format': ('yellow', 'default'),
'string.end': ('green', 'default'),
'integer': ('default', 'default'),
'float': ('default', 'default'),
'imaginary': ('default', 'default'),
'comment': ('red', 'default'),
'continuation': ('red', 'default'),
'system_identifier': ('cyan', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
# tag matching
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
# add python-specific methods
self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',))
# highlighting
self.pythonlib = "."
def name(self):
return "Python"
class PythonSetLib(method.Method):
'''Set the path(s) to find python modules'''
args = [method.Argument("lib", type=type(""), prompt="Python Path: ",
default=default.build_constant("."))]
def _execute(self, w, **vargs):
w.mode.pythonlib = vargs['lib']
class PythonCheckSyntax(method.Method):
'''Check the syntax of the current python file'''
def _execute(self, w, **vargs):
mod = os.path.splitext(os.path.basename(w.buffer.path))[0]
cmd = "PYTHONPATH=%s python -c 'import %s'" % (w.mode.pythonlib, mod)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
w.application.set_error("Syntax OK")
w.application.data_buffer("python-syntax", output, switch_to=False)
else:
output = output + "\ncommand exit status: %d" % (status)
w.application.data_buffer("python-syntax", output, switch_to=True)
class PythonDictCleanup(method.Method):
'''Align assignment blocks and literal dictionaries'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
b = w.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.python_dict_cleanup,
regex.python_assign_cleanup]
# if we aren't in a dict, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a python dict line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this dict block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this dict block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
if sep == '=':
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
else:
data += indent_pad + key + sep + ' ' + key_pad + value + '\n'
# remove the old text and add the new
start_p = Point(0, start)
end_p = Point(0, end + 1)
w.kill(start_p, end_p)
w.insert_string(start_p, data)
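# Hand-worked illustration of the two output formats above. For '='
# (assignment blocks) the padding sits between the key and the separator;
# for ':' (dict literals) the separator hugs the key and the padding
# precedes the value:
#   x      = 1          'x':      1
#   longer = 2          'longer': 2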


@ -1,108 +0,0 @@
import re, sets, string
import color, method, minibuffer, mode2, search
from point2 import Point
class Replace(mode2.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.actions = {}
self.bindings = {}
self.add_action_and_bindings(ReplaceAll(), ('a', '!',))
self.add_action_and_bindings(ReplaceOne(), ('y', 'SPACE',))
self.add_action_and_bindings(SkipReplace(), ('n', 'DELETE',))
self.add_action_and_bindings(CancelReplace(), ('q', 'RETURN', 'C-]', 'C-n', 'C-p', 'C-a', 'C-e', 'C-f', 'C-b'))
m = w.buffer.method
found = _find_next(m, False)
if not found:
w.set_error('%r was not found' % m.before)
raise minibuffer.MiniBufferError
_set_prompt(m)
def name(self):
return "Replace"
class ReplaceOne(method.Method):
def execute(self, w, **vargs):
m = w.buffer.method
_replace(m)
_find_next(m, False)
_finish(m, w)
class SkipReplace(method.Method):
def execute(self, w, **vargs):
m = w.buffer.method
_find_next(m, False)
_finish(m, w)
class ReplaceAll(method.Method):
def execute(self, w, **vargs):
m = w.buffer.method
while m.p1 is not None:
_replace(m)
_find_next(m, False)
_end(w)
w.set_error("Replace ended")
class CancelReplace(method.Method):
def execute(self, w, **vargs):
_end(w)
w.set_error("Replace cancelled")
def _find_next(m, move=False):
s = m.before
w = m.old_window
c = w.logical_cursor()
try:
r = re.compile(search.escape_literal(s))
except:
(m.p1, m.p2) = (None, None)
return False
#newc = search.find_next(r, w, move, start=c.add(1, 0))
newc = search.find_next(r, w, move, start=c.add(0, 0))
if newc:
(m.p1, m.p2) = newc
return True
else:
(m.p1, m.p2) = (None, None)
return False
def _set_prompt(m):
w = m.old_window
if m.p1 is None:
w.application.mini_prompt = '%r was not found' % m.before
return
(x, y) = m.p1.xy()
count = 0
while y < len(w.buffer.lines):
count += w.buffer.lines[y][x:].count(m.before)
y += 1
x = 0
if count > 1:
p = 'Replace %r with %r [ynaq] (%d occurrences)?' % (m.before, m.after, count)
else:
p = 'Replace %r with %r [ynaq] (1 occurrence)?' % (m.before, m.after)
w.application.mini_prompt = p
def _replace(m):
m.old_window.buffer.delete(m.p1, m.p2)
if m.after:
m.old_window.buffer.insert_string(m.p1, m.after)
def _finish(m, w):
if m.p1 is None:
_end(w)
w.set_error("Replace ended")
else:
_set_prompt(m)
def _end(w):
w.application.close_mini_buffer()
w.application.clear_highlighted_ranges()
w.buffer.method.old_cursor = None
w.buffer.method.old_window = None
assert not w.application.mini_active
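# A standalone sketch of the search step behind _find_next, with re.escape
# standing in for search.escape_literal (an assumption -- the real helper
# may differ); matches are returned as plain (x, y) pairs.
import re
def find_literal(lines, s, start_y=0, start_x=0):
    r = re.compile(re.escape(s))
    for y in range(start_y, len(lines)):
        if y == start_y:
            x = start_x
        else:
            x = 0
        m = r.search(lines[y], x)
        if m is not None:
            return ((m.start(), y), (m.end(), y))   # (p1, p2)
    return (None, None)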

View File

@ -1,140 +0,0 @@
import re, sets, string
import color, method, minibuffer, mode2, search
from point2 import Point
selected_color = 'magenta'
unselected_color = 'yellow'
class Search(mode2.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
# clear out all the defaults that we don't want/need
self.actions = {}
self.bindings = {}
# add some useful bindings
self.add_action_and_bindings(SearchNext(), ('C-s',))
self.add_action_and_bindings(SearchPrevious(), ('C-r',))
self.add_action_and_bindings(EndSearch(), ('RETURN', 'C-n', 'C-p', 'C-a', 'C-e', 'C-f', 'C-b',))
self.add_action_and_bindings(CancelSearch(), ('C-]',))
self.add_action_and_bindings(SearchDeleteLeft(), ('DELETE', 'BACKSPACE',))
self.add_action_and_bindings(SearchDeleteLeftWord(), ('M-DELETE', 'M-BACKSPACE',))
# create all the insert actions for the character ranges we like
for collection in (string.letters, string.digits, string.punctuation):
for c in collection:
self.add_action_and_bindings(InsertSearchString(c), (c,))
self.add_action_and_bindings(InsertSearchString(' '), ('SPACE',))
def name(self):
return "Search"
def _make_regex(w, s):
try:
if w.buffer.method.is_literal:
s = search.escape_literal(s)
return re.compile(s, re.IGNORECASE)
else:
return re.compile(s)
except:
raise search.IllegalPatternError, "failed to compile: %r" % s
class SearchNext(method.Method):
def execute(self, w, **vargs):
w.buffer.method.direction = 'next'
s = w.buffer.make_string()
if s:
try:
r = _make_regex(w, s)
search.find_next(r, w.buffer.method.old_window, move=True)
except search.IllegalPatternError:
w.application.clear_highlighted_ranges()
else:
w.buffer.set_data(w.application.last_search)
class SearchPrevious(method.Method):
def execute(self, w, **vargs):
w.buffer.method.direction = 'previous'
if not w.buffer.make_string():
return
else:
s = w.buffer.make_string()
w2 = w.buffer.method.old_window
try:
r = _make_regex(w, s)
search.find_previous(r, w2, move=True)
except search.IllegalPatternError:
w.application.clear_highlighted_ranges()
class EndSearch(method.Method):
def execute(self, w, **vargs):
old_w = w.buffer.method.old_window
old_c = w.buffer.method.old_cursor
_end(w)
old_w.set_mark_point(old_c)
w.set_error("Mark set to search start")
class CancelSearch(method.Method):
def execute(self, w, **vargs):
w.buffer.method.old_window.goto(w.buffer.method.old_cursor)
_end(w)
w.set_error("Search cancelled")
class SearchDeleteLeft(method.Method):
def execute(self, w, **vargs):
w.left_delete()
_post_delete(w)
class SearchDeleteLeftWord(method.Method):
def execute(self, w, **vargs):
w.kill_left_word()
_post_delete(w)
def _post_delete(w):
old_cursor = w.buffer.method.old_cursor
old_w = w.buffer.method.old_window
old_w.goto(old_cursor)
if not w.buffer.make_string():
w.application.clear_highlighted_ranges()
return
s = w.buffer.make_string()
w2 = w.buffer.method.old_window
try:
r = _make_regex(w, s)
if w.buffer.method.direction == 'next':
search.find_next(r, w2, move=False)
else:
search.find_previous(r, w2, move=False)
except search.IllegalPatternError:
w.application.clear_highlighted_ranges()
class InsertSearchString(method.Method):
def __init__(self, s):
self.name = 'insert-search-string-%s' % (s)
self.string = s
self.args = []
self.help = None
def execute(self, w, **vargs):
w.insert_string_at_cursor(self.string)
s = w.buffer.make_string()
if not s:
w.application.clear_highlighted_ranges()
return
else:
try:
r = _make_regex(w, s)
w2 = w.buffer.method.old_window
if w.buffer.method.direction == 'next':
search.find_next(r, w2, move=False)
else:
search.find_previous(r, w2, move=False)
except search.IllegalPatternError:
w.application.clear_highlighted_ranges()
def _end(w):
w.application.close_mini_buffer()
w.application.clear_highlighted_ranges()
w.application.last_search = w.buffer.make_string()
#w.buffer.method.old_cursor = None
#w.buffer.method.old_window = None


@ -1,97 +0,0 @@
import color, mode2, tab2
from lex3 import Grammar, PatternRule, RegionRule
class StringGrammar(Grammar):
rules = [
PatternRule(r'escaped', r'\\.'),
PatternRule(r'variable', r"\${(?:[a-zA-Z0-9_]+|\?\$)}"),
PatternRule(r"variable", r"\$[^({][a-zA-Z0-9_]*"),
PatternRule(r'variable', r"\$(?=\()"),
]
class ShGrammar(Grammar):
rules = [
PatternRule(r'function', r'[a-zA-Z_][a-zA-Z0-9_]*(?= *\(\))'),
PatternRule(r'reserved', r"(?:case|done|do|elif|else|esac|fi|for|function|if|in|select|then|until|while|time)(?![a-zA-Z0-9_=/])"),
PatternRule(r'builtin', r"(?:source|alias|bg|bind|break|builtin|cd|command|compgen|complete|declare|dirs|disown|echo|enable|eval|exec|exit|export|fc|fg|getops|hash|help|history|jobs|kill|let|local|logout|popd|printf|pushd|pwd|readonly|read|return|set|shift|shopt|suspend|test|times|trap|type|ulimit|umask|unalias|unset|wait)(?![a-zA-Z0-9_=/])"),
PatternRule(r'operator', r"(?:-eq|-ne|-gt|-lt|-ge|-le| = | != )"),
PatternRule(r'delimiter', r";;|[\[\]\(\);\{\}|&><]"),
RegionRule(r'eval', '`', StringGrammar, '`'),
RegionRule(r'neval', r'\$\(', StringGrammar, r'\)'),
PatternRule(r'variable', r"(?:^|(?<= ))[a-zA-Z_][a-zA-Z_][a-zA-Z0-9_]*(?==)"),
PatternRule(r'variable', r"\${(?:[a-zA-Z0-9_]+|\?\$)}"),
PatternRule(r"variable", r"\$[^({][a-zA-Z0-9_]*"),
PatternRule(r'variable', r"\$(?=\()"),
RegionRule(r'string', "'", Grammar(), "'"),
RegionRule(r'string', '"', StringGrammar, '"'),
PatternRule(r'comment', r'#.*$'),
PatternRule(r'bareword', r'[a-zA-Z0-9_-]+'),
PatternRule(r'continuation', r'\\\n$'),
PatternRule(r'eol', r'\n$'),
]
class ShTabber(tab2.StackTabber):
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'function'
def _handle_close_token(self, currlvl, y, i):
s = self.get_token(y, i).string
if s == ')' and self.markers and self._peek_name() == "case":
# we have to ignore ) when used in "case" statements.
return currlvl
else:
return tab2.StackTabber._handle_close_token(self, currlvl, y, i)
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
fqname = token.fqname()
if token.name == 'continuation':
self._opt_append("cont", currlvl + 4)
elif token.name == 'eol':
self._opt_pop("cont")
return currlvl
class Sh(mode2.Fundamental):
grammar = ShGrammar
tabbercls = ShTabber
opentokens = ('delimiter', 'reserved',)
opentags = {'(': ')', '[': ']', '{': '}',
'do': 'done', 'then': 'fi', 'case': 'esac'}
closetokens = ('delimiter', 'reserved',)
closetags = {')': '(', ']': '[', '}': '{',
'done': 'do', 'fi': 'then', 'esac': 'case'}
colors = {
'builtin': ('cyan', 'default', 'bold'),
'function': ('magenta', 'default', 'bold'),
'reserved': ('magenta', 'default', 'bold'),
'variable': ('yellow', 'default', 'bold'),
'delimiter': ('default', 'default', 'bold'),
'operator': ('magenta', 'default', 'bold'),
'string.start': ('green', 'default'),
'string.variable': ('yellow', 'default'),
'string.null': ('green', 'default'),
'string.end': ('green', 'default'),
'eval.start': ('cyan', 'default'),
'eval.variable': ('yellow', 'default'),
'eval.null': ('cyan', 'default'),
'eval.end': ('cyan', 'default'),
#'neval.start': ('cyan', 'default'),
#'neval.end': ('cyan', 'default'),
'neval.start': ('yellow', 'default'),
'neval.variable': ('yellow', 'default'),
'neval.null': ('cyan', 'default'),
'neval.end': ('yellow', 'default'),
'comment': ('red', 'default'),
'continuation': ('red', 'default'),
}
def name(self):
return "Sh"


@ -1,136 +0,0 @@
import mode2, tab2
from lex3 import Grammar, PatternRule, NocasePatternRule, RegionRule, NocaseRegionRule
from mode_python import StringGrammar
class PlPgSqlGrammar(Grammar):
rules = [
PatternRule(r'comment', r'--.*\n$'),
RegionRule(r'comment', r'/\*', Grammar, r'\*/'),
PatternRule(r'delimiter', r':=|[():;,\.\$\[\]]'),
NocasePatternRule(r'attribute', r'(?:check|exists|unique|not null|default|primary key|minvalue|foreign key|references)(?![A-Za-z0-9_])'),
NocasePatternRule(r'keyword', r'(?:declare|begin|end|raise notice|return)'),
NocasePatternRule(r'operator', r'(?:case|when|then|else|end|not|and|or|is not|is|in|not in)(?![A-Za-z0-9_])'),
NocasePatternRule(r'keyword', r'(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create or replace view|create language|create operator|create type)(?![A-Za-z0-9_])'),
NocasePatternRule(r'pseudokeyword', r'(?:returns|language|right join|left join|inner join|outer join|join|where|null|true|false|into|values|as|from|order by|asc|desc|limit|distinct|cascade|using|on)(?![A-Za-z0-9_])'),
NocasePatternRule(r'type', r'(?:void|row|serial|varchar|float|integer|int|text|timestamptz|timestamp|datetz|date|timetz|time|boolean|bool)(?![A-Za-z0-9_])'),
PatternRule(r'builtin', r'(?:nextval|current_timestamp|current_time|current_date)(?![A-Za-z0-9_])'),
RegionRule(r'string', "''", StringGrammar, "''"),
RegionRule(r'quoted', '"', StringGrammar, '"'),
PatternRule(r'bareword', r'[A-Za-z0-9_]+'),
PatternRule(r'empty', r'^ *\n$'),
PatternRule(r'eol', r'\n'),
]
class FunctionGrammar(Grammar):
rules = [
PatternRule(r'comment', r'--.*\n$'),
RegionRule(r'comment', r'/\*', Grammar, r'\*/'),
PatternRule(r'delimiter', r':=|[():;,\.\$\[\]]'),
PatternRule(r'name', r'[a-zA-Z_][a-zA-Z0-9_]*(?=\()'),
NocasePatternRule(r'keyword', r'(?:as|returns|language)'),
NocasePatternRule(r'type', r'(?:void|row|serial|varchar|float|integer|int|text|timestamptz|timestamp|datetz|date|timetz|time|boolean|bool)(?![A-Za-z0-9_])'),
NocasePatternRule(r'language', r'(?<=language ) *[a-zA-Z_][a-zA-Z0-9_]+'),
RegionRule(r'definition', "'", PlPgSqlGrammar, "'(?!')"),
PatternRule(r'bareword', r'[A-Za-z0-9_]+'),
PatternRule(r'empty', r'^ *\n$'),
PatternRule(r'eol', r'\n'),
]
class SqlGrammar(Grammar):
rules = [
PatternRule(r'comment', r'--.*\n$'),
RegionRule(r'comment', r'/\*', Grammar, r'\*/'),
PatternRule(r'delimiter', r':=|[():;,\.\$\[\]]'),
NocaseRegionRule(r'function', r'create function', FunctionGrammar, r';'),
NocaseRegionRule(r'function', r'create or replace function', FunctionGrammar, r';'),
NocasePatternRule(r'attribute', r'(?:check|exists|unique|not null|default|primary key|minvalue|foreign key|references)(?![A-Za-z0-9_])'),
NocasePatternRule(r'operator', r'(?:case|when|then|else|end|not|and|or|is not|is|in|not in)(?![A-Za-z0-9_])'),
NocasePatternRule(r'keyword', r'(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create or replace view|create language|create operator|create type)(?![A-Za-z0-9_])'),
NocasePatternRule(r'pseudokeyword', r'(?:returns|language|right join|left join|inner join|outer join|join|where|null|true|false|into|values|as|from|order by|asc|desc|limit|distinct|cascade|using|on)(?![A-Za-z0-9_])'),
NocasePatternRule(r'type', r'(?:void|row|serial|varchar|float|integer|int|text|timestamptz|timestamp|datetz|date|timetz|time|boolean|bool)(?![A-Za-z0-9_])'),
PatternRule(r'builtin', r'(?:nextval|current_timestamp|current_time|current_date)(?![A-Za-z0-9_])'),
RegionRule(r'string', "'", StringGrammar, "'"),
RegionRule(r'quoted', '"', StringGrammar, '"'),
PatternRule(r'bareword', r'[A-Za-z0-9_]+'),
PatternRule(r'empty', r'^ *\n$'),
PatternRule(r'eol', r'\n'),
]
class SqlTabber(tab2.StackTabber):
wst = ('null', 'eol',)
def is_base(self, y):
if y == 0:
return True
highlighter = self.mode.window.buffer.highlights[self.mode.name()]
if not highlighter.tokens[y]:
return False
t = highlighter.tokens[y][0]
return t.name == 'function'
def _handle_other_token(self, currlvl, y, i):
token = self.get_token(y, i)
if token.name == 'delimiter' and token.string == ';':
self._opt_pop('cont')
elif token.name == 'keyword':
if token.string == 'declare':
self._opt_append('declare', currlvl + 4)
elif token.string == 'begin':
currlvl -= 4
elif token.string == 'end':
self._opt_pop('declare')
currlvl = self.get_curr_level()
if self.is_rightmost_token(y, i):
if not self._empty() and token.name == 'continuation':
self._opt_append('cont', currlvl + 4)
elif token.name == 'eol' and not self.markers:
self._opt_pop("cont")
return currlvl
class Sql(mode2.Fundamental):
grammar = SqlGrammar
tabbercls = SqlTabber
opentokens = ('delimiter',)
opentags = {'(': ')', '[': ']', '{': '}'}
closetokens = ('delimiter',)
closetags = {')': '(', ']': '[', '}': '{'}
colors = {
'comment': ('red', 'default'),
'operator': ('yellow', 'default'),
'attribute': ('magenta', 'default'),
'keyword': ('cyan', 'default'),
'pseudokeyword': ('cyan', 'default'),
'type': ('green', 'default'),
'builtin': ('yellow', 'default'),
'quoted': ('yellow', 'default'),
'string.start': ('green', 'default'),
'string.null': ('green', 'default'),
'string.escaped': ('magenta', 'default'),
'string.octal': ('magenta', 'default'),
'string.end': ('green', 'default'),
'bareword': ('default', 'default'),
'function.start': ('cyan', 'default'),
'function.null': ('default', 'default'),
'function.name': ('magenta', 'default'),
'function.language': ('magenta', 'default'),
'function.end': ('default', 'default'),
'function.definition.start': ('magenta', 'default'),
'function.definition.bareword': ('magenta', 'default'),
'function.definition.null': ('magenta', 'default'),
'function.definition.end': ('magenta', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "Sql"


@ -1,88 +0,0 @@
import color, mode2, method, ispell
from lex3 import Token, Rule, PatternRule, RegionRule, Grammar
class WordRule(PatternRule):
def __init__(self):
PatternRule.__init__(self, r'word', pattern=r"[a-zA-Z][a-zA-Z-\']*[a-zA-Z](?=$|[^a-zA-Z0-9-_])")
def _spelled_ok(self, word):
if ispell.can_spell():
speller = ispell.get_speller()
return speller.check(word, caps=False, title=False)
else:
return True
def lex(self, lexer, parent, m):
if m:
s = m.group(0)
if self._spelled_ok(s):
token = Token('word', self, lexer.y, lexer.x, s, None, parent, {})
else:
token = Token('misspelled', self, lexer.y, lexer.x, s, None, parent, {})
token.color = lexer.get_color(token)
lexer.x += len(s)
yield token
raise StopIteration
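# Hand-run behavior of WordRule.lex with the speller stubbed out: a word
# the speller accepts lexes as 'word', anything else as 'misspelled'.
class StubSpeller(object):
    def check(self, word, caps=False, title=False):
        return word in ('hello', 'world')
def classify(word, speller=StubSpeller()):
    if speller.check(word):
        return 'word'
    return 'misspelled'
# classify('hello') -> 'word'; classify('helo') -> 'misspelled'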
class ContinuedRule(RegionRule):
def __init__(self):
RegionRule.__init__(self, r'cont', r'[a-zA-Z0-9_]+- *$', Grammar, r'^ *(?:[^ ]+|$)')
class TextGrammar(Grammar):
rules = [
ContinuedRule(),
WordRule(),
PatternRule(r'punct', r'[^a-zA-Z0-9_]'),
PatternRule(r'stuff', r'[a-zA-Z0-9_]+'),
]
class Text(mode2.Fundamental):
grammar = TextGrammar
colors = {
'misspelled': ('red', 'default'),
'cont.start': ('default', 'default'),
'cont.end': ('default', 'default'),
'word': ('default', 'default'),
'punct': ('default', 'default'),
'stuff': ('default', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_action_and_bindings(LearnWord(), ('C-c l',))
self.add_action_and_bindings(TextInsertSpace(), ('SPACE',))
self.add_action_and_bindings(method.WrapParagraph(), ('M-q',))
def name(self):
return "Text"
class TextInsertSpace(method.Method):
limit = 80
wrapper = method.WrapParagraph
def execute(self, w, **vargs):
w.insert_string_at_cursor(' ')
cursor = w.logical_cursor()
i = cursor.y
if len(w.buffer.lines[i]) > self.limit:
self.wrapper().execute(w)
class LearnWord(method.Method):
def execute(self, w, **vargs):
if not ispell.can_spell():
w.application.set_error('Spelling support is unavailable')
return
cursor = w.logical_cursor()
word = None
for token in w.buffer.highlights[w.mode.name()].tokens[cursor.y]:
if (token.x <= cursor.x and
token.end_x() > cursor.x and
token.name == 'misspelled'):
word = token.string
if word is None:
w.application.set_error('No misspelled word was found')
return
speller = ispell.get_speller()
speller.learn(word)
w.application.set_error("Added %r to personal dictionary" % (word))
# cheap way to relex just this word; there should really be a function
w.insert_string_at_cursor(' ')
w.left_delete()


@ -1,64 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
#from mode_xml import TagGrammar
from mode_perl import StringGrammar
class DirectiveGrammar(Grammar):
rules = [
PatternRule(r'comment', r'#(?:[^%]|%(?!\]))*'),
PatternRule(r'keyword', r'(?:BLOCK|CALL|CASE|CATCH|CLEAR|DEBUG|DEFAULT|FINAL|FILTER|FOREACH|ELSIF|ELSE|END|GET|IF|INCLUDE|INSERT|IN|LAST|MACRO|META|NEXT|PERL|PROCESS|RAWPERL|RETURN|SET|STOP|SWITCH|TAGS|THROW|TRY|UNLESS|USE|WHILE|WRAPPER)(?![A-Za-z0-9_])'),
RegionRule(r'string', r'"', StringGrammar, r'"'),
RegionRule(r'string', r"'", StringGrammar, r"'"),
]
class TagGrammar(Grammar):
rules = [
RegionRule(r'directive', r'\[\%', DirectiveGrammar, r'%%\]'),
RegionRule(r'string', r'"', Grammar, r'"'),
RegionRule(r'string', r"'", Grammar, r"'"),
PatternRule(r'namespace', r'[a-zA-Z_]+:'),
PatternRule(r'attrname', r'[^ =>\n]+(?==)'),
PatternRule(r'name', r'[^\[\] =>\n]+'),
]
class TemplateGrammar(Grammar):
rules = [
RegionRule(r'comment', r'<!--', Grammar, r'-->'),
RegionRule(r'directive', r'\[\%', DirectiveGrammar, r'%%\]'),
RegionRule(r'tag', r'</?', TagGrammar, r'/?>'),
]
class Template(mode2.Fundamental):
grammar = TemplateGrammar
colors = {
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'directive.start': ('magenta', 'default'),
'directive.comment': ('red', 'default'),
'directive.keyword': ('cyan', 'default'),
'directive.string.start': ('green', 'default'),
'directive.string.escaped': ('magenta', 'default'),
'directive.string.octal': ('magenta', 'default'),
'directive.string.null': ('green', 'default'),
'directive.string.end': ('green', 'default'),
'directive.null': ('magenta', 'default'),
'directive.end': ('magenta', 'default'),
'tag.start': ('default', 'default'),
'tag.namespace': ('magenta', 'default'),
'tag.name': ('blue', 'default'),
'tag.attrname': ('cyan', 'default'),
'tag.string.start': ('green', 'default'),
'tag.string.null': ('green', 'default'),
'tag.string.end': ('green', 'default'),
'tag.end': ('default', 'default'),
}
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
def name(self):
return "Template"


@ -1,32 +0,0 @@
import color, method, mode2
from point2 import Point
class Which(mode2.Fundamental):
def __init__(self, w):
mode2.Fundamental.__init__(self, w)
old_mode = w.buffer.method.old_window.mode
self.bindings = dict(old_mode.bindings)
def handle_token(self, t):
'''self.handle_token(token): handles input "token"'''
self.window.active_point = None
try:
act = mode2.Handler.handle_token(self, t)
if act is None:
return
else:
s = '%r is %r: %s' % (self.last_sequence, act.name, act.help)
self.window.application.set_error(s)
self._end()
except Exception, e:
if mode2.DEBUG:
raise
else:
err = "%s in mode '%s'" % (e, self.name())
self.window.application.set_error(err)
self._end()
def name(self):
return "Which"
def _end(self):
self.window.application.close_mini_buffer()
self.window.buffer.method.old_cursor = None
self.window.buffer.method.old_window = None


@ -1,36 +0,0 @@
import color, mode2
from lex3 import Grammar, PatternRule, RegionRule
class TagGrammar(Grammar):
rules = [
RegionRule(r'string', r'"', Grammar, r'"'),
RegionRule(r'string', r"'", Grammar, r"'"),
PatternRule(r'namespace', r'[a-zA-Z_]+:'),
PatternRule(r'attrname', r'[^ =>\n]+(?==)'),
PatternRule(r'name', r'[^\[\] =>\n]+'),
]
class XMLGrammar(Grammar):
rules = [
# TODO: how does cdata work again?
RegionRule(r'comment', r'<!--', Grammar, r'-->'),
RegionRule(r'tag', r'<', TagGrammar, r'/?>'),
]
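# One plausible answer to the CDATA question above (untested against
# lex3's semantics): treat CDATA as an opaque region, like comments:
#   RegionRule(r'cdata', r'<!\[CDATA\[', Grammar, r'\]\]>'),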
class XML(mode2.Fundamental):
grammar = XMLGrammar
colors = {
'comment.start': ('red', 'default'),
'comment.null': ('red', 'default'),
'comment.end': ('red', 'default'),
'tag.start': ('default', 'default'),
'tag.namespace': ('magenta', 'default'),
'tag.name': ('blue', 'default'),
'tag.attrname': ('cyan', 'default'),
'tag.string.start': ('green', 'default'),
'tag.string.null': ('green', 'default'),
'tag.string.end': ('green', 'default'),
'tag.end': ('default', 'default'),
}
def name(self):
return "XML"