parent 077417a363
commit 6f8fdb638a
BUGS | 3
@@ -1,3 +1,6 @@
+2007/06/05:
+search back misses some results on the same line as the search
+
 2006/07/04:
 when in the minibuffer, certain key sequences don't seem to get picked up.
 
lex_c.py | 2
@@ -20,7 +20,7 @@ class CGrammar(lex.Grammar):
          'action':lex.make_token},

         {'name': 'header',
-         'expr': r'''(?<=#include) +(?:<[A-Za-z0-9_]+\.h?>|"[A-Za-z0-9_]+\.h")''',
+         'expr': r'''(?<=#include) +(?:<[A-Za-z/0-9_]+\.h?>|"[A-Za-z/0-9_]+\.h")''',
          'action': lex.make_token},

         {'name': 'constant',
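The only change to the C grammar is adding '/' to both character classes, so include directives that point into subdirectories are tokenized as headers too. A quick standalone check of the new pattern, using nothing editor-specific:

    import re

    # The updated 'header' expression from lex_c.py, copied verbatim.
    header = re.compile(r'''(?<=#include) +(?:<[A-Za-z/0-9_]+\.h?>|"[A-Za-z/0-9_]+\.h")''')

    print(header.search('#include <stdio.h>').group())      # matched before and after the fix
    print(header.search('#include <sys/types.h>').group())  # only matches with '/' allowed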
lex_mutt.py | 42
@@ -1,72 +1,50 @@
-import os
-import ispell, lex
+import lex, lex_text
 
-def make_token(rule, m):
-    '''return a token from a hit'''
-    return(lex.Token(rule.name, m.start(), m.end(), m.group(0)))
-
-def make_token_spell(rule, m):
-    '''return a token from a hit'''
-    # first let's figure out the actual word we need to check
-    if rule.name == 'continued word':
-        word = '%s%s' % (m.group(1), m.group(2))
-    else:
-        word = m.group(0)
-
-    # okay, now we check the spelling; we don't spell-check all caps words
-    if ispell.can_spell() and \
-       not ispell.get_speller().check(word, caps=False, title=False):
-        name = "misspelled %s" % rule.name
-    else:
-        name = rule.name
-
-    return lex.Token(name, m.start(), m.end(), m.group(0))
 
 class MuttGrammar(lex.Grammar):
     GRAMMAR_LIST = [
         {'name': 'header',
          'expr': r'(?:^|(?<=\n))(?:From|To|Cc|Bcc|Subject|Reply-To|In-Reply-To|Delivered-To|Date):',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote1',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){1} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote2',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){2} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote3',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){3} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'email',
          'expr': r'(?:^|(?<=[ :\n]))<?[^<>@\n ]+@(?:[^<>@\.\n ]+\.)*[^<>@\.\n ]+>?',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'url',
          'expr': r'(?:^|(?<=[ \n]))(?:http|https|ftp|sftp|file|smtp|smtps|torrent|news|jabber|irc|telnet)://(?:[^\.\n ]+\.)*[^\.\n ]+',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'continued word',
          'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
-         'action': make_token_spell,
+         'action': lex_text.make_token_spell,
          },
 
         {'name': 'word',
          'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
-         'action': make_token_spell,
+         'action': lex_text.make_token_spell,
          },
 
         {'name': 'stuff',
          'expr': r"""[^ \n]+""",
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': "default",
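The duplicated token helpers are deleted here in favor of the shared lex.make_token and lex_text.make_token_spell. The unchanged quote rules are worth a note: each quoteN pattern consumes whole groups of three '>' markers before the final one, two, or three, so quote colors cycle every three levels of nesting. A standalone sketch of quote1's behavior:

    import re

    # quote1 from MuttGrammar: any multiple of three '>' markers, then exactly one more.
    quote1 = re.compile(r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){1} *(?:[^ >\n][^\n]*)?(?:$|\n)')

    print(bool(quote1.match('> quoted once\n')))       # True  (depth 1)
    print(bool(quote1.match('> > quoted twice\n')))    # False (depth 2 belongs to quote2)
    print(bool(quote1.match('> > > > deep reply\n')))  # True  (depth 4 cycles back to quote1)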
@@ -50,7 +50,7 @@ class PerlGrammar(lex.Grammar):
          'action': lex.make_token},

         {'name': 'keyword',
-         'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
+         'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|no|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
          'action': lex.make_token},

         {'name': 'hash bareword index',
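This hunk (evidently the Perl grammar) adds 'no', the pragma-disabling counterpart of 'use', to the keyword alternation. The existing guards still apply: the (?<!->) lookbehind skips method calls and the trailing lookahead skips longer identifiers. An abbreviated check; the real pattern lists many more keywords:

    import re

    # Abbreviated form of the updated PerlGrammar keyword expression.
    keyword = re.compile(r"(?<!->)(?:not|no|use|while)(?![a-zA-Z_])")

    print(bool(keyword.search('no strict;')))    # True  -- newly highlighted
    print(bool(keyword.search('nothing = 1;')))  # False -- longer identifier is skipped
    print(bool(keyword.search('$obj->no(1);')))  # False -- method call is skipped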
@@ -29,10 +29,10 @@ class SqlGrammar(lex.Grammar):
          'action': lex.make_token},

         {'name': 'keyword1',
-         'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
+         'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE OR REPLACE VIEW|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
          'action': lex.make_token},
         {'name': 'keyword2',
-         'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create language|create operator|create type)(?![A-Za-z0-9_])''',
+         'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create or replace view|create language|create operator|create type)(?![A-Za-z0-9_])''',
          'action': lex.make_token},

         {'name': 'pseudo-keyword1',
lex_text.py | 18
@@ -1,22 +1,20 @@
 import os
 import ispell, lex

-def make_token_spell(rule, m):
+def make_token_spell(rule, m, offset):
     '''return a token from a hit'''
     # first let's figure out the actual word we need to check
     if rule.name == 'continued word':
         word = '%s%s' % (m.group(1), m.group(2))
     else:
         word = m.group(0)

     # okay, now we check the spelling; we don't spell-check all caps words
     if ispell.can_spell() and \
-       not ispell.get_speller().check(word, caps=False, title=True):
+       not ispell.get_speller().check(word, caps=False, title=False):
         name = "misspelled %s" % rule.name
     else:
         name = rule.name
-
-    return lex.Token(name, m.start(), m.end(), m.group(0))
+    return(lex.Token(name, m.start() + offset, m.end() + offset, word))

 class TextGrammar(lex.Grammar):
     GRAMMAR_LIST = [
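make_token_spell now takes an offset, shifts the token's start and end by it, and returns the assembled word rather than the raw match. Presumably the lexer passes the buffer position at which the scanned text begins, so tokens from a partial re-lex land at buffer-relative positions; lex.Lexer's exact calling convention isn't shown in this diff. A minimal sketch of the arithmetic with a stand-in Token:

    import re
    from collections import namedtuple

    Token = namedtuple('Token', 'name start end string')  # stand-in for lex.Token

    def make_token(rule_name, m, offset):
        # Match positions are slice-relative; adding the slice's buffer offset
        # makes them buffer-relative, which is what the highlighter needs.
        return Token(rule_name, m.start() + offset, m.end() + offset, m.group(0))

    buf = "the quick brown fox"
    slice_start = 10                        # re-lex only "brown fox"
    m = re.match(r'\w+', buf[slice_start:])
    print(make_token('word', m, slice_start))
    # Token(name='word', start=10, end=15, string='brown')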
@@ -25,8 +23,14 @@ class TextGrammar(lex.Grammar):
          'action': make_token_spell},

         {'name': 'word',
-         'expr': r"""[a-zA-Z][a-zA-Z-']*[a-zA-Z]""",
-         'action': make_token_spell},
+         'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
+         'action': make_token_spell,
+         },
+
+        {'name': 'stuff',
+         'expr': r"""[^ \n]+""",
+         'action': lex.make_token,
+         },

         {'name': "default",
          'expr': r'.| |\n',
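The word expression is brought in line with the form MuttGrammar uses: (?:[a-zA-Z][-']?)* requires every hyphen or apostrophe to follow a letter and allows zero repetitions, so single-letter words now get spell-checked and doubled punctuation no longer hides inside one token. Comparing the two forms directly:

    import re

    old_word = re.compile(r"[a-zA-Z][a-zA-Z-']*[a-zA-Z]")
    new_word = re.compile(r"(?:[a-zA-Z][-']?)*[a-zA-Z]")

    print(old_word.fullmatch("I"))                 # None: the old form needed two letters
    print(new_word.fullmatch("I").group())         # 'I'
    print(old_word.fullmatch("a--b") is not None)  # True: doubled '-' slipped through
    print(new_word.fullmatch("a--b") is not None)  # False: punctuation must follow a letter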
@@ -6,7 +6,7 @@ from optparse import OptionParser
 # our imports
 import lex

-class XMLGrammar(lex.Grammar):
+class TTGrammar(lex.Grammar):
     GRAMMAR_LIST = [
         {'name': 'comment',
          'expr': r'''<!--(?:.| |\n)+?(?:-->|$)''',
@@ -14,6 +14,7 @@ class XMLGrammar(lex.Grammar):

         {'name': 'template',
          'expr': r'''\[%(?:.| |\n)*?%\]''',
+         #'expr': r'''\[%%\]''',
          'action': lex.make_token},

         {'name': 'ltb',
@@ -45,7 +46,7 @@ class XMLGrammar(lex.Grammar):
          'action': lex.make_token},

         {'name': 'nodevalue',
-         'expr': r'''(?<=>)(?:[^<]|\n)+?(?=<)''',
+         'expr': r'''(?:(?<=>)|(?<=%\]))(?:[^<\[]|\n|\[(?!%.*?%\]))+?(?=(?:<|\[%))''',
          'action': lex.make_token},

         {'name': 'whitespace',
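The old nodevalue rule only fired on text between '>' and '<'. With [% ... %] template directives embedded in the markup, text can also begin after '%]' and must stop before '[%'; the branch \[(?!%.*?%\]) still admits a literal '[' that does not open a directive. A standalone check:

    import re

    nodevalue = re.compile(r'''(?:(?<=>)|(?<=%\]))(?:[^<\[]|\n|\[(?!%.*?%\]))+?(?=(?:<|\[%))''')

    text = '<b>hello [% name %] world</b>'
    print(nodevalue.findall(text))  # ['hello ', ' world']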
@@ -83,5 +84,5 @@ class XMLGrammar(lex.Grammar):

     def _default_rules(self):
         """subclasses can override this to define defaults for a grammar"""
-        for rdir in XMLGrammar.GRAMMAR_LIST:
+        for rdir in TTGrammar.GRAMMAR_LIST:
             self.add_rule(**rdir)
@@ -245,6 +245,7 @@ class SwitchBuffer(Method):
             w.application.set_error("buffer %r was not found" % name)
 class KillBuffer(Method):
     '''Close the current buffer'''
+    force=False
     def _args(self):
         return [Argument('buffername', datatype="buffer",
                          prompt="Kill Buffer: ",
@@ -255,11 +256,14 @@ class KillBuffer(Method):
         assert name in app.bufferlist.buffer_names, "Buffer %r does not exist" % name
         assert name != '*Scratch*', "Can't kill scratch buffer"
         b = app.bufferlist.buffer_names[name]
-        assert not b.changed(), "Buffer %r has been modified" % (name)
+        if not self.force:
+            assert not b.changed(), "Buffer %r has been modified" % (name)
         if app.bufferlist.is_buffer_visible(b):
             app.bufferlist.set_slot(app.active_slot, app.bufferlist.hidden_buffers[0])
         app.bufferlist.remove_buffer(b)
         b.close()
+class ForceKillBuffer(KillBuffer):
+    force=True
 class ListBuffers(Method):
     '''List all open buffers in a new buffer'''
     def _execute(self, w, **vargs):
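ForceKillBuffer is the whole feature: KillBuffer gains a force class attribute defaulting to False, the modified-buffer assertion is gated on it, and the subclass just flips the flag while inheriting _args and _execute. A minimal sketch of the pattern outside the editor, with stand-in classes rather than the real Method API:

    class Kill:
        force = False  # class attribute; subclasses override it
        def execute(self, buf):
            if not self.force:
                # refuse to discard unsaved changes unless forced
                assert not buf['changed'], "Buffer %r has been modified" % buf['name']
            print('killed', buf['name'])

    class ForceKill(Kill):
        force = True  # one-line subclass: same logic, safety check skipped

    dirty = {'name': 'notes.txt', 'changed': True}
    ForceKill().execute(dirty)        # killed notes.txt
    try:
        Kill().execute(dirty)
    except AssertionError as e:
        print('refused:', e)          # refused: Buffer 'notes.txt' has been modified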
mode2.py | 3
@@ -1,7 +1,7 @@
 import os
 import sets, string

-import color, default, highlight, method, point
+import color, default, method, point

 DEBUG = False
 #DEBUG = True
@@ -103,6 +103,7 @@ class Fundamental(Handler):
         self.add_bindings('replace', ('M-%',))
         self.add_bindings('open-file', ('C-x C-f',))
         self.add_bindings('kill-buffer', ('C-x k',))
+        self.add_bindings('force-kill-buffer', ('C-x K',))
         self.add_bindings('list-buffers', ('C-x C-b',))
         self.add_bindings('meta-x', ('M-x',))
         self.add_bindings('wrap-line', ('M-q',))
@@ -8,10 +8,11 @@ class Mutt(mode.Fundamental):

         self.add_action_and_bindings(mode_text.LearnWord(), ('C-c l',))
         self.add_action_and_bindings(MuttWrapParagraph(), ('M-q',))
-        #self.add_action_and_bindings(MuttWrapLine(), ('M-q',))
         self.add_action_and_bindings(MuttInsertSpace(), ('SPACE',))

         self.grammar = lex_mutt.MuttGrammar()
+        #import lex_text
+        #self.grammar = lex_text.TextGrammar()
         self.lexer = lex.Lexer(self.grammar)

         self.default_color = color.build('default', 'default')
@@ -26,9 +27,6 @@ class Mutt(mode.Fundamental):
         'quote3': color.build('magenta', 'default', 'bold'),
         }

-        #self.highlighter.lex_buffer()
-        #self.get_regions()
-
     def name(self):
         return "Mutt"

@@ -19,9 +19,6 @@ class Text(mode.Fundamental):
         'misspelled continued word': color.build('red', 'default', 'bold'),
         }

-        #self.highlighter.lex_buffer()
-        #self.get_regions()
-
     def name(self):
         return "Text"

@@ -38,7 +38,7 @@ class ShTabber(tab.TokenStackTabber):
             p = buffer.get_offset_point(next_token.start)
             self.stack_append((s, p.x))
         elif s == "else":
-            assert self.tab_stack[-1][0] == "if", "bbbsssxxx"
+            assert self.tab_stack[-1][0] == "then", "bbbsssxxx: %s" % self.tab_stack[-1][0]
             d = self.tab_stack[-1][1] - self.tab_stack[-2][1]
             self.line_depth -= d
         elif s in self.close_tags:
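The corrected assertion reflects how the shell tabber actually builds its stack: in if/then/else, 'then' is the tag that opens the indented body, so it, not 'if', sits on top when 'else' arrives, and the message now reports what was actually found. A rough illustration of the invariant with a hand-built stack of (tag, column) pairs, not the real ShTabber state:

    # Stack state while indenting:  if true; then\n    foo\nelse\n    bar\nfi
    tab_stack = [("file", 0), ("then", 4)]  # 'then' opened the indented body

    # What the fixed assertion checks when 'else' is encountered:
    assert tab_stack[-1][0] == "then", "bbbsssxxx: %s" % tab_stack[-1][0]
    print("else aligns with the block opened by", tab_stack[-1][0])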