diff --git a/BUGS b/BUGS
index 91b8255..a06b11a 100644
--- a/BUGS
+++ b/BUGS
@@ -1,3 +1,6 @@
+2007/06/05:
+search back misses some results on the same line as the search
+
 2006/07/04:
 when in the minibuffer, certain key sequences don't seem to get
 picked up.
diff --git a/lex_c.py b/lex_c.py
index 6128c27..eefa240 100644
--- a/lex_c.py
+++ b/lex_c.py
@@ -20,7 +20,7 @@ class CGrammar(lex.Grammar):
          'action':lex.make_token},
 
         {'name': 'header',
-         'expr': r'''(?<=#include) +(?:<[A-Za-z0-9_]+\.h?>|"[A-Za-z0-9_]+\.h")''',
+         'expr': r'''(?<=#include) +(?:<[A-Za-z/0-9_]+\.h?>|"[A-Za-z/0-9_]+\.h")''',
          'action': lex.make_token},
 
         {'name': 'constant',
diff --git a/lex_mutt.py b/lex_mutt.py
index 3f545c0..67ea22d 100755
--- a/lex_mutt.py
+++ b/lex_mutt.py
@@ -1,72 +1,50 @@
-import os
-import ispell, lex
-
-def make_token(rule, m):
-    '''return a token from a hit'''
-    return(lex.Token(rule.name, m.start(), m.end(), m.group(0)))
-
-def make_token_spell(rule, m):
-    '''return a token from a hit'''
-    # first let's figure out the actual word we need to check
-    if rule.name == 'continued word':
-        word = '%s%s' % (m.group(1), m.group(2))
-    else:
-        word = m.group(0)
-
-    # okay, now we check the spelling; we don't spell-check all caps words
-    if ispell.can_spell() and \
-       not ispell.get_speller().check(word, caps=False, title=False):
-        name = "misspelled %s" % rule.name
-    else:
-        name = rule.name
-
-    return lex.Token(name, m.start(), m.end(), m.group(0))
+import lex, lex_text
 
 class MuttGrammar(lex.Grammar):
     GRAMMAR_LIST = [
         {'name': 'header',
          'expr': r'(?:^|(?<=\n))(?:From|To|Cc|Bcc|Subject|Reply-To|In-Reply-To|Delivered-To|Date):',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote1',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){1} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote2',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){2} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'quote3',
          'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){3} *(?:[^ >\n][^\n]*)?(?:$|\n)',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'email',
          'expr': r'(?:^|(?<=[ :\n]))<?[^<>@\n ]+@(?:[^<>@\.\n ]+\.)*[^<>@\.\n ]+>?',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'url',
          'expr': r'(?:^|(?<=[ \n]))(?:http|https|ftp|sftp|file|smtp|smtps|torrent|news|jabber|irc|telnet)://(?:[^\.\n ]+\.)*[^\.\n ]+',
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': 'continued word',
          'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
-         'action': make_token_spell,
+         'action': lex_text.make_token_spell,
          },
 
         {'name': 'word',
          'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
-         'action': make_token_spell,
+         'action': lex_text.make_token_spell,
          },
 
         {'name': 'stuff',
          'expr': r"""[^ \n]+""",
-         'action': make_token,
+         'action': lex.make_token,
          },
 
         {'name': "default",
diff --git a/lex_perl.py b/lex_perl.py
index cc0ddbd..e026b0f 100755
--- a/lex_perl.py
+++ b/lex_perl.py
@@ -50,7 +50,7 @@ class PerlGrammar(lex.Grammar):
          'action': lex.make_token},
 
         {'name': 'keyword',
-         'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
+         'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|no|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
          'action': lex.make_token},
 
         {'name': 'hash bareword index',
diff --git a/lex_sql.py b/lex_sql.py
index 7430066..3bc0ae3 100755
--- a/lex_sql.py
+++ b/lex_sql.py
@@ -29,10 +29,10 @@ class SqlGrammar(lex.Grammar):
          'action': lex.make_token},
 
         {'name': 'keyword1',
-         'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
+         'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE OR REPLACE VIEW|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
          'action': lex.make_token},
         {'name': 'keyword2',
-         'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create language|create operator|create type)(?![A-Za-z0-9_])''',
+         'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create or replace view|create language|create operator|create type)(?![A-Za-z0-9_])''',
          'action': lex.make_token},
 
         {'name': 'pseudo-keyword1',
diff --git a/lex_text.py b/lex_text.py
index 734b608..6968611 100755
--- a/lex_text.py
+++ b/lex_text.py
@@ -1,32 +1,36 @@
 import os
 import ispell, lex
 
-def make_token_spell(rule, m):
+def make_token_spell(rule, m, offset):
     '''return a token from a hit'''
     # first let's figure out the actual word we need to check
     if rule.name == 'continued word':
         word = '%s%s' % (m.group(1), m.group(2))
     else:
         word = m.group(0)
-
     # okay, now we check the spelling; we don't spell-check all caps words
     if ispell.can_spell() and \
-       not ispell.get_speller().check(word, caps=False, title=True):
+       not ispell.get_speller().check(word, caps=False, title=False):
         name = "misspelled %s" % rule.name
     else:
         name = rule.name
-
-    return lex.Token(name, m.start(), m.end(), m.group(0))
+    return(lex.Token(name, m.start() + offset, m.end() + offset, word))
 
 class TextGrammar(lex.Grammar):
     GRAMMAR_LIST = [
         {'name': 'continued word',
          'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
          'action': make_token_spell},
-        
+
         {'name': 'word',
-         'expr': r"""[a-zA-Z][a-zA-Z-']*[a-zA-Z]""",
-         'action': make_token_spell},
+         'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
+         'action': make_token_spell,
+         },
+
+        {'name': 'stuff',
+         'expr': r"""[^ \n]+""",
+         'action': lex.make_token,
+         },
 
         {'name': "default",
          'expr': r'.| |\n',
diff --git a/lex_tt.py b/lex_tt.py
index b54bc2d..2b4d505 100755
--- a/lex_tt.py
+++ b/lex_tt.py
@@ -6,7 +6,7 @@ from optparse import OptionParser
 # our imports
 import lex
 
-class XMLGrammar(lex.Grammar):
+class TTGrammar(lex.Grammar):
     GRAMMAR_LIST = [
         {'name': 'comment',
          'expr': r'''<!--(?:.|\n)*?(?:-->|$)''',
@@ -14,6 +14,7 @@ class XMLGrammar(lex.Grammar):
         {'name': 'template',
          'expr': r'''\[%(?:.| |\n)*?%\]''',
+         #'expr': r'''\[%%\]''',
          'action': lex.make_token},
 
         {'name': 'ltb',
@@ -45,7 +46,7 @@ class XMLGrammar(lex.Grammar):
          'action': lex.make_token},
 
         {'name': 'nodevalue',
-         'expr': r'''(?<=>)(?:[^<]|\n)+?(?=<)''',
+         'expr': r'''(?:(?<=>)|(?<=%\]))(?:[^<\[]|\n|\[(?!%.*?%\]))+?(?=(?:<|\[%))''',
          'action': lex.make_token},
 
         {'name': 'whitespace',
@@ -83,5 +84,5 @@ class XMLGrammar(lex.Grammar):
 
     def _default_rules(self):
         """subclasses can override this to define defaults for a grammar"""
-        for rdir in XMLGrammar.GRAMMAR_LIST:
+        for rdir in TTGrammar.GRAMMAR_LIST:
             self.add_rule(**rdir)
diff --git a/method.py b/method.py
index e380e16..7fe6300 100644
--- a/method.py
+++ b/method.py
@@ -245,6 +245,7 @@ class SwitchBuffer(Method):
         w.application.set_error("buffer %r was not found" % name)
 class KillBuffer(Method):
     '''Close the current buffer'''
+    force=False
     def _args(self):
         return [Argument('buffername', datatype="buffer",
                          prompt="Kill Buffer: ",
@@ -255,11 +256,14 @@ class KillBuffer(Method):
         assert name in app.bufferlist.buffer_names, "Buffer %r does not exist" % name
         assert name != '*Scratch*', "Can't kill scratch buffer"
         b = app.bufferlist.buffer_names[name]
-        assert not b.changed(), "Buffer %r has been modified" % (name)
+        if not self.force:
+            assert not b.changed(), "Buffer %r has been modified" % (name)
         if app.bufferlist.is_buffer_visible(b):
             app.bufferlist.set_slot(app.active_slot, app.bufferlist.hidden_buffers[0])
         app.bufferlist.remove_buffer(b)
         b.close()
+class ForceKillBuffer(KillBuffer):
+    force=True
 class ListBuffers(Method):
     '''List all open buffers in a new buffer'''
     def _execute(self, w, **vargs):
diff --git a/mode2.py b/mode2.py
index 9a1d43e..aac2af3 100644
--- a/mode2.py
+++ b/mode2.py
@@ -1,7 +1,7 @@
 import os
 import sets, string
 
-import color, default, highlight, method, point
+import color, default, method, point
 
 DEBUG = False
 #DEBUG = True
@@ -103,6 +103,7 @@ class Fundamental(Handler):
         self.add_bindings('replace', ('M-%',))
         self.add_bindings('open-file', ('C-x C-f',))
         self.add_bindings('kill-buffer', ('C-x k',))
+        self.add_bindings('force-kill-buffer', ('C-x K',))
         self.add_bindings('list-buffers', ('C-x C-b',))
         self.add_bindings('meta-x', ('M-x',))
         self.add_bindings('wrap-line', ('M-q',))
diff --git a/mode_mutt.py b/mode_mutt.py
index ce80418..6a26548 100644
--- a/mode_mutt.py
+++ b/mode_mutt.py
@@ -8,27 +8,25 @@ class Mutt(mode.Fundamental):
 
         self.add_action_and_bindings(mode_text.LearnWord(), ('C-c l',))
         self.add_action_and_bindings(MuttWrapParagraph(), ('M-q',))
-        #self.add_action_and_bindings(MuttWrapLine(), ('M-q',))
         self.add_action_and_bindings(MuttInsertSpace(), ('SPACE',))
 
         self.grammar = lex_mutt.MuttGrammar()
+        #import lex_text
+        #self.grammar = lex_text.TextGrammar()
         self.lexer = lex.Lexer(self.grammar)
 
         self.default_color = color.build('default', 'default')
        self.colors = {
-            'header': color.build('green', 'default', 'bold'),
-            'email': color.build('cyan', 'default', 'bold'),
-            'url': color.build('cyan', 'default', 'bold'),
-            'misspelled word': color.build('red', 'default', 'bold'),
+            'header':                    color.build('green', 'default', 'bold'),
+            'email':                     color.build('cyan', 'default', 'bold'),
+            'url':                       color.build('cyan', 'default', 'bold'),
+            'misspelled word':           color.build('red', 'default', 'bold'),
             'misspelled continued word': color.build('red', 'default', 'bold'),
-            'quote1': color.build('yellow', 'default', 'bold'),
-            'quote2': color.build('cyan', 'default', 'bold'),
-            'quote3': color.build('magenta', 'default', 'bold'),
+            'quote1':                    color.build('yellow', 'default', 'bold'),
+            'quote2':                    color.build('cyan', 'default', 'bold'),
+            'quote3':                    color.build('magenta', 'default', 'bold'),
             }
 
-        #self.highlighter.lex_buffer()
-        #self.get_regions()
-
     def name(self):
         return "Mutt"
 
diff --git a/mode_text.py b/mode_text.py
index 7adbbd0..b4bb877 100644
--- a/mode_text.py
+++ b/mode_text.py
@@ -19,9 +19,6 @@ class Text(mode.Fundamental):
             'misspelled continued word': color.build('red', 'default', 'bold'),
             }
 
-        #self.highlighter.lex_buffer()
-        #self.get_regions()
-
     def name(self):
         return "Text"
 
diff --git a/tab_sh.py b/tab_sh.py
index d725f33..1e4cc4e 100644
--- a/tab_sh.py
+++ b/tab_sh.py
@@ -38,7 +38,7 @@ class ShTabber(tab.TokenStackTabber):
                 p = buffer.get_offset_point(next_token.start)
                 self.stack_append((s, p.x))
             elif s == "else":
-                assert self.tab_stack[-1][0] == "if", "bbbsssxxx"
+                assert self.tab_stack[-1][0] == "then", "bbbsssxxx: %s" % self.tab_stack[-1][0]
                 d = self.tab_stack[-1][1] - self.tab_stack[-2][1]
                 self.line_depth -= d
             elif s in self.close_tags: