update from pmacs/

--HG--
branch : pmacs2
moculus 2007-06-14 02:38:46 +00:00
parent 077417a363
commit 6f8fdb638a
12 changed files with 50 additions and 64 deletions

BUGS
View File

@@ -1,3 +1,6 @@
2007/06/05:
searching backward misses some results on the same line as the search start
2006/07/04:
when in the minibuffer, certain key sequences don't seem to get picked up.

View File

@@ -20,7 +20,7 @@ class CGrammar(lex.Grammar):
'action':lex.make_token},
{'name': 'header',
'expr': r'''(?<=#include) +(?:<[A-Za-z0-9_]+\.h?>|"[A-Za-z0-9_]+\.h")''',
'expr': r'''(?<=#include) +(?:<[A-Za-z/0-9_]+\.h?>|"[A-Za-z/0-9_]+\.h")''',
'action': lex.make_token},
{'name': 'constant',

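The header rule's only change is that "/" is now allowed inside the include name, so headers in subdirectories are tokenized too. A quick standalone check of the new expression with plain re (outside the editor's lexer):

import re

# New 'header' expression from the hunk above; "/" is now permitted,
# so includes such as <sys/types.h> match.
header_re = r'''(?<=#include) +(?:<[A-Za-z/0-9_]+\.h?>|"[A-Za-z/0-9_]+\.h")'''

assert re.search(header_re, '#include <sys/types.h>')      # now matches
assert re.search(header_re, '#include "my_defs.h"')        # still matches
assert not re.search(header_re, 'include <sys/types.h>')   # lookbehind wants "#include"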
View File

@@ -1,72 +1,50 @@
import os
import ispell, lex
def make_token(rule, m):
'''return a token from a hit'''
return(lex.Token(rule.name, m.start(), m.end(), m.group(0)))
def make_token_spell(rule, m):
'''return a token from a hit'''
# first let's figure out the actual word we need to check
if rule.name == 'continued word':
word = '%s%s' % (m.group(1), m.group(2))
else:
word = m.group(0)
# okay, now we check the spelling; we don't spell-check all caps words
if ispell.can_spell() and \
not ispell.get_speller().check(word, caps=False, title=False):
name = "misspelled %s" % rule.name
else:
name = rule.name
return lex.Token(name, m.start(), m.end(), m.group(0))
import lex, lex_text
class MuttGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'header',
'expr': r'(?:^|(?<=\n))(?:From|To|Cc|Bcc|Subject|Reply-To|In-Reply-To|Delivered-To|Date):',
'action': make_token,
'action': lex.make_token,
},
{'name': 'quote1',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){1} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
'action': lex.make_token,
},
{'name': 'quote2',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){2} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
'action': lex.make_token,
},
{'name': 'quote3',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){3} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
'action': lex.make_token,
},
{'name': 'email',
'expr': r'(?:^|(?<=[ :\n]))<?[^<>@\n ]+@(?:[^<>@\.\n ]+\.)*[^<>@\.\n ]+>?',
'action': make_token,
'action': lex.make_token,
},
{'name': 'url',
'expr': r'(?:^|(?<=[ \n]))(?:http|https|ftp|sftp|file|smtp|smtps|torrent|news|jabber|irc|telnet)://(?:[^\.\n ]+\.)*[^\.\n ]+',
'action': make_token,
'action': lex.make_token,
},
{'name': 'continued word',
'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
'action': make_token_spell,
'action': lex_text.make_token_spell,
},
{'name': 'word',
'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
'action': make_token_spell,
'action': lex_text.make_token_spell,
},
{'name': 'stuff',
'expr': r"""[^ \n]+""",
'action': make_token,
'action': lex.make_token,
},
{'name': "default",

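This file now imports lex and lex_text and points its actions at lex.make_token and lex_text.make_token_spell instead of carrying local copies of both helpers. The subtle rule is 'continued word', which reassembles a hyphenated word that wraps onto the next line before it is spell-checked. A rough standalone illustration of that reassembly, using plain re rather than the editor's Lexer:

import re

# Same expression as the 'continued word' rule above.
continued_re = r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])"""

m = re.search(continued_re, "this line ends in a hyphen-\n    ated word")
word = '%s%s' % (m.group(1), m.group(2))
assert word == 'hyphenated'   # the joined form is what gets spell-checked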
View File

@@ -50,7 +50,7 @@ class PerlGrammar(lex.Grammar):
'action': lex.make_token},
{'name': 'keyword',
'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|no|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
'action': lex.make_token},
{'name': 'hash bareword index',

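The only change to the Perl keyword rule is the addition of "no", so pragma lines like "no strict;" highlight as keywords. A sanity check with a deliberately trimmed-down alternation (the full rule is the one shown in the hunk); the guards on either side are unchanged:

import re

# Trimmed stand-in for the keyword rule: a few entries plus the new "no".
keyword_re = r"""(?<!->)(?:not|no|my|use|if)(?![a-zA-Z_])"""

assert re.search(keyword_re, 'no strict;')         # "no" is now a keyword
assert not re.search(keyword_re, '$obj->no(1);')   # method calls still excluded
assert not re.search(keyword_re, 'node')           # no match inside identifiers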
View File

@@ -29,10 +29,10 @@ class SqlGrammar(lex.Grammar):
'action': lex.make_token},
{'name': 'keyword1',
'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE OR REPLACE VIEW|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'keyword2',
'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create language|create operator|create type)(?![A-Za-z0-9_])''',
'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create or replace view|create language|create operator|create type)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'pseudo-keyword1',

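Both keyword rules gain CREATE OR REPLACE VIEW (upper- and lower-case variants), so the whole phrase lexes as a single keyword token instead of falling through to weaker rules. A quick check against a shortened stand-in for the keyword1 alternation:

import re

# Shortened stand-in for keyword1: just the CREATE OR REPLACE entries
# plus one shorter neighbor.
keyword1_re = (r'(?:CREATE VIEW|CREATE OR REPLACE FUNCTION|'
               r'CREATE OR REPLACE VIEW)(?![A-Za-z0-9_])')

m = re.search(keyword1_re, 'CREATE OR REPLACE VIEW active_users AS SELECT 1')
assert m and m.group(0) == 'CREATE OR REPLACE VIEW'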
View File

@@ -1,22 +1,20 @@
import os
import ispell, lex
def make_token_spell(rule, m):
def make_token_spell(rule, m, offset):
'''return a token from a hit'''
# first let's figure out the actual word we need to check
if rule.name == 'continued word':
word = '%s%s' % (m.group(1), m.group(2))
else:
word = m.group(0)
# okay, now we check the spelling; we don't spell-check all caps words
if ispell.can_spell() and \
not ispell.get_speller().check(word, caps=False, title=True):
not ispell.get_speller().check(word, caps=False, title=False):
name = "misspelled %s" % rule.name
else:
name = rule.name
return lex.Token(name, m.start(), m.end(), m.group(0))
return(lex.Token(name, m.start() + offset, m.end() + offset, word))
class TextGrammar(lex.Grammar):
GRAMMAR_LIST = [
@@ -25,8 +23,14 @@ class TextGrammar(lex.Grammar):
'action': make_token_spell},
{'name': 'word',
'expr': r"""[a-zA-Z][a-zA-Z-']*[a-zA-Z]""",
'action': make_token_spell},
'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
'action': make_token_spell,
},
{'name': 'stuff',
'expr': r"""[^ \n]+""",
'action': lex.make_token,
},
{'name': "default",
'expr': r'.| |\n',

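Two behavioral changes hide in this hunk: make_token_spell now takes an offset and shifts the token's start and end by it, and the token text is the (possibly reassembled) word rather than the raw match. A minimal sketch of the offset arithmetic, assuming a simple namedtuple stand-in for lex.Token:

import re
from collections import namedtuple

Token = namedtuple('Token', 'name start end string')   # stand-in for lex.Token

def make_token_sketch(name, m, offset, word):
    # mirrors the new return statement: positions are shifted by the offset
    # of the lexed chunk within the buffer
    return Token(name, m.start() + offset, m.end() + offset, word)

m = re.search(r"(?:[a-zA-Z][-']?)*[a-zA-Z]", '    speling')
tok = make_token_sketch('misspelled word', m, offset=100, word=m.group(0))
assert (tok.start, tok.end) == (104, 111)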
View File

@@ -6,7 +6,7 @@ from optparse import OptionParser
# our imports
import lex
class XMLGrammar(lex.Grammar):
class TTGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'comment',
'expr': r'''<!--(?:.| |\n)+?(?:-->|$)''',
@@ -14,6 +14,7 @@ class XMLGrammar(lex.Grammar):
{'name': 'template',
'expr': r'''\[%(?:.| |\n)*?%\]''',
#'expr': r'''\[%%\]''',
'action': lex.make_token},
{'name': 'ltb',
@@ -45,7 +46,7 @@ class XMLGrammar(lex.Grammar):
'action': lex.make_token},
{'name': 'nodevalue',
'expr': r'''(?<=>)(?:[^<]|\n)+?(?=<)''',
'expr': r'''(?:(?<=>)|(?<=%\]))(?:[^<\[]|\n|\[(?!%.*?%\]))+?(?=(?:<|\[%))''',
'action': lex.make_token},
{'name': 'whitespace',
@@ -83,5 +84,5 @@ class XMLGrammar(lex.Grammar):
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in XMLGrammar.GRAMMAR_LIST:
for rdir in TTGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)

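The grammar here is renamed from XMLGrammar to TTGrammar and learns about Template Toolkit directives: a new 'template' rule matches [% ... %] blocks, and 'nodevalue' is rewritten so element text stops at a directive instead of swallowing it. The template expression is simple enough to try on its own:

import re

# 'template' rule from the hunk above: a non-greedy [% ... %] block,
# possibly spanning newlines.
template_re = r'''\[%(?:.| |\n)*?%\]'''

text = '<td>[% user.name %]</td>\n<td>[% user.email %]</td>'
assert re.findall(template_re, text) == ['[% user.name %]', '[% user.email %]']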
View File

@@ -245,6 +245,7 @@ class SwitchBuffer(Method):
w.application.set_error("buffer %r was not found" % name)
class KillBuffer(Method):
'''Close the current buffer'''
force=False
def _args(self):
return [Argument('buffername', datatype="buffer",
prompt="Kill Buffer: ",
@@ -255,11 +256,14 @@ class KillBuffer(Method):
assert name in app.bufferlist.buffer_names, "Buffer %r does not exist" % name
assert name != '*Scratch*', "Can't kill scratch buffer"
b = app.bufferlist.buffer_names[name]
if not self.force:
assert not b.changed(), "Buffer %r has been modified" % (name)
if app.bufferlist.is_buffer_visible(b):
app.bufferlist.set_slot(app.active_slot, app.bufferlist.hidden_buffers[0])
app.bufferlist.remove_buffer(b)
b.close()
class ForceKillBuffer(KillBuffer):
force=True
class ListBuffers(Method):
'''List all open buffers in a new buffer'''
def _execute(self, w, **vargs):

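Force-killing is implemented with a class attribute: KillBuffer grows force=False and only does the unsaved-changes assertion when the flag is off, and ForceKillBuffer is just a subclass that flips it. The same pattern in isolation, with made-up stand-ins for the editor's Method and buffer classes:

class KillBuffer(object):
    force = False
    def execute(self, buf):
        if not self.force:
            assert not buf.changed(), "Buffer %r has been modified" % buf.name
        buf.close()

class ForceKillBuffer(KillBuffer):
    force = True   # identical behavior, minus the unsaved-changes check

class FakeBuffer(object):
    name = 'notes.txt'
    def changed(self): return True
    def close(self): pass

ForceKillBuffer().execute(FakeBuffer())   # succeeds: the check is skipped
# KillBuffer().execute(FakeBuffer())      # would raise AssertionError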
View File

@@ -1,7 +1,7 @@
import os
import sets, string
import color, default, highlight, method, point
import color, default, method, point
DEBUG = False
#DEBUG = True
@@ -103,6 +103,7 @@ class Fundamental(Handler):
self.add_bindings('replace', ('M-%',))
self.add_bindings('open-file', ('C-x C-f',))
self.add_bindings('kill-buffer', ('C-x k',))
self.add_bindings('force-kill-buffer', ('C-x K',))
self.add_bindings('list-buffers', ('C-x C-b',))
self.add_bindings('meta-x', ('M-x',))
self.add_bindings('wrap-line', ('M-q',))

View File

@@ -8,10 +8,11 @@ class Mutt(mode.Fundamental):
self.add_action_and_bindings(mode_text.LearnWord(), ('C-c l',))
self.add_action_and_bindings(MuttWrapParagraph(), ('M-q',))
#self.add_action_and_bindings(MuttWrapLine(), ('M-q',))
self.add_action_and_bindings(MuttInsertSpace(), ('SPACE',))
self.grammar = lex_mutt.MuttGrammar()
#import lex_text
#self.grammar = lex_text.TextGrammar()
self.lexer = lex.Lexer(self.grammar)
self.default_color = color.build('default', 'default')
@@ -26,9 +27,6 @@ class Mutt(mode.Fundamental):
'quote3': color.build('magenta', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
def name(self):
return "Mutt"

View File

@@ -19,9 +19,6 @@ class Text(mode.Fundamental):
'misspelled continued word': color.build('red', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
def name(self):
return "Text"

View File

@@ -38,7 +38,7 @@ class ShTabber(tab.TokenStackTabber):
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == "else":
assert self.tab_stack[-1][0] == "if", "bbbsssxxx"
assert self.tab_stack[-1][0] == "then", "bbbsssxxx: %s" % self.tab_stack[-1][0]
d = self.tab_stack[-1][1] - self.tab_stack[-2][1]
self.line_depth -= d
elif s in self.close_tags:
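The fix swaps which opener the tabber expects when it reaches "else": in this shell mode it is "then", not "if", that pushes the indent level, so that is what must be on top of the stack. A toy version of that stack discipline (not the real TokenStackTabber):

# hypothetical token stream for:  if true; then ... else ... fi
tokens = ['if', 'then', 'else', 'fi']
stack = [('base', 0)]

for tok in tokens:
    if tok == 'then':
        stack.append(('then', 4))   # made-up indent column
    elif tok == 'else':
        # the corrected assertion: the opener on the stack is "then"
        assert stack[-1][0] == 'then', stack[-1][0]
    elif tok == 'fi':
        stack.pop()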