try this again

--HG--
branch : pmacs2
moculus 2007-03-06 15:05:38 +00:00
commit 6780f9f22a
82 changed files with 19440 additions and 0 deletions

13
BUGS Normal file

@@ -0,0 +1,13 @@
2006/07/04:
when in the minibuffer, certain key sequences don't seem to get picked up.
2006/07/04:
undo/redo should probably show you what is being undone (e.g. by jumping to that
region of code).
2006/07/04:
undo/redo is mostly fixed, but there are still occasional problems, which seem
to relate to pasting in multiple lines and cursor positioning.
2006/06/25:
long prompts will cause problems (particularly filenames)

12
IDEAS Normal file

@@ -0,0 +1,12 @@
since the commands are stateless, they should probably only be instantiated once
and stored in the application. that way, anyone can run any command using the
following:
m = app.methods['my-method-name']
m.execute()
Right now, every mode instance instantiates its own exact copy of the method,
and anyone else who needs to use a method just instantiates the method in
question.
(2006/11/4)
Well, we've made some progress on this. The app now has copies of everything,
but the various modes don't necessarily use that copy, and also don't
necessarily add their own stuff to it.
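A minimal sketch of the registry idea described above; the class and method names here are illustrative, not the real pmacs API:

# sketch: one shared registry of stateless method objects
# (names are illustrative only)
class App:
    def __init__(self):
        self.methods = {}
    def register(self, m):
        self.methods[m.name] = m

class SaveBuffer:
    name = 'save-buffer'
    def execute(self, window):
        window.buffer.save()

app = App()
app.register(SaveBuffer())
m = app.methods['save-buffer']  # any mode can share this one instance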

107
aes.py Executable file

@@ -0,0 +1,107 @@
#!/usr/bin/python
#
# by Erik Osheim
import os, popen2
class Cipher:
def __init__(self, password, seed='aes.py', hashtype='rmd160'):
self.password = password
self.seed = seed
self.hashtype = hashtype
def encrypt(self, data):
return encrypt_data(data, self.password, self.seed, self.hashtype)
def decrypt(self, encrypted):
return decrypt_data(encrypted, self.password, self.seed, self.hashtype)
def _check_aespipe():
result = os.system('which aespipe > /dev/null')
if result != 0:
raise Exception, "Could not find aespipe; is it installed?"
def encrypt_data(data, password, seed='aes.py', hashtype='rmd160'):
'''uses password to encrypt data'''
_check_aespipe()
cmd = "aespipe -S '%s' -H '%s' -p 0" % (seed, hashtype)
(stdout, stdin, stderr) = popen2.popen3(cmd)
stdin.write(password + '\n')
stdin.write(data)
stdin.close()
encrypted = stdout.read()
err = stderr.read()
if err:
raise Exception, "Problem: %s" % err
return encrypted
def encrypt_path(path, data, password, seed='aes.py', hashtype='rmd160'):
'''uses password to encrypt data and writes result to path'''
encrypted = encrypt_data(data, password, seed, hashtype)
f = open(path, 'w')
f.write(encrypted)
f.close()
def decrypt_data(encrypted, password, seed='aes.py', hashtype='rmd160'):
'''uses password to decrypt data'''
_check_aespipe()
cmd = "aespipe -d -S '%s' -H '%s' -p 0" % (seed, hashtype)
(stdout, stdin, stderr) = popen2.popen3(cmd)
stdin.write(password + '\n')
stdin.write(encrypted)
stdin.close()
data = stdout.read()
err = stderr.read()
if err:
raise Exception, "Problem: %s" % err
# data is null-padded at the end to align on 16- or 512-byte boundaries
i = len(data)
while i > 0:
if data[i-1] == '\x00':
i -= 1
else:
break
return data[:i]
def decrypt_path(path, password, seed='aes.py', hashtype='rmd160'):
'''uses password to decrypt data from path'''
f = open(path, 'r')
encrypted = f.read()
f.close()
data = decrypt_data(encrypted, password, seed, hashtype)
return data
if __name__ == "__main__":
import optparse, sys
parser = optparse.OptionParser()
parser.set_defaults(mode='decrypt')
parser.set_defaults(password='insecure1@3$5^')
parser.set_defaults(filename='output.aes')
parser.add_option('-e', dest='mode', action='store_const', const='encrypt',
help="perform encryption on data from stdin")
parser.add_option('-d', dest='mode', action='store_const', const='decrypt',
help="perform decryption on data from stdin")
parser.add_option('-f', dest='filename', action='store', metavar='FILENAME',
help="encrypt to/from FILENAME (default: output.aes)")
parser.add_option('-p', dest='password', action='store', metavar='PASSWORD',
default="insecure1@3$5^",
help="use password PASSWORD (default: insecure1@3$5^)")
(opts, args) = parser.parse_args()
c = Cipher(opts.password)
if opts.mode == 'encrypt':
data = sys.stdin.read()
encrypted = c.encrypt(data)
f = open(opts.filename, 'w')
f.write(encrypted)
f.close()
print "data written to %r." % opts.filename
else:
print "reading data from %r:" % opts.filename
f = open(opts.filename, 'r')
encrypted = f.read()
f.close()
data = c.decrypt(encrypted)
print data
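A short usage sketch for the Cipher class above; it assumes aespipe is installed and on the PATH, since both helpers shell out to it:

# round-trip some text through aespipe (assumes aespipe is installed)
from aes import Cipher

c = Cipher('my secret passphrase')
encrypted = c.encrypt('attack at dawn')
print c.decrypt(encrypted)   # prints: attack at dawn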

814
application.py Executable file

@@ -0,0 +1,814 @@
#!/usr/bin/env python
import curses, curses.ascii, getpass, os, re, string, sys, termios, time
import traceback
import buffer, bufferlist, color, completer, keyinput, method, minibuffer
import mode, point, sets, util, window
# modes
import mode_c, mode_mini, mode_python, mode_nasm, mode_perl, mode_search
import mode_replace, mode_xml, mode_console, mode_sh, mode_text, mode_which
import mode_mutt, mode_sql, mode_javascript, mode_diff, mode_blame, mode_tt
def run(buffers, jump_to_line=None, init_mode=None):
# save terminal state so we can restore it when the program exits
attr = termios.tcgetattr(sys.stdin)
keyinput.disable_control_chars()
retval = 1
try:
retval = curses.wrapper(run_app, buffers, jump_to_line, init_mode)
except:
traceback.print_exc()
# restore terminal state
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr)
return retval
def run_app(stdscr, buffers, jump_to_line=None, init_mode=None):
a = Application(stdscr, buffers, jump_to_line, init_mode)
a.run()
KILL_RING_LIMIT = 128
WORD_LETTERS = list(string.letters + string.digits)
ERROR_TIMEOUT = -1
#ERROR_TIMEOUT = 2
#DARK_BACKGROUND = False
DARK_BACKGROUND = True
class Application:
def __init__(self, stdscr, buffers=[], jump_to_line=None, init_mode=None):
# initialize curses primitives
self.stdscr = stdscr
self.y, self.x = self.stdscr.getmaxyx()
# initialize some basic stuff
self.margins_visible = False
#self.margins = [(80, 'blue'), (90, 'red')]
self.margins = [(80, 'blue'), ]
# each highlighted_range contains three things: [window, start_p, end_p]
self.highlighted_ranges = []
self.mini_active = False
self.mini_buffer = None
self.mini_prompt = ""
self.error_string = ""
self.error_timestamp = None
self.input = keyinput.Handler()
# initialize our colors
if curses.has_colors():
curses.start_color()
try:
curses.use_default_colors()
color.default_color = True
except:
# guess we weren't on 2.4
color.default_color = False
color.init()
# this is how we can change color settings
if curses.can_change_color():
#curses.init_color(curses.COLOR_BLUE, 750, 400, 0)
pass
else:
self.set_error("Dynamic color not available")
# initialize our modes
self.modes = {
'blame': mode_blame.Blame,
'c': mode_c.C,
'console': mode_console.Console,
'diff': mode_diff.Diff,
'fundamental': mode.Fundamental,
'mini': mode_mini.Mini,
'nasm': mode_nasm.Nasm,
'perl': mode_perl.Perl,
'python': mode_python.Python,
'replace': mode_replace.Replace,
'search': mode_search.Search,
'sh': mode_sh.Sh,
'text': mode_text.Text,
'which': mode_which.Which,
'xml': mode_xml.XML,
'mutt': mode_mutt.Mutt,
'sql': mode_sql.Sql,
'javascript': mode_javascript.Javascript,
'template': mode_tt.Template,
}
# these are used in this order to determine which mode to open certain
# kinds of files
self.mode_paths = {
'/etc/profile': 'sh',
}
self.mode_basenames = {
'.bashrc': 'sh',
'.bash_profile': 'sh',
'.profile': 'sh',
}
self.mode_extensions = {
'.py': 'python',
'.pl': 'perl',
'.pm': 'perl',
'.t': 'perl',
'.c': 'c',
'.txt': 'text',
'.s': 'nasm',
'.sh': 'sh',
'.bash': 'sh',
'.xml': 'xml',
'.xml.in': 'xml',
'.html': 'xml',
'.htm': 'xml',
'.sql': 'sql',
'.js': 'javascript',
'.tt': 'template'
}
self.mode_detection = {
'python': 'python',
'perl': 'perl',
'sh': 'sh',
'bash': 'sh',
}
# initialize our methods
self.methods = {}
for name in dir(method):
cls = getattr(method, name)
if hasattr(cls, '_is_method') and cls._is_method:
self.methods[cls._name()] = cls()
# create all the insert methods for the character ranges we like
for c in string.letters + string.digits + string.punctuation:
## closing tags are handled differently
#if c == ')' or c == ']' or c == '}':
# continue
obj = method.InsertString(c)
self.methods[obj.name] = obj
# window/slot height/width
height = self.y - 2
width = self.x - 1
# initialize our buffers
# note that the first buffer in buffers will be initially visible
buffers.append(buffer.ScratchBuffer())
buffers.append(buffer.ConsoleBuffer())
self.bufferlist = bufferlist.BufferList(height, width)
self.active_slot = 0
self.resize_slots()
# build windows for our buffers
for b in buffers:
window.Window(b, self, height, width, slot=self.active_slot,
mode_name=init_mode)
self.bufferlist.add_buffer(b)
self.resize_windows()
# see if the user has requested that we go to a particular line
if jump_to_line:
name = buffers[0].name()
b = self.bufferlist.get_buffer_by_name(name)
w = b.get_window(self.active_slot)
method.GotoLine().execute(w, lineno=jump_to_line)
# initialize our kill ring and last action
self.kill_ring = []
self.kill_commands = ['kill', 'kill-region']
self.last_action = None
self.last_search = None
self.last_replace_before = None
self.last_replace_after = None
# initialize tab handlers
method.DATATYPES['path'] = completer.FileCompleter()
method.DATATYPES['buffer'] = completer.BufferCompleter(self)
method.DATATYPES['command'] = completer.CommandCompleter()
method.DATATYPES['shell'] = completer.ShellCompleter()
method.DATATYPES['method'] = completer.MethodCompleter()
method.DATATYPES['mode'] = completer.ModeCompleter()
method.DATATYPES['perlfunction'] = completer.PerlFunctionCompleter()
# set up curses
self.win = curses.newwin(self.y, self.x, 0, 0)
self.win.leaveok(1)
curses.meta(1)
curses.halfdelay(1)
self.hide_cursor()
def globals(self):
return globals()
def locals(self):
return locals()
def add_slot(self):
b = self.bufferlist.slots[self.active_slot].buffer
n = self.bufferlist.add_slot(0, 0, 0, b)
self.resize_slots()
self.add_window_to_buffer(b, n)
self.resize_windows()
def remove_slot(self, slotname):
assert len(self.bufferlist.slots) > 1, "oh no you didn't!"
assert slotname >= 0 and slotname < len(self.bufferlist.slots), \
"invalid slot: %r (%r)" % (slotname, len(self.bufferlist.slots))
b = self.bufferlist.slots[slotname].buffer
self.bufferlist.remove_slot(slotname)
if self.active_slot > slotname:
self.active_slot = max(0, self.active_slot - 1)
self.resize_slots()
self.resize_windows()
def single_slot(self):
while len(self.bufferlist.slots) > 1:
if self.active_slot == 0:
self.remove_slot(1)
else:
self.remove_slot(0)
def get_window_height_width(self, slotname):
assert slotname >= 0 and slotname < len(self.bufferlist.slots), \
"invalid slot: %r" % slotname
slot = self.bufferlist.slots[slotname]
return (slot.height, slot.width)
# mini buffer handling
def get_mini_buffer(self):
return self.mini_buffer
def mini_buffer_is_open(self):
return self.mini_buffer is not None
def open_mini_buffer(self, prompt, callback, method=None, tabber=None,
modename=None):
if self.mini_buffer_is_open():
self.close_mini_buffer()
self.mini_prompt = prompt
self.mini_buffer = minibuffer.MiniBuffer(callback, method, tabber,
modename)
window.Window(self.mini_buffer, self, height=1,
width=self.x-1-len(self.mini_prompt)-1, slot='mini')
self.mini_active = True
def exec_mini_buffer(self):
self.mini_buffer.callback(self.mini_buffer.make_string())
self.close_mini_buffer()
def close_mini_buffer(self):
if self.mini_buffer_is_open():
self.mini_buffer.close()
self.mini_buffer = None
self.mini_prompt = ""
self.mini_active = False
def get_mini_buffer_prompt(self):
return self.mini_prompt
def set_mini_buffer_prompt(self, p):
self.mini_prompt = p
# window handling
def toggle_window(self):
assert 0 <= self.active_slot and self.active_slot < len(self.bufferlist.slots)
self.active_slot = (self.active_slot + 1) % len(self.bufferlist.slots)
def window(self):
slotname = self.active_slot
return self.bufferlist.slots[slotname].buffer.get_window(slotname)
def active_window(self):
if self.mini_active:
return self.mini_buffer.get_window('mini')
else:
assert 0 <= self.active_slot and self.active_slot < len(self.bufferlist.slots), \
"0 <= %d < %d" % (self.active_slot, len(self.bufferlist.slots))
slotname = self.active_slot
return self.bufferlist.slots[slotname].buffer.get_window(slotname)
# buffer handling
def file_buffer(self, path, data, switch_to=True):
assert not self.has_buffer_name(path), 'oh no! %r is already open' % path
assert not os.path.exists(path), 'oh no! %r already exists in fs' % path
f = open(path, 'w')
f.write(data)
f.close()
b = buffer.FileBuffer(path)
b.open()
self.add_window_to_buffer(b, self.active_slot)
self.add_buffer(b)
if switch_to:
self.switch_buffer(b)
def data_buffer(self, name, data, switch_to=True, modename=None):
if self.has_buffer_name(name):
b = self.bufferlist.buffer_names[name]
self.remove_buffer(b)
b = buffer.DataBuffer(name, data)
if modename is not None:
b.modename = modename
self.add_window_to_buffer(b, self.active_slot)
self.add_buffer(b)
if switch_to:
self.switch_buffer(b)
def get_buffer_by_path(self, path):
return self.bufferlist.get_buffer_by_path(path)
def has_buffer_name(self, name):
return self.bufferlist.has_buffer_name(name)
def get_buffer_by_name(self, name):
return self.bufferlist.get_buffer_by_name(name)
def has_buffer(self, b):
return self.bufferlist.has_buffer(b)
def add_buffer(self, b):
self.bufferlist.add_buffer(b)
def remove_buffer(self, b):
assert b.name() != "*Scratch*", "can't kill the scratch"
assert self.bufferlist.has_buffer(b), "can't kill what's not there"
assert len(self.bufferlist.buffers) > 1, "can't kill with no other buffers"
self.bufferlist.remove_buffer(b)
b.close()
if self.bufferlist.empty_slot(self.active_slot):
b2 = self.bufferlist.hidden_buffers[0]
self.bufferlist.set_slot(self.active_slot, b2)
def switch_buffer(self, b):
assert self.has_buffer_name(b.name()), "buffer %s does not exist" % (b.name())
assert 0 <= self.active_slot and self.active_slot < len(self.bufferlist.slots)
self.add_window_to_buffer(b, self.active_slot)
self.bufferlist.set_slot(self.active_slot, b)
def add_window_to_buffer(self, b, slotname):
if not b.has_window(slotname):
slot = self.bufferlist.slots[slotname]
window.Window(b, self, height=slot.height, width=slot.width, slot=slotname)
# error string handling
def set_error(self, s):
self.error_string = s
self.error_timestamp = time.time()
def clear_error(self):
self.error_string = ""
self.error_timestamp = None
def resize_event(self):
self.y, self.x = self.stdscr.getmaxyx()
self.resize_slots()
self.resize_windows()
def resize_slots(self):
n = len(self.bufferlist.slots)
x = self.x - 1
y_sum = self.y - 1 - n
y_pool = y_sum
y_offset = 0
for i in range(0, n - 1):
slot = self.bufferlist.slots[i]
y = y_sum / n
slot.resize(y, x, y_offset)
y_pool -= y
y_offset += y + 1
self.bufferlist.slots[n-1].resize(y_pool, x, y_offset)
def resize_windows(self):
for b in self.bufferlist.buffers:
keys = b.windows.keys()
for name in keys:
try:
(height, width) = self.get_window_height_width(name)
b.windows[name].set_size(width, height)
except:
w = b.windows[name]
del b.windows[name]
# kill w now
# hide the curses cursor
def hide_cursor(self):
self.win.move(self.y-2, 0)
try:
curses.curs_set(0)
except:
pass
# exit
def exit(self):
self.done = True
# kill stack manipulation
def push_kill(self, s):
if s is not None:
if self.last_action in self.kill_commands and \
len(self.kill_ring):
self.kill_ring[-1] = self.kill_ring[-1] + s
else:
self.kill_ring.append(s)
if len(self.kill_ring) > KILL_RING_LIMIT:
self.kill_ring.pop(0)
def pop_kill(self):
return self.kill_ring.pop(-1)
def has_kill(self, i=-1):
return len(self.kill_ring) >= abs(i)
def get_kill(self, i=-1):
return self.kill_ring[i]
# undo/redo
def undo(self):
try:
self.window().buffer.undo()
except Exception, e:
self.set_error("%s" % (e))
def redo(self):
try:
self.window().buffer.redo()
except Exception, e:
self.set_error("%s" % (e))
# action creating methods
def make_insert_action(self, c):
return lambda: self.window().insert_string(c)
def make_window_action(self, methodname):
f = getattr(self.window(), methodname)
f()
# we are evil
def eval(self, s):
return eval(s)
# the mighty run-loop!
def run(self):
self.done = False
#keycodes = []
while not self.done:
i = self.win.getch()
#if i > 0:
# if len(keycodes) >= 6:
# keycodes.pop(0)
# keycodes.append(str(i))
#self.set_error('keycodes: %s' % repr(keycodes))
if i == curses.KEY_RESIZE:
while i == curses.KEY_RESIZE:
i = self.win.getch()
self.resize_event()
err = ''
try:
self.input.parse(i)
except Exception, e:
err = str(e)
while len(self.input.tokens):
t = self.input.tokens.pop(0)
self.active_window().mode.handle_token(t)
self.draw()
if err:
self.set_error(err)
if self.error_timestamp is not None and \
ERROR_TIMEOUT > 0 and \
time.time() - self.error_timestamp > ERROR_TIMEOUT:
self.clear_error()
return
# highlighting
# each highlighted_range contains three things: [window, start_p, end_p]
def add_highlighted_range(self, w, start_p, end_p):
self.highlighted_ranges.append([w, start_p, end_p])
def clear_highlighted_ranges(self):
self.highlighted_ranges = []
# full screen drawer
def draw(self):
self.hide_cursor()
self.draw_slots()
self.draw_input_bar()
self.hide_cursor()
self.win.noutrefresh()
self.hide_cursor()
curses.doupdate()
# debugging
def dump(self):
w = self.window()
ll = len(w.buffer.lines)
pl = len(w.get_physical_lines())
vl = len(w.visible_lines())
first = w.first
last = w.last
cursor = w.logical_cursor()
vcursor = w.visible_cursor()
s = ""
s += "width: %d\n" % (w.width)
s += "height: %d\n" % (w.height)
s += "len logical lines: %d\n" % (ll)
s += "logical first: %s\n" % (first)
s += "logical last: %s\n" % (last)
s += "logical cursor: %s\n" % (cursor)
s += "len physical lines: %d\n" % (pl)
s += "physical first: %s\n" % (w.physical_point(first))
s += "physical last: %s\n" % (w.physical_point(last))
s += "physical cursor: %s\n" % (w.physical_point(cursor))
s += "len visible lines: %d\n" % (vl)
s += "visible first: %s\n" % ("n/a")
s += "visible last: %s\n" % ("n/a")
s += "visible cursor: %s\n" % (vcursor)
return s
# sub-drawing methods
def draw_slots(self):
self.win.erase()
for i in range(0, len(self.bufferlist.slots)):
slot = self.bufferlist.slots[i]
self.draw_slot(i)
self.draw_status_bar(i)
def draw_slot(self, slotname):
slot = self.bufferlist.slots[slotname]
if not slot.buffer.has_window(slotname):
return
w = slot.buffer.get_window(slotname)
lines = w.visible_lines()
regions = w.mode.visible_regions()
## FIXME: why isn't this always the same????
assert (len(lines) == len(regions) or
len(lines) == len(regions) - 1), "%d,%d" % (len(lines),
len(regions)-1)
assert len(lines) > 0, "no lines... why?"
m = min(len(lines), slot.height)
assert m > 0
x = slot.width
y = slot.height
y_offset = slot.offset
assert x > 0
assert y > 0
red_attr = color.build_attr(color.pairs('red', 'default'))
for i in range(0, m):
j = 0
line = lines[i]
for r in regions[i]:
try:
# start, end, attr, value, ttype = r
assert 0 <= r.start, "0 <= %d" % (r.start)
assert r.start <= r.end, "%d <= %d" % (r.start, r.end)
assert r.end <= len(line), "%d <= %d" % (r.end, len(line))
except Exception, e:
s = "\n".join([repr(x) for x in regions])
raise Exception, "%s\n%s\n\n%s\n\n%s\n\n%s\n\n%d" % \
(e, s, regions[i], r, repr(line), len(line))
assert line[r.start:r.end] == r.value, \
"%r != %r" % (line[r.start:r.end], r.value)
try:
if DARK_BACKGROUND:
attr = r.attr | curses.A_BOLD
else:
attr = r.attr
self.win.addnstr(i + y_offset, r.start, r.value, r.end - r.start, attr)
except Exception, e:
raise Exception, "%s\n%s %s %s %s" % \
(e, repr(i), repr(r.start), repr(r.value), repr(r.end - r.start))
j = r.end
if j < len(line):
# this is cheating... FIXME
self.win.addnstr(i + y_offset, j, line[j:], len(line) - j)
j += len(line) - j
if j < x:
self.win.addnstr(i + y_offset, j, ' ' * (x-j), (x-j))
if w.continued_visible_line(i):
self.win.addch(i + y_offset, x, '\\', red_attr)
else:
self.win.addch(i + y_offset, x, ' ')
for i in range(m, y):
self.win.addnstr(i + y_offset, 0, '~' + ' ' * (x), x + 1, red_attr)
for (high_w, lp1, lp2) in self.highlighted_ranges:
if lp1.y != lp2.y:
# this region is incoherent, so skip it, or die, whatever
#raise Exception, "haddock %d != %d" % (lp1.y, lp2.y)
pass
elif w is not high_w:
# this region isn't in the current window so skip it
pass
else:
(pp1, pp2) = (w.physical_point(lp1), w.physical_point(lp2))
vo = w.visible_offset()
(vp1, vp2) = (pp1.offset(0, -vo), pp2.offset(0, -vo))
if vp2.y < 0 or vp1.y > w.height:
# this region is not visible, so skip it
pass
else:
# first let's fix our points so we're sure they're visible
if vp1.y < 0:
vp1 = point.Point(0,0)
if vp2.y > w.height:
vp2 = point.Point(len(lines[-1]), w.height-1)
if vp1.y == vp2.y:
# our region physically fits on one line; this is easy
b = lines[vp1.y][vp1.x:vp2.x]
self.win.addstr(vp1.y + y_offset, vp1.x, b, curses.A_REVERSE)
else:
# our region spans multiple physical lines, so deal
b1 = lines[vp1.y][vp1.x:]
self.win.addstr(vp1.y + y_offset, vp1.x, b1, curses.A_REVERSE)
for i in range(vp1.y + 1, vp2.y):
b = lines[i]
self.win.addstr(i + y_offset, 0, b, curses.A_REVERSE)
b2 = lines[vp2.y][:vp2.x]
self.win.addstr(vp2.y + y_offset, 0, b2, curses.A_REVERSE)
if self.margins_visible:
for (limit, shade) in self.margins:
if self.x > limit:
for i in range(0, y):
# the actual character is the lower 8 bits, and the
# attribute is the upper 8 bits; we will ignore the
# attribute and just get the character
char = self.win.inch(i + y_offset, limit) & 255
attr = color.build('default', shade, 'bold')
self.win.addch(i + y_offset, limit, char, attr)
if self.mini_active is False and self.active_slot == slotname:
if w.active_point is not None and w.point_is_visible(w.active_point):
pa = w.physical_point(w.active_point)
va = pa.offset(0, -w.visible_offset())
if len(lines[va.y]):
a = lines[va.y][va.x]
else:
a = ' '
self.win.addch(va.y + y_offset, va.x, a, curses.A_REVERSE)
else:
cursor = w.visible_cursor()
cx, cy = (cursor.x, cursor.y)
if cy >= len(lines):
self.set_error('in main1: cursor error; %d >= %d' %
(cy, len(lines)))
return
elif cx == len(lines[cy]):
c = ' '
elif cx > len(lines[cy]):
self.set_error('why? %r %r' % (cursor, len(lines[cy])))
return
else:
c = lines[cy][cx]
self.win.addch(cy + y_offset, cx, c, curses.A_REVERSE)
def draw_status_bar(self, slotname):
slot = self.bufferlist.slots[slotname]
if not slot.buffer.has_window(slotname):
return
w = slot.buffer.get_window(slotname)
b = w.buffer
cursor = w.logical_cursor()
pcursor = w.physical_cursor()
first = w.first
last = w.last
if b.readonly():
if b.changed():
modflag = '%*'
else:
modflag = '%%'
else:
if b.changed():
modflag = '**'
else:
modflag = '--'
if w.mark:
mark = w.mark
else:
mark = point.Point(-1, -1)
name = b.name()
if w.first_is_visible():
perc = "Top"
elif w.last_is_visible():
perc = "Bot"
else:
perc = "%2d%%" % (first.y*100 / len(b.lines))
# XYZ: we should actually use more of the 'state' variables
format = "----:%s-Fl %-18s (%s)--L%d--C%d--%s"
status = format % (modflag, name, w.mode.name(), cursor.y+1, cursor.x+1, perc)
status = status[:slot.width + 1]
status += "-" * (slot.width - len(status) + 1)
self.win.addnstr(slot.height + slot.offset, 0, status, slot.width + 1,
curses.A_REVERSE)
# input bar drawing
def draw_input_bar(self):
if self.error_string:
self.draw_error()
elif self.mini_buffer_is_open():
self.draw_mini_buffer()
else:
self.draw_nothing()
try:
# curses raises an error when writing to the bottom-right cell; ignore it
self.win.addch(self.y-1, self.x-1, ' ')
except:
pass
def draw_error(self):
l = self.x - 1
s1 = self.error_string
s2 = util.cleanse(util.padtrunc(s1, l))
self.win.addnstr(self.y-1, 0, s2, l)
def draw_mini_buffer(self):
l = self.x - 1
w = self.mini_buffer.get_window('mini')
lines = w.visible_lines()
s1 = self.mini_prompt + lines[0]
s2 = util.padtrunc(s1, l)
self.win.addnstr(self.y-1, 0, s2, l)
if self.mini_active:
cursor = w.visible_cursor()
cx, cy = (cursor.x, cursor.y)
if cy >= len(lines):
#self.set_error('in main2: cursor error; %d >= %d' %
# (cy, len(lines)))
self.set_error('in main2: %r, %r [f:%r,l:%r] {h:%r,w:%r} %r' %
(len(lines), cursor, w.first, w.last,
w.height, w.width, len(lines[0])))
return
elif cx == len(lines[cy]):
c = ' '
else:
c = lines[cy][cx]
self.win.addch(self.y-1, cx + len(self.mini_prompt), c,
curses.A_REVERSE)
def draw_nothing(self):
l = self.x - 1
self.win.addnstr(self.y-1, 0, util.pad('', l), l)
def open_aes_file(path, nl, name=None):
p = getpass.getpass("Please enter the AES password: ")
b = buffer.AesBuffer(path, p, nl, name)
return b
def open_plain_file(path, nl, name=None):
b = buffer.FileBuffer(path, nl, name)
return b
if __name__ == "__main__":
ciphers = { 'none': open_plain_file,
'aes': open_aes_file }
linetypes = { 'win': '\r\n',
'mac': '\r',
'unix': '\n' }
import optparse
parser = optparse.OptionParser()
parser.set_defaults(debug=False)
parser.set_defaults(goto=None)
parser.set_defaults(mode=None)
parser.set_defaults(cipher='none')
parser.set_defaults(linetype='unix')
parser.add_option('-d', '--debug', dest='debug', action='store_true',
help='run in debug mode')
parser.add_option('-e', '--encrypt', dest='cipher', metavar='CIPHER',
help='decrypt and encrypt with CIPHER (default: none)')
parser.add_option('-g', '--goto', dest='goto', metavar='NUM', type='int',
help='jump to line NUM of the first argument')
parser.add_option('-l', '--line-end', dest='linetype', metavar='TYPE',
help='use TYPE (win,mac,unix) line endings (default: unix)')
parser.add_option('-m', '--mode', dest='mode', metavar='MODE',
help='open arguments in MODE')
(opts, args) = parser.parse_args()
# if debugging, disable error handling to produce backtraces
if opts.debug:
mode.DEBUG = True
# we will support using +19 as the first argument to indicate opening the
# first file on line 19 (same as -g 19 or --goto 19)
if args and args[0].startswith('+'):
opts.goto = int(args[0][1:])
args = args[1:]
# figure out which kind of line types we're using
if opts.linetype not in linetypes:
sys.stderr.write('invalid linetype: %r' % opts.linetype)
sys.exit(1)
nl = linetypes[opts.linetype]
# figure out what kind of file open function to use
if opts.cipher not in ciphers:
sys.stderr.write('invalid cipher: %r' % opts.cipher)
sys.exit(2)
f = ciphers[opts.cipher]
# open each path using our callback to get a buffer, open that buffer, etc.
buffers = []
names = sets.Set()
paths = sets.Set()
for path in args:
path = os.path.abspath(os.path.realpath(util.expand_tilde(path)))
if path in paths:
continue
name = os.path.basename(path)
if name in names:
i = 1
auxname = '%s/%d' % (name, i)
while auxname in names:
i += 1
auxname = '%s/%d' % (name, i)
name = auxname
b = f(path, nl, name)
b.open()
buffers.append(b)
paths.add(path)
names.add(name)
# ok, now run our app
run(buffers, opts.goto, opts.mode)
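The four mode tables built in Application.__init__ are consulted in the order listed: exact path, then basename, then extension, then shebang detection. A minimal sketch of that resolution order, assuming an Application instance as defined above (guess_mode itself is a hypothetical helper, not a function application.py defines):

import os

# hypothetical helper showing the documented lookup order
def guess_mode(app, path, first_line=''):
    if path in app.mode_paths:
        return app.mode_paths[path]
    basename = os.path.basename(path)
    if basename in app.mode_basenames:
        return app.mode_basenames[basename]
    for ext in app.mode_extensions:
        if basename.endswith(ext):
            return app.mode_extensions[ext]
    if first_line.startswith('#!'):
        # e.g. "#!/usr/bin/env python" names the interpreter last
        parts = first_line[2:].split()
        if parts:
            interp = os.path.basename(parts[-1])
            if interp in app.mode_detection:
                return app.mode_detection[interp]
    return 'fundamental'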

516
buffer.py Normal file

@@ -0,0 +1,516 @@
import md5, os, sets, shutil
import aes, point, method, regex
# set this to 0 or less to have infinite undo/redo
REDO_STACK_LIMIT = 1024
UNDO_STACK_LIMIT = 1024
# abstract class
class Buffer(object):
def __init__(self, nl='\n'):
self.lines = [""]
self.windows = {}
self.undo_stack = []
self.redo_stack = []
assert nl in ('\n', '\r', '\r\n'), "Invalid line ending"
self.nl = nl
self.modified = False
def num_chars(self):
n = 0
for line in self.lines[:-1]:
n += len(line) + 1
n += len(self.lines[-1])
return n
# basic file operation stuff
def _open_file_r(self, path):
path = os.path.realpath(path)
if not os.path.isfile(path):
raise Exception, "Path '%s' does not exist" % (path)
if not os.access(path, os.R_OK):
raise Exception, "Path '%s' cannot be read" % (path)
f = open(path, 'r')
return f
def _open_file_w(self, path):
if os.path.isfile(path):
raise Exception, "Path '%s' already exists" % (path)
d = os.path.dirname(path)
if not os.access(d, os.R_OK):
raise Exception, "Dir '%s' cannot be read" % (path)
if not os.access(d, os.W_OK):
raise Exception, "Dir '%s' cannot be written" % (path)
f = open(path, 'w')
return f
def _temp_path(self, path):
(dirname, basename) = os.path.split(path)
return os.path.join(dirname, ".__%s__pmacs" % (basename))
# undo/redo stack
def add_to_stack(self, move, stack="undo"):
if stack == "undo":
self.redo_stack = []
self.undo_stack.append(move)
if UNDO_STACK_LIMIT > 0:
while len(self.undo_stack) > UNDO_STACK_LIMIT:
self.undo_stack.pop(0)
elif stack == "redo":
self.redo_stack.append(move)
if REDO_STACK_LIMIT > 0:
while len(self.redo_stack) > REDO_STACK_LIMIT:
self.redo_stack.pop(0)
elif stack == "none":
self.undo_stack.append(move)
if UNDO_STACK_LIMIT > 0:
while len(self.undo_stack) > UNDO_STACK_LIMIT:
self.undo_stack.pop(0)
else:
raise Exception, "Invalid stack to add to: %s" % (stack)
def restore_move(self, move, stack="redo"):
if move[0] == "insert":
self.insert_string(move[1], move[2], stack=stack)
elif move[0] == "delete":
self.delete_string(move[1], move[2], stack=stack)
else:
raise Exception, "Invalid undo move type: '%s'" % (move[0])
def undo(self):
if len(self.undo_stack):
move = self.undo_stack.pop(-1)
self.restore_move(move, stack="redo")
else:
raise Exception, "Nothing to Undo!"
def redo(self):
if len(self.redo_stack):
move = self.redo_stack.pop(-1)
self.restore_move(move, stack="none")
else:
raise Exception, "Nothing to Redo!"
# window-buffer communication
def add_window(self, w, name):
assert name not in self.windows, "window %r already exists" % name
self.windows[name] = w
def remove_window(self, name):
del self.windows[name]
def _region_added(self, p, xdiff, ydiff, str=None, stack="undo"):
y = p.y + ydiff
if ydiff == 0:
x = p.x + xdiff
else:
x = xdiff
p2 = point.Point(x, y)
move = ["delete", p, p2, str]
self.add_to_stack(move, stack)
for w in self.windows.itervalues():
w._region_added(p, xdiff, ydiff, str)
def _region_removed(self, p1, p2, str=None, stack="undo"):
move = ["insert", p1, str]
self.add_to_stack(move, stack)
for w in self.windows.itervalues():
w._region_removed(p1, p2, str)
def has_window(self, name):
return name in self.windows
def get_window(self, name):
if name in self.windows:
return self.windows[name]
else:
raise Exception, "uh oh %r" % self.windows
# internal validation
def _validate_point(self, p):
self._validate_xy(p.x, p.y)
def _validate_xy(self, x, y):
assert y >= 0 and y < len(self.lines), \
"xy1: %d >= 0 and %d < %d" % (y, y, len(self.lines))
assert x >= 0 and x <= len(self.lines[y]), \
"xy2: %d >= 0 and %d <= %d" % (x, x, len(self.lines[y]))
def _validate_y(self, y):
assert y >= 0 and y < len(self.lines), \
"y: %d >= 0 and %d < %d" % (y, y, len(self.lines))
# internal
def make_string(self, start=0, end=None, nl='\n'):
assert end is None or start < end
if start == 0 and end is None:
return nl.join(self.lines)
else:
lines = []
i = 0
offset = 0
while i < len(self.lines):
l = self.lines[i]
if offset + len(l) < start:
pass
elif offset <= start:
if end is None or offset + len(l) < end:
lines.append(l[start - offset:])
else:
lines.append(l[start - offset:end - offset])
elif end is None or offset + len(l) < end:
lines.append(l)
else:
lines.append(l[:end - offset])
offset += len(l) + 1
i += 1
return nl.join(lines)
# methods to be overridden by subclasses
def name(self):
return "Generic"
def close(self):
pass
def open(self):
pass
def changed(self):
return self.modified
def reload(self):
raise Exception, "%s reload: Unimplemented" % (self.name())
def save_as(self, path, force=False):
# check to see if the path exists, and if we're prepared to overwrite it
# if yes to both, get its mode so we can preserve the path's permissions
mode = None
if os.path.exists(path):
if force:
mode = os.stat(path)[0]
else:
raise Exception, "oh no! %r already exists" % path
# create the string that we're going to write into the file
data = self.write_filter(self.make_string(nl=self.nl))
# create a safe temporary path to write to, and write out data to it
temp_path = self._temp_path(path)
f2 = self._open_file_w(temp_path)
f2.write(data)
f2.close()
# move the temporary file to the actual path; maybe change permissions
shutil.move(temp_path, path)
if mode:
os.chmod(path, mode)
# the file has not been modified now
self.modified = False
def readonly(self):
return False
def read_filter(self, data):
return data
def write_filter(self, data):
return data
# point retrieval
def get_buffer_start(self):
return point.Point(0, 0, "logical")
def get_buffer_end(self):
y = len(self.lines) - 1
return point.Point(len(self.lines[y]), y, "logical")
def get_line_start(self, y):
self._validate_y(y)
return point.Point(0, y, "logical")
def get_line_end(self, y):
self._validate_y(y)
return point.Point(len(self.lines[y]), y, "logical")
def get_point_offset(self, p):
'''used to find positions in data string'''
self._validate_point(p)
offset = 0
for line in self.lines[:p.y]:
offset += len(line) + 1
offset += p.x
return offset
def get_offset_point(self, offset):
i = 0
y = 0
for line in self.lines:
if i + len(line) + 1 > offset:
break
else:
i += len(line) + 1
y += 1
return point.Point(offset - i, y)
# data retrieval
def get_character(self, p):
self._validate_point(p)
if p.x == len(self.lines[p.y]):
if p.y < len(self.lines) - 1:
return "\n"
else:
return ""
else:
return self.lines[p.y][p.x]
def get_substring(self, p1, p2):
self._validate_point(p1)
self._validate_point(p2)
assert p1 <= p2, "p1.x (%d) > p2.x (%d)" % (p1.x, p2.x)
if p1 == p2:
return ""
elif p1.y == p2.y:
return self.lines[p1.y][p1.x:p2.x]
else:
if p1.x == 0:
text = "%s\n" % (self.lines[p1.y])
else:
text = "%s\n" % (self.lines[p1.y][p1.x:])
for i in range(p1.y+1, p2.y):
text = "%s%s\n" % (text, self.lines[i])
if p2.x > 0:
text = "%s%s" % (text, self.lines[p2.y][:p2.x])
return text
def set_data(self, d, force=False):
if not force and self.readonly():
raise Exception, "set_data: buffer is readonly"
start = self.get_buffer_start()
end = self.get_buffer_end()
self.delete_string(start, end, force=force)
self.insert_string(start, d, force=force)
self.modified = True
# insertion into buffer
def insert_string(self, p, s, stack="undo", force=False):
if not force:
assert not self.readonly(), "insert_string: buffer is read-only"
new_lines = s.split("\n")
if len(new_lines) > 1:
xdiff = len(new_lines[-1]) - p.x
else:
xdiff = len(new_lines[-1])
ydiff = len(new_lines) - 1
new_lines[0] = self.lines[p.y][:p.x] + new_lines[0]
new_lines[-1] = new_lines[-1] + self.lines[p.y][p.x:]
self.lines[p.y:p.y+1] = new_lines
self._region_added(p, xdiff, ydiff, s, stack)
self.modified = True
# deletion from buffer
def delete_character(self, p, stack="undo", force=False):
"""delete character at (x,y) from the buffer"""
if not force:
assert not self.readonly(), "delete_character: buffer is read-only"
self._validate_point(p)
x, y = p.x, p.y
if p.x < len(self.lines[p.y]):
s = self.lines[y][x]
self.lines[y] = "%s%s" % (self.lines[y][:x], self.lines[y][x+1:])
self._region_removed(p, p.offset(1, 0, "logical"), str=s, stack=stack)
elif p.y < len(self.lines) - 1:
s = "\n"
self.lines[y:y+2] = ["%s%s" % (self.lines[y], self.lines[y+1])]
self._region_removed(p, point.Point(0, p.y + 1, "logical"), str="\n", stack=stack)
self.modified = True
def delete_string(self, p1, p2, stack="undo", force=False):
"""delete characters from p1 up to p2 from the buffer"""
if not force:
assert not self.readonly(), "delete_string: buffer is read-only"
self._validate_xy(p1.x, p1.y)
self._validate_xy(p2.x, p2.y)
if p1 == p2:
return
assert p1 < p2, "p1 %r > p2 %r" % (p1, p2)
s = self.get_substring(p1, p2)
if p1.y < p2.y:
start_line = self.lines[p1.y][:p1.x]
end_line = self.lines[p2.y][p2.x:]
self.lines[p1.y:p2.y+1] = ["%s%s" % (start_line, end_line)]
elif p1.y == p2.y:
if p1.x == p2.x - 1:
s = self.lines[p1.y][p1.x]
self.delete_character(p1, stack=stack)
# make sure we don't call _region_removed twice, so return
return
elif p1.x < p2.x:
s = self.lines[p1.y][p1.x:p2.x]
self.lines[p1.y] = "%s%s" % (self.lines[p1.y][:p1.x],
self.lines[p1.y][p2.x:])
else:
raise Exception, "p1.x (%d) >= p2.x (%d)" % (p1.x, p2.x)
else:
raise Exception, "p1.y (%d) > p2.y (%d)" % (p1.y, p2.y)
self._region_removed(p1, p2, str=s, stack=stack)
self.modified = True
# random
def count_leading_whitespace(self, y):
line = self.lines[y]
m = regex.leading_whitespace.match(line)
if m:
return m.end()
else:
# should not happen: leading_whitespace should match any line
raise Exception, "leading_whitespace regex failed on line %d" % (y)
# scratch is a singleton
scratch = None
class ScratchBuffer(Buffer):
def __new__(cls, *args, **kwargs):
global scratch
if scratch is None:
scratch = object.__new__(ScratchBuffer)
return scratch
def name(self):
return "*Scratch*"
def close(self):
global scratch
scratch = None
class DataBuffer(Buffer):
def __init__(self, name, data, nl='\n'):
Buffer.__init__(self, nl)
self._name = name
self.lines = data.split("\n")
def name(self):
return self._name
def close(self):
pass
def readonly(self):
return True
# console is another singleton
console = None
class ConsoleBuffer(Buffer):
def __new__(cls, *args, **kwargs):
global console
if console is None:
b = object.__new__(ConsoleBuffer)
console = b
return console
def __init__(self, nl='\n'):
Buffer.__init__(self, nl)
lines = ['Python Console\n',
"Evaluate python expressions in the editor's context (self)\n",
'Press Control-] to exit\n',
'\n']
self.set_data(''.join(lines), force=True)
def name(self):
return '*Console*'
def changed(self):
return False
def close(self):
global console
console = None
def readonly(self):
return True
class FileBuffer(Buffer):
def __init__(self, path, nl='\n', name=None):
'''fb = FileBuffer(path)'''
Buffer.__init__(self, nl)
self.path = os.path.realpath(path)
self.checksum = None
if name is None:
self._name = os.path.basename(self.path)
else:
self._name = name
if os.path.exists(self.path) and not os.access(self.path, os.W_OK):
self._readonly = True
else:
self._readonly = False
def readonly(self):
return self._readonly
def _open_file_r(self, path=None):
if path is None:
path = self.path
path = os.path.realpath(path)
self.path = path
if not os.path.isfile(path):
raise Exception, "Path '%s' does not exist" % (path)
if not os.access(path, os.R_OK):
raise Exception, "Path '%s' cannot be read" % (path)
f = open(path, 'r')
return f
def _open_file_w(self, path=None):
if path is None:
path = self.path
if os.path.isfile(path):
raise Exception, "Path '%s' already exists" % (path)
d = os.path.dirname(path)
if not os.access(d, os.R_OK):
raise Exception, "Dir '%s' cannot be read" % (path)
if not os.access(d, os.W_OK):
raise Exception, "Dir '%s' cannot be written" % (path)
f = open(path, 'w')
return f
def _temp_path(self, path=None):
if path is None:
path = self.path
(dirname, basename) = os.path.split(path)
return os.path.join(dirname, ".__%s__pmacs" % (basename))
# methods for dealing with the underlying resource, etc.
def name(self):
#return self.path
return self._name
def path_exists(self):
return os.path.exists(self.path)
def store_checksum(self, data):
self.checksum = md5.new(data)
def read(self):
if self.path_exists():
f = self._open_file_r()
data = f.read()
f.close()
self.store_checksum(data)
else:
data = ''
data = self.read_filter(data)
#FIXME: this is horrible...but maybe not as horrible as using tabs??
data = data.replace("\t", " ")
return data
def open(self):
data = self.read()
self.lines = data.split(self.nl)
def reload(self):
self.open()
def changed_on_disk(self):
assert self.checksum is not None
f = open(self.path)
data = f.read()
f.close()
m = md5.new(data)
return self.checksum.digest() != m.digest()
def save(self, force=False):
if self.readonly():
raise Exception, "can't save a read-only file"
if self.checksum is not None and force is False:
# the file already existed and we took a checksum so make sure it's
# still the same right now
if not self.path_exists():
raise Exception, "oh no! %r disappeared!" % self.path
if self.changed_on_disk():
raise Exception, "oh no! %r has changed on-disk!" % self.path
temp_path = self._temp_path()
data = self.make_string(nl=self.nl)
data = self.write_filter(data)
f2 = self._open_file_w(temp_path)
f2.write(data)
f2.close()
if self.path_exists():
mode = os.stat(self.path)[0]
os.chmod(temp_path, mode)
shutil.move(temp_path, self.path)
self.store_checksum(data)
self.modified = False
def save_as(self, path):
self.path = path
self.save()
class AesBuffer(FileBuffer):
def __init__(self, path, password, nl='\n', name=None):
'''fb = AesBuffer(path, password)'''
FileBuffer.__init__(self, path, nl, name)
self.password = password
def read_filter(self, data):
return aes.decrypt_data(data, self.password)
def write_filter(self, data):
return aes.encrypt_data(data, self.password)
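A small sketch exercising the undo/redo stacks above, assuming the repo's buffer and point modules import as shown:

# exercise the undo/redo stacks on a plain Buffer
import buffer, point

b = buffer.Buffer()
b.insert_string(point.Point(0, 0), "hello world")
b.delete_string(point.Point(0, 0), point.Point(6, 0))  # drops "hello "
b.undo()   # the deletion is reversed
b.redo()   # ...and replayed
print b.make_string()   # prints: world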

104
bufferlist.py Normal file

@@ -0,0 +1,104 @@
import sets
class Slot:
def __init__(self, height, width, offset, buffer=None):
self.height = height
self.width = width
self.offset = offset
self.buffer = buffer
self.resize(height, width, offset)
def is_empty(self):
return self.buffer is None
def resize(self, height, width, offset):
self.height = height
self.width = width
self.offset = offset
# possible callbacks
def remove(self):
# possible callbacks
pass
class BufferList:
def __init__(self, height, width, buffers=()):
self.slots = []
self.add_slot(height, width, 0)
self.buffers = sets.Set()
self.buffer_names = {}
self.hidden_buffers = []
for b in buffers:
self.add_buffer(b)
# manipulate slots
def add_slot(self, height, width, offset=0, buffer=None):
self.slots.append(Slot(height, width, offset, buffer))
return len(self.slots) - 1
def empty_slot(self, i):
assert i > -1 and i < len(self.slots), "slot %d does not exist" % i
return self.slots[i].is_empty()
def set_slot(self, i, b):
assert i > -1 and i < len(self.slots), "slot %d does not exist" % i
assert b in self.buffers, "buffer %s does not exist" % (b.name())
if b in self.hidden_buffers:
self.hidden_buffers.remove(b)
if not self.slots[i].is_empty():
b2 = self.slots[i].buffer
self.hidden_buffers.insert(0, b2)
self.slots[i].buffer = b
def remove_slot(self, i):
assert i > -1 and i < len(self.slots), "slot %d does not exist" % i
if not self.slots[i].is_empty():
b = self.slots[i].buffer
self.hidden_buffers.insert(0, b)
self.slots[i].remove()
del self.slots[i]
# now fix the stored slot numbers for all the remaining buffers' windows
for b in self.buffers:
for j in range(i, len(self.slots)):
if b.has_window(j+1):
w = b.get_window(j+1)
del b.windows[j+1]
w.slot = j
b.windows[j] = w
# add/remove buffers
def add_buffer(self, b):
assert b not in self.buffers, "buffer %s already exists" % (b.name())
self.buffers.add(b)
self.buffer_names[b.name()] = b
self.hidden_buffers.append(b)
for i in range(0, len(self.slots)):
if self.empty_slot(i):
self.set_slot(i, b)
def has_buffer(self, b):
return b in self.buffers
def has_buffer_name(self, name):
return name in self.buffer_names
def get_buffer_by_name(self, name):
return self.buffer_names[name]
def get_buffer_by_path(self, path):
for b in self.buffers:
if hasattr(b, 'path') and b.path == path:
return b
return None
def remove_buffer(self, b):
assert b in self.buffers, "buffer %s does not exist" % (b.name())
for slot in self.slots:
if slot.buffer is b:
slot.buffer = None
self.buffers.remove(b)
del self.buffer_names[b.name()]
if b in self.hidden_buffers:
self.hidden_buffers.remove(b)
# query buffers
def is_buffer_hidden(self, b):
assert b in self.buffers, "buffer %s does not exist" % (b.name())
return b in self.hidden_buffers
def is_buffer_visible(self, b):
assert b in self.buffers, "buffer %s does not exist" % (b.name())
for slot in self.slots:
if slot.buffer is b:
return True
return False
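A quick sketch of how slots and buffers interact (the sizes here are arbitrary):

# two slots, one shared buffer
import bufferlist, buffer

bl = bufferlist.BufferList(24, 80)
b = buffer.DataBuffer('demo', 'some text')
bl.add_buffer(b)             # fills the one empty slot
i = bl.add_slot(11, 80, 12)  # a new, empty slot
bl.set_slot(i, b)            # show the same buffer twice
print bl.is_buffer_visible(b)   # prints: True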

60
cache.py Normal file

@@ -0,0 +1,60 @@
import bisect, time
class CacheDict(dict):
"""This class works like a basic dictionary except that you can put
constraints on its size. Once that size is reached, the key that was
inserted or accessed the least recently is removed every time a new key
is added."""
def __init__(self, max_size=1000000):
'''CacheDict(max_size=1000000): build a cache'''
# once max_size is reached, the oldest cache entry will be
# pushed out to make room for each new one
self.max_size = max_size
dict.__init__(self)
# _times_dict will map keys to timestamps
self._times_dict = {}
# _times_list will store (timestamp, key) pairs in sorted
# order (oldest first)
self._times_list = []
def timestamp(self, key):
'''find the timestamp for key'''
assert key in self
# construct a (timestamp, key) item
item = (self._times_dict[key], key)
# look for the item in the (sorted) list
i = bisect.bisect_left(self._times_list, item)
# make sure the index we are returning really is valid
if item != self._times_list[i]:
raise LookupError
return i
def __getitem__(self, key):
# find the value in the dict
value = dict.__getitem__(self, key)
# do this to update the timestamp on this key
self[key] = value
return value
def __setitem__(self, key, value):
# delete any old instance of the key to make way for the new
if key in self:
del self._times_list[self.timestamp(key)]
# remove old keys until we have enough space to add this one
while len(self._times_list) >= self.max_size:
oldest = self._times_list[0][1]
del self[oldest]
# add this key, create a timestamp, and update our other data
# structures accordingly
t = time.time()
dict.__setitem__(self, key, value)
self._times_dict[key] = t
# make sure we keep the list sorted
bisect.insort_left(self._times_list, (t, key))
def __delitem__(self, key):
# we need to make sure we delete this key out of all three of
# our data structures
del self._times_list[self.timestamp(key)]
del self._times_dict[key]
dict.__delitem__(self, key)
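A brief sketch of the eviction behavior; it assumes the clock advances between operations, since ordering comes from time.time() timestamps:

# least-recently-used keys fall out once max_size is reached
from cache import CacheDict

c = CacheDict(max_size=3)
c['a'] = 1
c['b'] = 2
c['c'] = 3
c['a']         # refresh 'a', making 'b' the stalest key
c['d'] = 4     # evicts 'b'
print sorted(c.keys())   # prints: ['a', 'c', 'd']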

File diff suppressed because it is too large


@@ -0,0 +1,84 @@
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.
Hi there
how's it going?
OK, here we go....
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that seemed to go ok... now for an even longer one:
GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE GJJJJJJJJJJJJJJJJJJJJJJJJJJJJJEIG EJG EIGJ EIG EEEEEEEEEEEEEEEEEEEGEIGJE IE HJEIH EJIH EJHIE JHIE JHIE GJEI GEJIG JEIGJ EIGE GEEEEEEEEEEEEEEEEEIG JEGI EJGIE GEIG EG EIGEJ GIEJG IEJG EIG JEIGJE GE
OK that's all for now.

3
code_examples/METRIC.txt Normal file

File diff suppressed because one or more lines are too long

284
code_examples/Reporting2.pm Normal file

@@ -0,0 +1,284 @@
package TBB::Reporting2;
use strict;
use warnings;
use DBI;
use TBB::ID;
use TBB::ClientSystemData;
use TBB::UserData;
use TBB::DataManager::Relational;
use TBB::UserManager;
use TBB::LogManager "write_log";
use Apache::Session::Postgres;
use Config::General;
use Data::Dumper;
=head1 NAME
TBB::Reporting2
=head1 SYNOPSIS
use TBB::Reporting2;
my $reporting = TBB::Reporting2->new();
...
$reporting->populate();
=head1 DESCRIPTION
This class populates a data_by_user table, which aliases some
userdata IDs (in particular questions, formulas and conditions) to
names given to them by site-relations in the "user_data_alias"
table.
After instantiating this class, run populate() to create the table.
What happens, in order, is this:
1. grab the user_data_alias data
2. verify (via the RM) that each resource exists; if so add it to our aliases
3. drop the old data_by_user table
4. create a new one using the aliases
5. get a list of all sessions
6. for each session:
a. figure out which client it is, and create a userdata
b. get values for all aliases (default value: "")
c. insert a row into data_by_user for this client
7. done
TODO: populate should dump out the old data_by_user first as a
backup, and maybe even automatically restore it if something fails
(but probably not).
TODO: less stuff should probably be hardcoded, even though our
setup here kind of sucks and is kind of temporary.
=cut
sub new {
my $class = shift;
my %opts = @_;
my $self = {};
bless($self, $class);
$self->connect_to_userdata_db();
$self->build_resource_manager();
$self->get_reporting_aliases();
return $self;
}
# this creates a connection to the userdata db and stores it as $self->{dmr}
# currently the settings are hardcoded cause i want it to work
sub connect_to_userdata_db {
my $self = shift;
my $host = "date.tbb";
my $port = 5432;
my $db = "tbb_tbbv2db_main";
my $user = "postgres";
my $password = "";
my $options = "";
write_log('debug', "DB params:");
write_log('debug', "\tHost: $host");
write_log('debug', "\tPort: $port");
write_log('debug', "\tDB: $db");
write_log('debug', "\tUser: $user");
write_log('debug', "\tPassword: $password");
write_log('debug', "\tOptions: $options");
$self->{dmr} = TBB::DataManager::Relational->new(
host => $host,
port => $port,
db => $db,
user => $user,
password => $password,
);
}
# this builds a resource manager if one doesn't already exist
sub build_resource_manager {
my $self = shift;
unless(defined($TBB::BenefitDelivery::ResourceManager)) {
my $rm = TBB::BenefitDelivery::ResourceManager->new('resource_dir' => 'resources');
$TBB::BenefitDelivery::ResourceManager = $rm;
}
}
foo()
unless(1);
print 'hi\n';
# this gets an array of hash references, each of which represents a userdata alias
# it stores the result in $self->{aliases}
#
# currently it hits a separate database from the userdata db. eventually, we'd like
# the restore DB and the reporting DB to be the same. sigh.
sub get_reporting_aliases {
my $self = shift;
my $rdmr = TBB::DataManager::Relational->new(
host => "reports.v2.tbb",
port => 5432,
db => "reporting",
user => "tbbv2db",
password => "reports",
);
my $aliases_aref = $rdmr->select(select => "alias, question, type",
from => "user_data_alias");
$self->{aliases} = [];
# make sure each alias is valid; undefined formulas or conditions will give
# us problems down the road
foreach my $alias (@$aliases_aref) {
eval {
my $id = TBB::ID::normalize($alias->{question});
$alias->{question} = $id;
if(TBB::ID::is_a($id, "question")) {
push(@{$self->{aliases}}, $alias);
} elsif($TBB::BenefitDelivery::ResourceManager->get_component($id)) {
push(@{$self->{aliases}}, $alias);
} else {
write_log("error", "Alias \"$alias->{question}\" does not exist");
}
};
if($@) {
write_log("error", "There was a problem with \"$alias->{question}\": $@");
}
}
# this will help to make sure that the exports for each user are proceeding
# correctly
write_log("notice", "We are reporting on " . scalar(@{$self->{aliases}}) . " aliases.");
}
# this returns an sql string which will create the appropriate data_by_user table
# we do this separately because we're not always sure where to create the table
# (date.tbb? reports.v2.tbb? who knows?) so hopefully this will make the code
# cleaner
sub data_by_user_sql {
my $self = shift;
my $sql = "CREATE TABLE data_by_user (id_user integer PRIMARY KEY, ";
$sql .= join(", ", map { "$_->{alias} $_->{type}" } @{$self->{aliases}});
$sql .= ");";
return $sql;
}
# the big one!
sub populate {
my $self = shift;
my %opts = @_;
write_log('notice', "Starting populate");
# connect to reporting db, and drop the old data_by_user
my $rdbh = DBI->connect('dbi:Pg:dbname=tbb_tbbv2db_main;host=date.tbb;port=5432',
'postgres',
'',
{RaiseError => 1});
eval { $rdbh->do("DROP TABLE data_by_user;"); };
# build new data_by_user table
my $sql = $self->data_by_user_sql();
print STDERR "FFF: $sql\n";
$rdbh->do($sql);
print STDERR "GGG: we are ok\n";
#exit(1);
my @report_fields = map {$_->{alias}} @{$self->{aliases}};
push(@report_fields, "id_user");
@report_fields = sort(@report_fields);
my @dummy_fields = map { "?" } @report_fields;
$sql = "INSERT INTO data_by_user (" . join(", ", @report_fields) . ") VALUES (" . join(", ", @dummy_fields) . ");";
#print STDERR "JJJ: $sql\n";
my $sth = $rdbh->prepare($sql);
#exit(1);
# for each client user, grab their system_data_id (which is
# a session ID); we get an array of session_id references.
my $fields = "id_user, system_data_id";
my $table = "tbb_user";
my $where = "id_user_type = 'CL' and system_data_id is not NULL";
my $session_ids = $self->{dmr}->select(select => $fields,
from => $table,
where => $where);
# for each hash in the array we made (each session ID)
my $processed = 0;
my $total = scalar(@$session_ids);
foreach my $session_id (@$session_ids) {
$processed++;
my $system_id = $session_id->{system_data_id};
my $client_id = $session_id->{id_user};
write_log('info', "Exporting $system_id (user: $client_id) ($processed/$total)");
# we need to see if there is a session ID or not in the
# sessions table. there almost certainly won't be more
# than one but we test anyway. unless there is exactly
# one we will skip this session.
my $check = $self->{dmr}->select(select => "id",
from => "sessions",
where => "id = '$system_id'");
my $count = scalar(@$check);
if($count == 0) {
write_log('warn', "Session $system_id does not exist");
next;
} elsif($count > 1) {
write_log('warn', "Session $system_id is not unique ($count found)");
next;
}
my $csd = TBB::ClientSystemData->new($system_id, "reporter");
my $dms = $csd->retrieve_data_manager_session();
my $user_data = TBB::UserData::New->new(data_manager_relational => $self->{dmr},
data_manager_session => $dms,
client_system_data => $csd,
current_client_id => $client_id);
my $user_data_obj = {'id_user' => $client_id};
foreach my $alias_href (@{$self->{aliases}}) {
my $alias = $alias_href->{alias};
my $id = $alias_href->{question};
my $type = $alias_href->{type};
my $value = $user_data->retrieve_value(base => $id,
this_id_user => $client_id);
$value ||= "";
$user_data_obj->{$alias} = $value;
}
my @values = ();
foreach my $name (@report_fields) {
push(@values, $user_data_obj->{$name});
}
$sth->execute(@values);
write_log('debug', " Saving " . scalar(keys(%$user_data_obj)) . " components") if TBB::LogManager::writes_at('debug');
#write_log('debug', " User Data Obj: " . Dumper($user_data_obj)) if TBB::LogManager::writes_at('debug');
}
write_log('notice', "Populate completed");
return 1;
}
1;

16
code_examples/TEST.txt Normal file
View File

@ -0,0 +1,16 @@
# TEST DATA
application FreeBSD
# abstract class
class Method:
def __init__(self, buffer, **vargs):
self.buffer = buffer
for (varg, vval) in vargs.iteritems():
setattr(self, varg, vval)
def name(self):
return ""
def execute(self):
raise Exception, "Unimplemented Method: %s" % (self.name())

28
code_examples/blah.c Normal file
View File

@ -0,0 +1,28 @@
int main() {
while(1 < 2) {
printf("hola\n");
}
if(1 == 2)
printf("\n");
while(1 == 2)
if(1 == 2)
printf("\n");
do if(1 == 2)
printf("hola\n");
while(1 == 2);
while(1)
if(2 < 3) {
printf("\n");
printf("\n");
}
printf("\n");
printf("\n");
}

721
code_examples/build.c Normal file
View File

@ -0,0 +1,721 @@
/*
* build.c - Fragment builder.
*
* Copyright (c) 2000-2003 - J. Kevin Scott and Jack W. Davidson
*
* This file is part of the Strata dynamic code modification infrastructure.
* This file may be copied, modified, and redistributed for non-commercial
* use as long as all copyright, permission, and nonwarranty notices are
* preserved, and that the distributor grants the recipient permission for
* further redistribution as permitted by this notice.
*
* Please contact the authors for restrictions applying to commercial use.
*
* THIS SOURCE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
* MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* Author: J. Kevin Scott
* e-mail: jks6b@cs.virginia.edu
* URL : http://www.cs.virginia.edu/~jks6b
*
*/
#ident "$Id: build.c 1063 2006-03-14 16:52:47Z williadw $"
#include "all.h"
/* Private data structures and function declarations. */
/* these macros map to the head and tail of the fragment queue,
* replacing the head and tail globals with thread-specific
* pointers
*/
#define HEAD ((hashtable_get((*TI.thread_id)()))->fq_head)
#define TAIL ((hashtable_get((*TI.thread_id)()))->fq_tail)
#define PATCHTABSIZE 1087
static strata_patch *patch_tab[PATCHTABSIZE];
static jmp_buf strata_build_env;
int builder_opt_no_frag_linking;
int strata_entrance_count;
int strata_stop_after;
static char *frag_ty_map[] = {
"CBRANCH",
"IBRANCH",
"CALL",
"RET",
"SWITCH",
"SPECIAL",
"INVALID",
"ICALL"
};
strata_fragment* strata_install_fragment_group();
strata_fragment* strata_build_single_fragment(app_iaddr_t next_PC);
void strata_add_pc_mapping(app_iaddr_t PC, fcache_iaddr_t fPC);
void strata_create_fragment_trampolines(strata_fragment *frag);
void strata_remove_create_tramp_entry(strata_patch* cur);
void targ_create_trampoline(strata_fragment *frag, strata_patch *tramplist);
/*
* strata_enter_builder - Perform operations necessary when entering the builder,
* including locking mutexes, and logging.
*/
void strata_enter_builder(app_iaddr_t to_PC, strata_fragment *from_frag) {
/* Thread locking */
STRATA_LOG("lock", "builder lock.\n");
(*TI.mutex_lock)();
}
/*
* strata_leave_builder - Perform any operation that's necessary each time we
* exit the builder and return execution to the fragment cache, including
* dumping tracing info, updating the control flow graph, and unlocking mutexes.
*/
void strata_leave_builder(strata_fragment* frag, strata_fragment* from_frag) {
/* emit information about ibtc traces, if requested */
if(from_frag && form_ibtc_traces_fd)
{
fwrite((const void *)(&(from_frag->ty)),
sizeof(int), 1, form_ibtc_traces_fd);
fwrite((const void *)(&(from_frag->indPC)),
sizeof(unsigned), 1, form_ibtc_traces_fd);
fwrite((const void *)(&(from_frag->indfPC)),
sizeof(unsigned), 1, form_ibtc_traces_fd);
fwrite((const void *)(&(frag->PC)),
sizeof(unsigned), 1, form_ibtc_traces_fd);
}
/* handle control flow edges */
strata_handle_control_flow_graph(from_frag, frag);
STRATA_TRACE("Leaving builder to execute 0x%08x:0x%08x\n", frag->fPC, frag->PC);
(*TI.mutex_unlock)();
}
/*
* strata_build_main -
* Builds a fragment and returns the starting fragment address.
*
* Inputs:
* to_PC - Beginning of new fragment.
* from_frag - Pointer to the fragment calling strata_build_main
*
* Output:
* The (fragment) address to start executing.
*/
fcache_iaddr_t strata_build_main (app_iaddr_t to_PC, strata_fragment *from_frag)
{
strata_fragment *frag;
insn_t insn;
fcache_iaddr_t to_fPC, previous_last_fPC;
app_iaddr_t next_PC, previous_next_PC;
int insn_class, first_frag;
int sieve_needs_fill=0;
strata_enter_builder(to_PC, from_frag);
/* Mark our place in case we need to restart the builder.
If the fragment cache fills, we need to create a new fragment
and then restart the builder.
(consider moving past the point where we know we're building
a fragment)
*/
if(!targ_opt_fast_return)
setjmp(strata_build_env); /* this is for flushing the f$, we're only doing that w/o fast returns */
#ifdef STATS
/* "strata iso" functionality*/
strata_entrance_count++;
if ( strata_entrance_count == strata_stop_after ) {
return to_PC;
}
stat_builder_enters++;
#endif
STRATA_TRACE("Builder entered from fragment %08x:%08x [%s] to execute %08x (%d entrances)\n",
from_frag,
from_frag?from_frag->PC:0,
from_frag?frag_ty_map[from_frag->ty]:"NONE",
to_PC,
strata_entrance_count
);
/* !!! TEMP SOLUTION: check for fast return setjmp error */
if ( strata_fragment_address(to_PC) ) {
STRATA_LOG("fast_returns", "returning to to_PC!");
(*TI.mutex_unlock)();
return to_PC;
}
/* lookup the fragment in the F$ */
frag = strata_lookup_built_fragment(to_PC) ;
/* if it's not there, build it */
if (!frag)
{
/* We're adding at least one new fragment to the fragment cache. */
/* (consider moving setjump here) */
STRATA_TRACE("Target not in F$!\n");
/* Add it to the work list, and build the group. */
strata_enqueue_address(to_PC);
frag = strata_install_fragment_group();
}
/* Execute the fragment. */
STRATA_TRACE("Executing %08x mapped to %08x type=%s\n",
to_PC,
frag->fPC,
frag_ty_map[frag->ty]
);
strata_sieve_update(frag);
/* leave builder and return the fragment address */
strata_leave_builder(frag, from_frag);
return frag->fPC;
}
/*
* strata_handle_control_flow_graph - update the (non-working) CFG for strata
*/
void strata_handle_control_flow_graph(strata_fragment *from_frag, strata_fragment *frag)
{
app_iaddr_t to_PC;
assert(frag);
to_PC=frag->PC;
/* Target is already in the fragment cache. */
STRATA_TRACE("Target in F$!\n");
/* Count the execution of the fragment we're going to. */
/* consider making into macro */
strata_profiler_count_insn(frag->prof,1);
if (from_frag != NULL) {
/* Update control flow information. */
if (!(from_frag->ty == STRATA_FRAG_CBRANCH ||
from_frag->ty == STRATA_FRAG_CALL)) {
strata_frag_control_edge(from_frag,frag);
}
switch(from_frag->ty) {
case STRATA_FRAG_RET:
#ifdef STATS
stat_num_rets++;
#endif
strata_indirect_branch_miss(from_frag,frag);
break;
case STRATA_FRAG_ICALL:
case STRATA_FRAG_IBRANCH:
#ifdef STATS
stat_num_ibranches++;
#endif
strata_indirect_branch_miss(from_frag,frag);
break;
case STRATA_FRAG_CBRANCH:
#ifdef STATS
stat_num_branches++;
#endif
break;
case STRATA_FRAG_SWITCH:
break;
case STRATA_FRAG_CALL:
break;
case STRATA_FRAG_SPECIAL:
break;
default:
strata_fatal("Unknown branch type for fragment");
}
} else {
/* All fragments must set from_frag! */
/*strata_fatal("Re-entering fragment did not set from_frag");*/
/* I'm not sure above statement is true, warn about it for now */
/* thread -- reason for not using strata fatal */
STRATA_LOG("warn", "From Frag not set!");
}
}
/*
* strata_install_fragment_group -
* This function builds and installs a group of
* dependent fragments into the F$. These should be
* fragments that aren't ready to be used until all of them are
* built. (i.e., a fragment with a call + its return fragment if
* fast returns are on.)
*/
strata_fragment* strata_install_fragment_group()
{
strata_fragment* frag, *first_frag;
app_iaddr_t next_PC;
fragment_queue* cur;
/* initializations */
first_frag = NULL;
cur = HEAD;
/* While we have fragments on the work list */
while ( cur != NULL ) {
next_PC = cur->PC;
frag = strata_build_single_fragment(next_PC);
if ( !(frag->flags & STRATA_FRAG_DONT_LINK) ) {
/* Do chaining. */
strata_apply_patches(frag);
}
cur = cur->next;
}
/* don't set the flags to "ready" until all of the fragments
are built */
while( next_PC = strata_dequeue_address() ) {
frag = strata_lookup_fragment(next_PC);
assert(frag!=NULL);
if ( first_frag == NULL ) {
first_frag = frag;
}
frag->flags |= STRATA_FRAG_READY;
}
return first_frag;
}
/*
* strata_build_single_fragment -
* builds a single fragment from the given application address, and
* returns a pointer to the strata_fragment struct. Note, this function
* does not set the STRATA_FRAG_READY flag, which is handled in the common
* case by strata_install_fragment_group
*/
strata_fragment* strata_build_single_fragment(app_iaddr_t next_PC)
{
strata_fragment *frag=NULL;
insn_t insn;
int insn_class=0;
int instrs_in_this_frag = 0;
fcache_iaddr_t to_fPC=0, previous_last_fPC=0;
app_iaddr_t previous_next_PC=0;
/* initialize the new fragment */
STRATA_TRACE("Building new fragment for :>0x%08x\n",next_PC);
frag = strata_create_fragment(next_PC);
strata_begin_fragment(frag);
/* set linking flag if necessary */
if ( builder_opt_no_frag_linking ) {
frag->flags |= STRATA_FRAG_DONT_LINK;
}
STRATA_TRACE("New fragment at: 0x%08x\n", frag->fPC);
/* Remember the frag. cache address of the first fragment. */
if (to_fPC == 0)
to_fPC = frag->fPC;
/* Fetch/decode/translate loop for fragment formation. */
do {
/* reset vars keeping last PC */
previous_next_PC=next_PC;
previous_last_fPC=frag->last_fPC;
/* fetch and classify */
insn = (*TI.fetch)(next_PC);
insn_class = (*TI.classify)(insn);
if(next_PC==strata_stop_address)
{
targ_emit_shutdown_code(frag,previous_next_PC, insn);
frag->ty=STRATA_FRAG_SPECIAL;
next_PC=0;
break;
}
/* consider making into an indirect function call off the enumeration */
switch(insn_class) {
case STRATA_CALL:
next_PC = (*TI.xlate_call)(frag,next_PC,insn);
strata_add_pc_mapping(next_PC, frag->last_fPC);
STRATA_TRACE("call translation returned: 0x%08x\n", next_PC);
break;
case STRATA_PC_RELATIVE_BRANCH:
next_PC = (*TI.xlate_pcrel_branch)(frag,next_PC,insn);
strata_add_pc_mapping(next_PC, frag->last_fPC);
break;
case STRATA_INDIRECT_BRANCH:
next_PC = (*TI.xlate_ind_branch)(frag,next_PC,insn);
break;
case STRATA_RETURN:
next_PC = (*TI.xlate_return)(frag,next_PC,insn);
break;
case STRATA_SPECIAL:
next_PC = (*TI.xlate_special)(frag,next_PC,insn);
strata_add_pc_mapping(next_PC, frag->last_fPC);
break;
case STRATA_NORMAL:
next_PC = (*TI.xlate_normal)(frag,next_PC,insn);
break;
default:
assert(0);
}
instrs_in_this_frag++;
if(instrs_in_this_frag >= strata_max_insts_per_frag && next_PC)
{
(*TI.end_fragment_early)(frag,next_PC);
break;
}
frag->indPC = previous_next_PC;
frag->indfPC = previous_last_fPC;
} while(next_PC);
/* Create all the trampolines that need to be added */
strata_create_fragment_trampolines(frag);
/* Mark end of fragment. */
strata_end_fragment(frag);
/* assert that the fragment type was set to non-zero */
assert(frag->ty!=STRATA_FRAG_INVALID);
return frag;
}
/* The next two functions manage the builder's work list of fragments. Items
* added to the work list will get added to the fragment cache in FIFO order.
* We use a linked list to represent the queue. The static globals head &
* tail point to the head and tail of the queue respectively. We enqueue at
* the tail and dequeue at the head. The queue is empty when head points to
* NULL.
*/
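/* Example (hypothetical PCs): enqueueing 0x1000 and then 0x2000 yields
 * dequeues of 0x1000, then 0x2000, then 0 once the list is empty. */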
/*
* strata_enqueue_address - Place a fragment's starting address onto the builder's work list.
*/
void strata_enqueue_address (app_iaddr_t PC)
{
fragment_queue *p;
STRATA_TRACE("Enqueue'ing 0x%08x to process later\n",PC);
/* Allocate a new queue element and initialize its fields. */
NEW(p,BUILDER);
p->PC = PC;
p->next = NULL;
/* Link p into the queue at the tail. */
if (TAIL != NULL) {
TAIL->next = p;
}
TAIL = p;
/* Check for previously empty queue. */
if (HEAD == NULL)
HEAD = TAIL;
}
/*
* strata_dequeue_address - Get the next fragment starting address off of the builder's work list.
*/
app_iaddr_t strata_dequeue_address (void)
{
fragment_queue *p;
p = HEAD;
if (p != NULL) {
HEAD = HEAD->next;
return p->PC;
} else {
TAIL = NULL;
return 0;
}
}
/*
* strata_reset_builder - Reset the builder.
*/
void strata_reset_builder (void)
{
int i;
for(i=0;i<PATCHTABSIZE;i++) {
patch_tab[i] = NULL;
}
HEAD = TAIL = NULL;
strata_deallocate(BUILDER);
}
/* Initialize the builder. */
void strata_init_builder (void)
{
strata_entrance_count = 0;
/* Initialize target specific builder stuff. */
(*TI.init)();
/* Reset builder data structures. */
strata_reset_builder();
}
/* Restart the builder. */
void strata_restart_builder (void) {
extern int targ_opt_fast_return;
assert(targ_opt_fast_return==FALSE);
strata_reset_builder();
longjmp(strata_build_env,0);
}
/* Link all trampolines with target PC that were installed before the
* fragment from PC had been copied to fragment cache location fPC.
*
* NOTE: Patches aren't removed after they have been applied. This
* simplifies the code, but could hamper performance. This merits
* looking at before release.
*/
void strata_apply_patches (strata_fragment *frag) {
app_iaddr_t h;
strata_patch *cur;
STRATA_TRACE("Applying patches for 0x%08x => 0x%08x\n",frag->PC,frag->fPC);
h = frag->PC % PATCHTABSIZE;
cur = patch_tab[h];
while(cur != NULL) {
if (!cur->patched && cur->PC == frag->PC) {
strata_frag_control_edge(cur->frag,frag);
strata_remove_create_tramp_entry(cur);
(*TI.patch)(cur,frag->fPC);
cur->patched = 1;
}
cur = cur->next;
}
}
/*
* This is a list of patches that needs to be applied for the current fragment.
*/
strata_patch* tramplist=NULL;
/*
* strata_create_trampoline_later - Record that the branch in patch->u.loc needs to temporarily go
* to a trampoline. However, we can't emit that trampoline now
* because we're not anywhere near done with emitting this fragment.
* Consequently, we'll fix up this branch later (in end_fragment)
* and emit a trampoline.
*/
void strata_create_trampoline_later(strata_patch *patch, app_iaddr_t to_PC)
{
app_iaddr_t h;
assert(patch);
/* tracing help */
if (patch->ty == PATCH_SWITCH)
{
assert(0);
}
else
{
STRATA_TRACE("Location %08x will have a tramp added after this fragment build\n",
patch->u.loc,to_PC);
}
patch->PC=to_PC;
/* Install the patch into add trampoline list */
patch->next=tramplist;
tramplist=patch;
}
/*
* Create all the trampolines that this fragment needs
*/
void strata_create_fragment_trampolines(strata_fragment *frag)
{
/* loop over the tramplist, creating trampolines as we go */
while(tramplist)
{
/* create this trampoline, we'll need the target's help */
targ_create_trampoline(frag,tramplist);
/* next trampoline */
tramplist=tramplist->next;
}
return;
}
/*
* strata_remove_create_tramp_entry - Remove an entry from the create
* trampoline list. This is sometimes necessary when we patch before
* we finish the fragment (sometimes this happens when we create PC mappings).
*/
void strata_remove_create_tramp_entry(strata_patch* cur)
{
strata_patch* ttramp=tramplist;
strata_patch* prev=NULL;
/* examine each entry on the tramp list */
while(ttramp)
{
/* match: remove entry */
if(ttramp->PC==cur->PC)
{
/* note: freeing of list entries isn't necessary, as they're arena allocated.
* We could put them into a list of free entries, if we had a mechanism for it
*/
if(!prev)
{
/* head of list, easy removal */
tramplist=tramplist->next;
}
else
{
/* make prev entry's next ptr point to cur entry next */
prev->next=ttramp->next;
}
}
/* keep track of the previous one for removal purposes, and move to the next one */
prev=ttramp;
ttramp=ttramp->next;
}
return;
}
/*
* strata_patch_later - Add patch to the list of fragment addresses that need to be fixed
* when targPC is translated.
*/
void strata_patch_later (strata_patch *patch, app_iaddr_t targPC)
{
app_iaddr_t h;
assert(patch);
if (patch->ty == PATCH_SWITCH) {
STRATA_TRACE("Table %08x, element %08x will be patched when %08x arrives\n",
patch->u.sw.table_base,patch->u.sw.table_entry,targPC);
} else {
STRATA_TRACE("Location %08x will be patched when %08x arrives\n",
patch->u.loc,targPC);
}
/* Initialize the patch. */
patch->PC = targPC;
patch->patched = 0;
/* Install the patch into the patch table. */
h = patch->PC % PATCHTABSIZE;
patch->next = patch_tab[h];
patch_tab[h] = patch;
}
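/* Example (illustrative): if two fragments branch to the same untranslated
 * target PC, both patches land in patch_tab[PC % PATCHTABSIZE] and are
 * chained at the head; when that PC is finally built, strata_apply_patches()
 * walks the chain and redirects both branches to the new fragment. */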
/*
* strata_create_patch_with_type - Create a simple patch with an f$ address and a type, which may be
* a target defined type.
*/
strata_patch *strata_create_patch_with_type(strata_fragment *frag, fcache_iaddr_t loc,
strata_patch_type_t ty)
{
strata_patch *patch;
NEW(patch,FCACHE);
patch->ty = ty;
patch->frag = frag;
patch->u.loc = loc;
return patch;
}
/* Allocate a trampoline patch. */
strata_patch *strata_patch_tramp (strata_fragment *frag, fcache_iaddr_t loc) {
return strata_create_patch_with_type(frag,loc,PATCH_TRAMP);
}
/* Allocate for a return address. */
strata_patch *strata_patch_alt_retaddr (strata_fragment *frag, fcache_iaddr_t loc) {
return strata_create_patch_with_type(frag,loc,PATCH_ALT_RETADDR);
}
/* Allocate for a callsite address; used when partial inlining is off */
strata_patch *strata_patch_callsite(strata_fragment *frag, app_iaddr_t loc) {
return strata_create_patch_with_type(frag,loc,PATCH_CALLSITE);
}
/* Allocate for a return address. */
strata_patch *strata_patch_retaddr (strata_fragment *frag, fcache_iaddr_t loc) {
return strata_create_patch_with_type(frag,loc,PATCH_RETADDR);
}
/* Allocate a patch for a switch trampoline entry. */
strata_patch *strata_patch_switch (strata_fragment *frag, unsigned table_base, unsigned table_entry) {
strata_patch *patch;
NEW(patch,FCACHE);
patch->ty = PATCH_SWITCH;
patch->frag = frag;
patch->u.sw.table_base = table_base;
patch->u.sw.table_entry = table_entry;
return patch;
}
/*
* strata_add_pc_mapping - create a fake fragment which is a mapping of a frag_PC to an app_PC.
*/
void strata_add_pc_mapping(app_iaddr_t PC, fcache_iaddr_t fPC)
{
strata_fragment *frag=NULL;
#define strata_opt_add_pc_mappings 1
if(!strata_opt_add_pc_mappings)
return;
if((void*)PC==(void*)NULL)
{
return;
}
frag = strata_create_fragment(PC);
frag->fPC=fPC;
frag->last_fPC=fPC;
frag->ty=STRATA_FRAG_PC_MAPPING;
strata_apply_patches(frag);
}

12
code_examples/example.xml Normal file
View File

@ -0,0 +1,12 @@
<foo some_attr='blah'
other_attr='foo'
c='9'>
<bijj a="222"
b="999">
<blah
a='1'
b='2'
c='3' />
fjjgg
</bijj>
</foo>

10
code_examples/heredoc.pl Normal file
View File

@ -0,0 +1,10 @@
#!/usr/bin/perl
my $var = <<HEREDOC;
i hate this
it's so awful
$blah blah blah
jiejgiejgiejge
HEREDOC
print $var . "\n";

425
code_examples/imacfb.c Normal file
View File

@ -0,0 +1,425 @@
/*
* framebuffer driver for Intel-based Macs
*
* (c) 2006 Edgar Hucek <gimli@dark-green.com>
* Original imac driver written by Gerd Knorr <kraxel@goldbach.in-berlin.de>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <video/vga.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#define dac_reg (0x3c8)
#define dac_val (0x3c9)
typedef enum _MAC_TAPE {
M_I17,
M_I20,
M_MINI,
M_NEW
} MAC_TAPE;
/* --------------------------------------------------------------------- */
static struct fb_var_screeninfo imacfb_defined __initdata = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
.right_margin = 32,
.upper_margin = 16,
.lower_margin = 4,
.vsync_len = 4,
.vmode = FB_VMODE_NONINTERLACED,
};
static struct fb_fix_screeninfo imacfb_fix __initdata = {
.id = "IMAC VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
};
static int inverse = 0;
static int mtrr = 0; /* disable mtrr */
static int vram_remap __initdata = 0; /* Set amount of memory to be used */
static int vram_total __initdata = 0; /* Set total amount of memory */
static int depth;
static int model = M_NEW;
static int manual_height = 0;
static int manual_width = 0;
/* --------------------------------------------------------------------- */
static int imacfb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
/*
* Set a single color register. The values supplied are
* already rounded down to the hardware's capabilities
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
if (regno >= info->cmap.len)
return 1;
if (regno < 16 && info->var.bits_per_pixel != 8) {
switch (info->var.bits_per_pixel) {
case 16:
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32*) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) |
((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32*) (info->pseudo_palette))[regno] =
((red & 0xf800) ) |
((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
}
break;
case 24:
case 32:
red >>= 8;
green >>= 8;
blue >>= 8;
((u32 *)(info->pseudo_palette))[regno] =
(red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
break;
}
}
return 0;
}
static struct fb_ops imacfb_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = imacfb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
static int __init imacfb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
if (! strcmp(this_opt, "inverse"))
inverse=1;
else if (! strncmp(this_opt, "mtrr:", 5))
mtrr = simple_strtoul(this_opt+5, NULL, 0);
else if (! strcmp(this_opt, "nomtrr"))
mtrr=0;
else if (! strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt+7, NULL, 0);
else if (! strncmp(this_opt, "vremap:", 7))
vram_remap = simple_strtoul(this_opt+7, NULL, 0);
else if (! strcmp(this_opt, "i17"))
model = M_I17;
else if (! strcmp(this_opt, "i20"))
model = M_I20;
else if (! strcmp(this_opt, "mini"))
model = M_MINI;
else if (! strncmp(this_opt, "height:", 7))
manual_height = simple_strtoul(this_opt+7, NULL, 0);
else if (! strncmp(this_opt, "width:", 6))
manual_width = simple_strtoul(this_opt+6, NULL, 0);
}
return 0;
}
#define DEFAULT_FB_MEM 1024*1024*16
static int __init imacfb_probe(struct platform_device *dev)
{
struct fb_info *info;
int err;
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
screen_info.lfb_base = 0x80010000;
screen_info.lfb_depth = 32;
screen_info.lfb_size = DEFAULT_FB_MEM / 0x10000;
screen_info.pages=1;
screen_info.blue_size = 8;
screen_info.blue_pos = 0;
screen_info.green_size = 8;
screen_info.green_pos = 8;
screen_info.red_size = 8;
screen_info.red_pos = 16;
screen_info.rsvd_size = 8;
screen_info.rsvd_pos = 24;
switch(model) {
case M_I17:
screen_info.lfb_width = 1440;
screen_info.lfb_height = 900;
screen_info.lfb_linelength = 1472 * 4;
break;
case M_NEW:
case M_I20:
screen_info.lfb_width = 1680;
screen_info.lfb_height = 1050;
screen_info.lfb_linelength = 1728 * 4;
break;
case M_MINI:
screen_info.lfb_width = 1024;
screen_info.lfb_height = 768;
screen_info.lfb_linelength = 2048 * 4;
break;
}
/* if the user wants to manually specify height/width,
we will override the defaults */
/* TODO: eventually get auto-detection working */
if(manual_height > 0)
screen_info.lfb_height = manual_height;
if(manual_width > 0)
screen_info.lfb_width = manual_width;
/*
static void *videomemory;
static u_long videomemorysize = (64*1024*1024);
videomemory = ioremap(0x80000000,videomemorysize);
memset(videomemory, 0x99, videomemorysize);
*/
imacfb_fix.smem_start = screen_info.lfb_base;
imacfb_defined.bits_per_pixel = screen_info.lfb_depth;
if (15 == imacfb_defined.bits_per_pixel)
imacfb_defined.bits_per_pixel = 16;
imacfb_defined.xres = screen_info.lfb_width;
imacfb_defined.yres = screen_info.lfb_height;
imacfb_fix.line_length = screen_info.lfb_linelength;
imacfb_fix.visual = (imacfb_defined.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
/* size_vmode -- that is the amount of memory needed for the
* used video mode, i.e. the minimum amount of
* memory we need. */
size_vmode = imacfb_defined.yres * imacfb_fix.line_length;
/* size_total -- all video memory we have. Used for mtrr
* entries, resource allocation and bounds
* checking. */
size_total = screen_info.lfb_size * 65536;
if (vram_total)
size_total = vram_total * 1024 * 1024;
if (size_total < size_vmode)
size_total = size_vmode;
/* size_remap -- the amount of video memory we are going to
* use for imacfb. With modern cards it is not an
* option to simply use size_total, as that
* wastes plenty of kernel address space. */
size_remap = size_vmode * 2;
if (vram_remap)
size_remap = vram_remap * 1024 * 1024;
if (size_remap < size_vmode)
size_remap = size_vmode;
if (size_remap > size_total)
size_remap = size_total;
imacfb_fix.smem_len = size_remap;
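/* Worked example (illustrative): for M_I20 (1680x1050, 32 bpp, line_length
 * 1728*4 = 6912 bytes), size_vmode = 1050 * 6912 = 7257600 bytes (~7 MB),
 * so size_remap defaults to ~14 MB unless vremap:/vtotal: say otherwise. */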
#ifndef __i386__
screen_info.imacpm_seg = 0;
#endif
if (!request_mem_region(imacfb_fix.smem_start, size_total, "imacfb")) {
printk(KERN_WARNING
"imacfb: cannot reserve video memory at 0x%lx\n",
imacfb_fix.smem_start);
/* We cannot make this fatal. Sometimes this comes from magic
spaces our resource handlers simply don't know about */
}
info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev);
if (!info) {
release_mem_region(imacfb_fix.smem_start, size_total);
return -ENOMEM;
}
info->pseudo_palette = info->par;
info->par = NULL;
info->screen_base = ioremap(imacfb_fix.smem_start, imacfb_fix.smem_len);
if (!info->screen_base) {
printk(KERN_ERR
"imacfb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
imacfb_fix.smem_len, imacfb_fix.smem_start);
err = -EIO;
goto err;
}
printk(KERN_INFO "imacfb: framebuffer at 0x%lx, mapped to 0x%p, "
"using %dk, total %dk\n",
imacfb_fix.smem_start, info->screen_base,
size_remap/1024, size_total/1024);
printk(KERN_INFO "imacfb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
imacfb_defined.xres, imacfb_defined.yres, imacfb_defined.bits_per_pixel, imacfb_fix.line_length, screen_info.pages);
imacfb_defined.xres_virtual = imacfb_defined.xres;
imacfb_defined.yres_virtual = imacfb_fix.smem_len / imacfb_fix.line_length;
printk(KERN_INFO "imacfb: scrolling: redraw\n");
imacfb_defined.yres_virtual = imacfb_defined.yres;
/* some dummy values for timing to make fbset happy */
imacfb_defined.pixclock = 10000000 / imacfb_defined.xres * 1000 / imacfb_defined.yres;
imacfb_defined.left_margin = (imacfb_defined.xres / 8) & 0xf8;
imacfb_defined.hsync_len = (imacfb_defined.xres / 8) & 0xf8;
imacfb_defined.red.offset = screen_info.red_pos;
imacfb_defined.red.length = screen_info.red_size;
imacfb_defined.green.offset = screen_info.green_pos;
imacfb_defined.green.length = screen_info.green_size;
imacfb_defined.blue.offset = screen_info.blue_pos;
imacfb_defined.blue.length = screen_info.blue_size;
imacfb_defined.transp.offset = screen_info.rsvd_pos;
imacfb_defined.transp.length = screen_info.rsvd_size;
if (imacfb_defined.bits_per_pixel <= 8) {
depth = imacfb_defined.green.length;
imacfb_defined.red.length =
imacfb_defined.green.length =
imacfb_defined.blue.length =
imacfb_defined.bits_per_pixel;
}
printk(KERN_INFO "imacfb: %s: "
"size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n",
(imacfb_defined.bits_per_pixel > 8) ?
"Truecolor" : "Pseudocolor",
screen_info.rsvd_size,
screen_info.red_size,
screen_info.green_size,
screen_info.blue_size,
screen_info.rsvd_pos,
screen_info.red_pos,
screen_info.green_pos,
screen_info.blue_pos);
imacfb_fix.ypanstep = 0;
imacfb_fix.ywrapstep = 0;
/* request failure does not faze us, as vgacon probably has this
* region already (FIXME) */
request_region(0x3c0, 32, "imacfb");
#ifdef CONFIG_MTRR
if (mtrr) {
unsigned int temp_size = size_total;
unsigned int type = 0;
switch (mtrr) {
case 1:
type = MTRR_TYPE_UNCACHABLE;
break;
case 2:
type = MTRR_TYPE_WRBACK;
break;
case 3:
type = MTRR_TYPE_WRCOMB;
break;
case 4:
type = MTRR_TYPE_WRTHROUGH;
break;
default:
type = 0;
break;
}
if (type) {
int rc;
/* Find the largest power-of-two */
while (temp_size & (temp_size - 1))
temp_size &= (temp_size - 1);
/* Try and find a power of two to add */
do {
rc = mtrr_add(imacfb_fix.smem_start, temp_size,
type, 1);
temp_size >>= 1;
} while (temp_size >= PAGE_SIZE && rc == -EINVAL);
}
}
#endif
info->fbops = &imacfb_ops;
info->var = imacfb_defined;
info->fix = imacfb_fix;
info->flags = FBINFO_FLAG_DEFAULT;
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
goto err;
}
if (register_framebuffer(info)<0) {
err = -EINVAL;
fb_dealloc_cmap(&info->cmap);
goto err;
}
printk(KERN_INFO "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
return 0;
err:
framebuffer_release(info);
release_mem_region(imacfb_fix.smem_start, size_total);
return err;
}
static struct platform_driver imacfb_driver = {
.probe = imacfb_probe,
.driver = {
.name = "imacfb",
},
};
static struct platform_device imacfb_device = {
.name = "imacfb",
};
static int __init imacfb_init(void)
{
int ret;
char *option = NULL;
/* ignore error return of fb_get_options */
fb_get_options("imacfb", &option);
imacfb_setup(option);
ret = platform_driver_register(&imacfb_driver);
if (!ret) {
ret = platform_device_register(&imacfb_device);
if (ret)
platform_driver_unregister(&imacfb_driver);
}
return ret;
}
module_init(imacfb_init);
MODULE_LICENSE("GPL");

143
code_examples/map.js Normal file
View File

@ -0,0 +1,143 @@
// some useful globals
var map;
var showing = "venues";
var people = [
["Erik", "http://www.bearhome.net", "924 S. Farragut St., Philadelphia"],
["Corey", "", "49th and Florence, Philadelphia"]
];
var venues = [
["Danger Danger", "http://www.myspace.com/dangerdangerhouse", "900 S. 47th St., Philadelphia, PA"],
["409 House", "http://www.myspace.com/westphilly409", "409 S. 43rd St., Philadelphia, PA"],
["Haunted Cream Egg", "http://www.myspace.com/hauntedcreamegg", "4207 Baltimore Ave., Philadelphia, PA"],
["The Avant Gentlemen's Lodge", "http://www.myspace.com/avantgentlemenslodge", "4028 Filbert St., Philadelphia, PA"],
["Be Happy House", "http://www.circley.net/behappy/", "4078 Spring Garden St., Philadelphia, PA"],
["The Veggieplex", "http://www.myspace.com/theveggieplex", "1019 S. 47th St., Philadelphia, PA"],
["LAVA", "http://www.lavazone.org/", "4134 Lancaster Ave., Philadelphia, PA"],
["Clap It Off!", "http://www.myspace.com/clapitoff", "1208 S. 46th St., Philadelphia, PA"],
["The Rotunda", "http://www.foundationarts.org/events.html", "4014 Walnut St., Philadelphia, PA"],
["The Millcreek Tavern", "http://www.millcreektavernphilly.com", "4200 Chester Ave., Philadelphia, PA"],
["Eris Temple", "", "602 S. 52nd St., Philadelphia, PA"],
["The Warehouse", "", "5027 Beaumont St., Philadelphia, PA"]
];
var markers = [];
// base icon stuff so we create labelled icons
var baseIcon = new GIcon();
baseIcon.shadow = "http://www.google.com/mapfiles/shadow50.png";
baseIcon.iconSize = new GSize(20, 34);
baseIcon.shadowSize = new GSize(37, 34);
baseIcon.iconAnchor = new GPoint(9, 34);
baseIcon.infoWindowAnchor = new GPoint(9, 2);
baseIcon.infoShadowAnchor = new GPoint(18, 25);
function getCode(index) {
return String.fromCharCode("A".charCodeAt(0) + index);
}
function getIcon(index) {
var letter = getCode(index);
var icon = new GIcon(baseIcon);
icon.image = "http://www.google.com/mapfiles/marker" + letter + ".png";
return icon;
}
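// e.g. getCode(0) is "A" and getIcon(0) uses markerA.png; getCode(1) is "B"
// with markerB.png, and so on down the venue/people lists.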
function load() {
if (GBrowserIsCompatible()) {
map = new GMap2(document.getElementById("map"));
map.addControl(new GSmallMapControl());
map.addControl(new GMapTypeControl());
showVenues();
}
}
function showVenues() {
map.clearOverlays();
markers = [];
var link_html = "<h3>venues:</h3><p>";
for(var i=0; i < venues.length; i++) {
link_html += showAddress(i, venues[i][0], venues[i][1], venues[i][2]);
}
link_html += "</p>";
document.getElementById("links").innerHTML = link_html;
showing = "venues";
}
function showPeople() {
map.clearOverlays();
markers = [];
var link_html = "<h3>people:</h3><p>";
for(var i=0; i < people.length; i++) {
link_html += showAddress(i, people[i][0], people[i][1], people[i][2]);
}
link_html += "</p>";
document.getElementById("links").innerHTML = link_html;
showing = "people";
}
function hilightMarker(index) {
var marker = markers[index];
if(marker) {
setInfoMarkerHtml(index, marker);
} else {
debug("didn't find a marker");
}
}
function debug(s) {
document.getElementById("debug").innerHTML = s;
}
function setInfoMarkerHtml(index, marker) {
var html;
var entry;
if(showing == "people") {
entry = people[index];
} else {
entry = venues[index];
}
var name = entry[0];
var url = entry[1];
var address = entry[2];
if(url) {
html = "<a target=\"_blank\" href=" + url + "><b>" + name + "</b></a><br/>" + address;
} else {
html = "<b>" + name + "</b><br/>" + address;
}
debug(html);
marker.openInfoWindowHtml(html);
}
function showAddress(index, name, url, address) {
var geocoder = new GClientGeocoder();
geocoder.getLatLng(
address,
function(point) {
if (!point) {
alert(address + " not found");
} else {
map.setCenter(point, 13);
var icon = getIcon(index);
var marker = new GMarker(point, icon);
GEvent.addListener(
marker,
"click",
function() {
setInfoMarkerHtml(index, marker);
}
);
map.addOverlay(marker);
markers[index] = marker;
}
}
);
var code = getCode(index);
return "<a href=\"javascript:hilightMarker(" + index + ")\">" + code + ": " + name + "</a><br/>";
}

5110
code_examples/sim-outorder.c Normal file
View File

File diff suppressed because it is too large Load Diff

72
code_examples/targ-switch.s Normal file
View File

@ -0,0 +1,72 @@
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; targ-switch.s - Strata context switch primitive operations
;
; Copyright (c) 2000, 2004 - Dan Williams and Jack W. Davidson
;
;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
section .data
frag_jmp_addr dd 0
junk dd 0
section .text
extern strata_build_main, hashtable_get_default, intercept_sigaction, syscall_watch_lookup, targ_exec
BITS 32
global targ_start
global strata_stop
global targ_reenter
; Macros
; These offsets are based on targ-build.c defines
; NOTE: the offsets assume 32 bit int, if that changes, these
; values must change.
%define STRATA_FROMFRAG(base) [base+106]
%define STRATA_REDIRECT(base) [base+126]
%define STRATA_SYSCALL(base) [base+126]
;; This technically causes a segfault, not an illegal instruction
;; but I'm unable to find a "standard" illegal instruction for x86
%define ILLEGAL_INSN mov eax, [0]
; targ_start -
; This function is called by the application immediately after strata_get_start_fn, and it is used to first transfer
; control from the application to strata. It does this by (basically) doing
; the same things that occur during a fragment trampoline.
targ_start:
pop eax ; pop the return address, this is the first instruction to execute, and will be passed to s_b_m()
sub esp, 28 ; this is the code for a trampoline with the next fragment stored in eax, and no from-frag
push dword [esp]
pushad
pushfd
push dword 0
push eax
; We are simulating a call, but we don't want to return here,
; instead we return to targ_exec
push targ_exec
jmp strata_build_main
; Maintenance note: targ_reenter is no
; longer used. The entire context save
; has been pushed into the trampoline
targ_reenter:
ILLEGAL_INSN
;
; see strata_init for details
;
; strata_stop - function that returns its own address so that it can be found w/o writing dynamic linker ickiness.
;
strata_stop:
call strata_stop_L1
strata_stop_L1:
pop eax
sub eax, 5
ret

79
code_examples/wfault.sh Executable file
View File

@ -0,0 +1,79 @@
#!/bin/sh
#
# An improvement on netstat for system administrators
#
# This lists not just network connections by IP and by
# port, but actually gives a listing of each process that
# is using a socket, with PID and owner. Makes it easier
# to see who is doing what, and to stop it quickly if
# desired.
#
# by Erik Osheim
# Print usage
helpusage() {
echo "usage: wfault [-halbtu]"
echo " -h: print this message"
echo ""
echo " -a: print all ip sockets"
echo " -l: only print listening ip sockets"
echo ""
echo " -b: print both tcp and udp sockets"
echo " -t: print only tcp sockets"
echo " -u: print only udp sockets"
}
# Find and list all TCP sockets of a particular type (ESTABLISHED, CLOSE_WAIT,
# LISTENING) using fuser output
findtcptype() {
echo TCP: ${1}
echo ----------------------------------------------------
netstat -an | \
awk -v type=$1 -F '[ :]*' '$8==type{system("fuser -u -v -n tcp " $5)}' 2>&1 | \
awk '/^.+$/&&!/USER/'
echo
}
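# e.g. (illustrative) "findtcptype LISTEN" prints one fuser line (process,
# PID, user) for each socket netstat reports in the LISTEN state.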
# Find all listening UDP sockets
findudp() {
echo UDP
echo ----------------------------------------------------
netstat -an | \
awk -F '[ :]*' '$1=="udp"{system("fuser -u -v -n udp " $5)}' 2>&1 | \
awk '/^.+$/&&!/USER/'
echo
}
LISTENING=0
TCP=1
UDP=1
while getopts "halbtu" var; do
case $var in
h) helpusage; exit 0;;
a) LISTENING=0; TCP=1; UDP=1;;
l) LISTENING=1;;
b) TCP=1; UDP=1;;
t) TCP=1; UDP=0;;
u) UDP=1; TCP=0;;
*) helpusage; exit 1;;
esac
done
if [[ $( id -u ) -ne 0 ]]; then
echo "Must be run as root."
exit 1
fi
if [[ ${TCP} -eq 1 ]]; then
if [[ ${LISTENING} -eq 0 ]]; then
findtcptype ESTABLISHED
findtcptype CLOSE_WAIT
fi
findtcptype LISTEN
fi
if [[ ${UDP} -eq 1 ]]; then
findudp
fi

82
color.py Normal file
View File

@ -0,0 +1,82 @@
import curses
inited = False
#default_color = False
default_color = True
def init():
global colors, _colors, _pairs, attributes, inited, index
if not inited:
index = 1
colors = { 'cyan': curses.COLOR_CYAN,
'green': curses.COLOR_GREEN,
'red': curses.COLOR_RED,
'yellow': curses.COLOR_YELLOW,
'blue': curses.COLOR_BLUE,
'magenta': curses.COLOR_MAGENTA,
'black': curses.COLOR_BLACK,
'white': curses.COLOR_WHITE }
if default_color:
colors["default"] = -1
_colors = []
_pairs = {}
for key in _pairs:
fg, bg = key
curses.init_pair(index, colors[fg], colors[bg])
_pairs[key] = curses.color_pair(index)
_colors.append(key)
index = len(_colors) + 1
attributes = { 'bold': curses.A_BOLD,
'reverse': curses.A_REVERSE,
'normal': curses.A_NORMAL,
'underline': curses.A_UNDERLINE,
'dim': curses.A_DIM,
'standout': curses.A_STANDOUT }
inited = True
def pairs(fg, bg):
if not curses.has_colors():
return curses.color_pair(0)
global colors, _colors, _pairs, index
key = (fg, bg)
if key not in _pairs:
assert index < curses.COLOR_PAIRS
if not default_color:
if fg == "default":
fg = "white"
if bg == "default":
bg = "black"
curses.init_pair(index, colors[fg], colors[bg])
_pairs[key] = curses.color_pair(index)
_colors.append(key)
index = len(_colors) + 1
return _pairs[key]
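# e.g. (illustrative) the first pairs('red', 'black') call allocates a new
# curses pair via init_pair; later calls with the same (fg, bg) key just
# return the cached attribute.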
def get_pairs(index):
if index == 0:
return ('white', 'black')
else:
return _colors[index-1]
def reverse_colors(attr):
return attr ^ curses.A_REVERSE
def build(fg, bg, *attr):
cattr = pairs(fg, bg)
return build_attr(cattr, *attr)
def build_attr(*attr):
v = curses.A_NORMAL
for x in attr:
if type(x) == type(''):
x = attributes[x]
v = v | x
return v

105
completer.py Normal file
View File

@ -0,0 +1,105 @@
import glob, os, pwd
import method, util
def find_common_string(candidates):
if len(candidates) == 0:
return ""
elif len(candidates) == 1:
return candidates[0]
else:
done = False
index = 0
test = candidates[0]
while True:
for c in candidates:
if len(c) <= index or c[index] != test[index]:
return test[:index]
index += 1
return test
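# e.g. find_common_string(['foobar', 'food', 'foo']) returns 'foo'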
class Completer:
def get_candidates(self, s, w=None):
assert False, "Not implemented"
def tab_string(self, s, w=None):
'''returns a tuple of three things:
1. the new string
2. whether the string "exists"
3. whether the string is "complete"'''
candidates = self.get_candidates(s, w)
if len(candidates) == 0:
return (s, False, True)
elif len(candidates) == 1:
return (candidates[0], True, True)
else:
s2 = find_common_string(candidates)
if s2 in candidates:
return (s2, True, False)
else:
return (s2, False, False)
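# e.g. (illustrative) with candidates ['foo.txt', 'foobar.txt'],
# tab_string('f') returns ('foo', False, False): the string grows to the
# common prefix, which neither names a candidate nor is complete.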
class FileCompleter(Completer):
def get_candidates(self, s, w=None):
s = util.expand_tilde(s)
if s.startswith('~'):
users = ['~%s' % (x[0]) for x in pwd.getpwall()]
candidates = [util.expand_tilde(user) for user in users if user.startswith(s)]
else:
candidates = glob.glob(s + '*')
for i in range(0, len(candidates)):
c = candidates[i]
if os.path.isdir(os.path.realpath(c)):
candidates[i] = c + '/'
return candidates
class BufferCompleter(Completer):
def __init__(self, application):
self.application = application
def get_candidates(self, s, w=None):
bl = self.application.bufferlist
candidates = [b.name() for b in bl.buffers if b.name().startswith(s)]
return candidates
class CommandCompleter(Completer):
def get_candidates(self, s, w=None):
path = os.getenv('PATH')
path_dirs = path.split(':')
candidates = []
for d in path_dirs:
if (not os.path.isdir(d) or not os.access(d, os.R_OK)):
continue
for p in os.listdir(d):
if not os.path.isfile(os.path.join(d, p)):
continue
elif not p.startswith(s):
continue
else:
candidates.append(p)
return candidates
class ShellCompleter(Completer):
def __init__(self):
self.file_completer = FileCompleter()
self.command_completer = CommandCompleter()
def get_candidates(self, s, w=None):
if ' ' in s:
i = s.rindex(' ') + 1
base = s[:i]
last = s[i:]
candidates = self.file_completer.get_candidates(last)
return [base + x for x in candidates]
else:
return self.command_completer.get_candidates(s)
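# e.g. (illustrative) get_candidates('cat /et') splits on the last space and
# file-completes '/et' (yielding e.g. 'cat /etc/'); with no space present it
# completes command names from $PATH instead.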
class MethodCompleter(Completer):
def get_candidates(self, s, w=None):
return [n for n in w.application.methods if n.startswith(s)]
class ModeCompleter(Completer):
def get_candidates(self, s, w=None):
return [n for n in w.application.modes if n.startswith(s)]
class PerlFunctionCompleter(Completer):
def get_candidates(self, s, w=None):
old_window = w.buffer.method.old_window
functions = old_window.mode.get_functions()
return [n for n in functions if n.startswith(s)]

222
ctag_python.py Executable file
View File

@ -0,0 +1,222 @@
#!/usr/bin/python
#
# by Erik Osheim
import os, re, sets, sys
# regular expressions
class_re = re.compile('^(.*?)\t(.*?)\t(.*?)\tc$')
function_re = re.compile('^(.*?)\t(.*?)\t(.*?)\tf$')
method_re = re.compile('^([^\t]+)\t([^\t]+)\t([^\t]+)\tm\tclass:([^\t]+)(?:\t.*)?$')
class_supers_re = re.compile('^\/\^ *class +[_A-Za-z][_A-Za-z0-9]* *\((.*?)\) *: *\$\/;\"$')
def_args_re = re.compile('^\/\^ *def +[_A-Za-z][_A-Za-z0-9]* *\((.*?)\) *: *\$\/;\"$')
find_args_re = re.compile('[\*_a-zA-Z][\*_a-zA-Z0-9]*(?:=(?:[^,\'" ]+|"(?:\\.|[^\\"])*"|\'(?:\\.|[^\\"])*\'))?')
# set of base python objects which can be assumed to exist
base_objects = sets.Set(['object', 'list', 'dict'])
def is_fully_qualified(s):
return s in base_objects or '.' in s
def parse_entry(line):
m = class_re.match(line)
if m:
return ClassEntry(m.group(1), m.group(2), m.group(3))
m = function_re.match(line)
if m:
return FunctionEntry(m.group(1), m.group(2), m.group(3))
m = method_re.match(line)
if m:
return MethodEntry(m.group(1), m.group(2), m.group(3), m.group(4))
raise Exception, "Oh no: %s" % line
class PythonCTagger:
def __init__(self):
self.entries = {}
self.packages = {}
self.classes = {}
self.class_methods = {}
def process_tagfile(self, path):
f = open(path, 'r')
data = f.read()
f.close()
self.process_data(data)
def process_paths(self, paths=['.']):
(stdin, stdout, stderr) = os.popen3("exuberant-ctags -L - -f -")
for base in paths:
for root, dirs, files in os.walk(base):
if 'CVS' in dirs:
dirs.remove('CVS')
for name in files:
if name.endswith('.py'):
if base != '.':
path = os.path.join(root, name)
else:
path = name
stdin.write('%s\n' % path)
stdin.flush()
stdin.close()
data = stdout.read()
stdout.close()
stderr.close()
self.process_data(data)
def process_data(self, data):
# process the ctags output data
for l in data.split('\n'):
if not l:
continue
elif l.startswith('!'):
continue
else:
e = parse_entry(l)
self.entries[e.fullname()] = e
package = e.package()
if e.type == 'method':
p = e.parent
if not is_fully_qualified(p):
p = '%s.%s' % (package, p)
self.classes.setdefault(p, {})
self.classes[p][e.symbol] = e
else:
self.packages.setdefault(package, {})
self.packages[package][e.symbol] = e
# this returns the methods available in the class
def get_methods_for_class(self, c):
cn = c.fullname()
# if we haven't determined this class's methods yet, then let's do it
if cn not in self.class_methods:
classes_seen = sets.Set()
methods_seen = sets.Set()
self.class_methods[cn] = []
# create a queue of classes to process... this solves the ordering
# problem for class inheritance...i.e.:
# class Shape
# class Rectangle(Shape)
# class Rhombus(Shape)
# class Square(Rectangle, Rhombus)
# 1. [Square] --> process Square --> [Rectangle, Rhombus]
# 2. [Rectangle, Rhombus] --> process Rectangle --> [Rhombus, Shape]
# 3. [Rhombus, Shape] --> process Rhombus --> [Shape, Shape]
# 4. [Shape, Shape] --> process Shape --> [Shape]
# 5. [Shape] --> already processed Shape, skipping
to_process = [c]
while to_process:
e = to_process.pop(0)
fn = e.fullname()
# if we've seen this class already, then skip it
if fn in classes_seen:
continue
# mark that we've seen this class; if we don't know about its
# methods, then let's just skip it.
classes_seen.add(fn)
if fn not in self.classes:
continue
# for each method in the class, add it to our list if it's new
for msymbol in self.classes[fn]:
if msymbol not in methods_seen:
self.class_methods[cn].append(self.classes[fn][msymbol])
methods_seen.add(msymbol)
# for each parent of this class, append it to the end of the queue
# if we know about it
for sfn in e.supers:
if sfn in self.entries:
to_process.append(self.entries[sfn])
return self.class_methods[cn]
def display(self):
# for each package, print out the classes and functions in that package
for p in self.packages:
print 'package %s' % p
for es in self.packages[p]:
e = self.packages[p][es]
print ' %s %s' % (e.type, e.prototype())
fn = e.fullname()
# for each class, print out the methods that class provides (either
# implemented directly or inherited from a super class)
if e.type == 'class':
for m in self.get_methods_for_class(e):
# status marks whether the method is inherited from a
# superclass ('*') or implemented directly
if fn != m.parent:
status = '*'
else:
status = ' '
print ' %s %s' % (status, m.dump())
print ''
class Entry:
type = 'generic'
def __init__(self, symbol, path):
self.symbol = symbol
self.path = path
def __repr__(self):
return '<%s %s.%s>' % (self.type.title(), self.package(), self.symbol)
def package(self):
return self.path[:-3].replace('/', '.')
def fullname(self):
return '%s.%s' % (self.package(), self.symbol)
def prototype(self):
return self.fullname()
def dump(self):
return '%s %s' % (self.type, self.prototype())
class ClassEntry(Entry):
type = 'class'
def __init__(self, symbol, path, match):
Entry.__init__(self, symbol, path)
m = class_supers_re.match(match)
self.match = match
self.supers = []
if m:
self.supers = [x.strip() for x in m.group(1).split(',')]
for i in range(0, len(self.supers)):
if not is_fully_qualified(self.supers[i]):
self.supers[i] = '%s.%s' % (self.package(), self.supers[i])
def prototype(self):
return '%s(%s)' % (self.fullname(), ', '.join(self.supers))
class FunctionEntry(Entry):
type = 'function'
def __init__(self, symbol, path, match):
Entry.__init__(self, symbol, path)
m = def_args_re.match(match)
self.match = match
self.args = []
if m:
self.args = re.findall(find_args_re, m.group(1))
def prototype(self):
return '%s(%s)' % (self.fullname(), ', '.join(self.args))
class MethodEntry(FunctionEntry):
type = 'method'
def __init__(self, symbol, path, match, parent):
FunctionEntry.__init__(self, symbol, path, match)
self.parent = parent
if is_fully_qualified(parent):
self.parent = parent
else:
self.parent = '%s.%s' % (self.package(), parent)
def fullname(self):
return '%s.%s' % (self.parent, self.symbol)
if __name__ == "__main__":
ct = PythonCTagger()
if len(sys.argv[1:]) == 0:
ct.process_paths()
else:
ct.process_tagfile(sys.argv[1])
if False:
sys.exit(0)
else:
ct.display()

218
ctags.py Executable file
View File

@ -0,0 +1,218 @@
#!/usr/bin/python
#
# by Erik Osheim
import os, re, sets, sys
entries = {}
packages = {}
classes = {}
class_methods = {}
class_re = re.compile('^(.*?)\t(.*?)\t(.*?)\tc$')
function_re = re.compile('^(.*?)\t(.*?)\t(.*?)\tf$')
method_re = re.compile('^([^\t]+)\t([^\t]+)\t([^\t]+)\tm\tclass:([^\t]+)(?:\t.*)?$')
class_supers_re = re.compile('^\/\^ *class +[_A-Za-z][_A-Za-z0-9]* *\((.*?)\) *: *\$\/;\"$')
def_args_re = re.compile('^\/\^ *def +[_A-Za-z][_A-Za-z0-9]* *\((.*?)\) *: *\$\/;\"$')
find_args_re = re.compile('[\*_a-zA-Z][\*_a-zA-Z0-9]*(?:=(?:[^,\'" ]+|"(?:\\.|[^\\"])*"|\'(?:\\.|[^\\"])*\'))?')
base_objects = sets.Set(['object', 'list', 'dict'])
def is_fully_qualified(s):
return s in base_objects or '.' in s
def parse_entry(line):
m = class_re.match(line)
if m:
return ClassEntry(m.group(1), m.group(2), m.group(3))
m = function_re.match(line)
if m:
return FunctionEntry(m.group(1), m.group(2), m.group(3))
m = method_re.match(line)
if m:
return MethodEntry(m.group(1), m.group(2), m.group(3), m.group(4))
raise Exception, "Oh no: %s" % line
class Entry:
type = 'generic'
def __init__(self, symbol, path):
self.symbol = symbol
self.path = path
def __repr__(self):
return '<%s %s.%s>' % (self.type.title(), self.package(), self.symbol)
def package(self):
return self.path[:-3].replace('/', '.')
def fullname(self):
return '%s.%s' % (self.package(), self.symbol)
def prototype(self):
return self.fullname()
def dump(self):
return '%s %s' % (self.type, self.prototype())
class ClassEntry(Entry):
type = 'class'
def __init__(self, symbol, path, match):
Entry.__init__(self, symbol, path)
m = class_supers_re.match(match)
self.match = match
self.supers = []
if m:
self.supers = [x.strip() for x in m.group(1).split(',')]
for i in range(0, len(self.supers)):
if not is_fully_qualified(self.supers[i]):
self.supers[i] = '%s.%s' % (self.package(), self.supers[i])
def prototype(self):
return '%s(%s)' % (self.fullname(), ', '.join(self.supers))
class FunctionEntry(Entry):
type = 'function'
def __init__(self, symbol, path, match):
Entry.__init__(self, symbol, path)
m = def_args_re.match(match)
self.match = match
self.args = []
if m:
self.args = re.findall(find_args_re, m.group(1))
def prototype(self):
return '%s(%s)' % (self.fullname(), ', '.join(self.args))
class MethodEntry(FunctionEntry):
type = 'method'
def __init__(self, symbol, path, match, parent):
FunctionEntry.__init__(self, symbol, path, match)
self.parent = parent
if is_fully_qualified(parent):
self.parent = parent
else:
self.parent = '%s.%s' % (self.package(), parent)
def fullname(self):
return '%s.%s' % (self.parent, self.symbol)
def process_tagfile(path):
global entries, classes, packages
f = open(path, 'r')
data = f.read()
f.close()
process_data(data)
def process_direct():
(stdin, stdout, stderr) = os.popen3("exuberant-ctags -L - -f -")
for base in ('.',):
for root, dirs, files in os.walk(base):
if 'CVS' in dirs:
dirs.remove('CVS')
for name in files:
if name.endswith('.py'):
if base != '.':
path = os.path.join(root, name)
else:
path = name
stdin.write('%s\n' % path)
stdin.flush()
stdin.close()
data = stdout.read()
stdout.close()
stderr.close()
process_data(data)
def process_data(data):
global entries, classes, packages
# process the ctags output data
for l in data.split('\n'):
if not l:
continue
elif l.startswith('!'):
continue
else:
e = parse_entry(l)
entries[e.fullname()] = e
package = e.package()
if e.type == 'method':
p = e.parent
if not is_fully_qualified(p):
p = '%s.%s' % (package, p)
classes.setdefault(p, {})
classes[p][e.symbol] = e
else:
packages.setdefault(package, {})
packages[package][e.symbol] = e
# this returns the methods available in the class
def get_methods_for_class(c):
global class_methods
cn = c.fullname()
# if we haven't determined this class's methods yet, then let's do it
if cn not in class_methods:
classes_seen = sets.Set()
methods_seen = sets.Set()
class_methods[cn] = []
# create a queue of classes to process... this solves the ordering
# problem for class inheritance...i.e.:
# class Shape
# class Rectangle(Shape)
# class Rhombus(Shape)
# class Square(Rectangle, Rhombus)
# 1. [Square] --> process Square --> [Rectangle, Rhombus]
# 2. [Rectangle, Rhombus] --> process Rectangle --> [Rhombus, Shape]
# 3. [Rhombus, Shape] --> process Rhombus --> [Shape, Shape]
# 4. [Shape, Shape] --> process Shape --> [Shape]
# 5. [Shape] --> already processed Shape, skipping
to_process = [c]
while to_process:
e = to_process.pop(0)
fn = e.fullname()
# if we've seen this class already, then skip it
if fn in classes_seen:
continue
# mark that we've seen this class; if we don't know about its
# methods, then let's just skip it.
classes_seen.add(fn)
if fn not in classes:
continue
# for each method in the class, add it to our list if it's new
for msymbol in classes[fn]:
if msymbol not in methods_seen:
class_methods[cn].append(classes[fn][msymbol])
methods_seen.add(msymbol)
# for each parent of this class, append it to the end of the queue
# if we know about it
for sfn in e.supers:
if sfn in entries:
to_process.append(entries[sfn])
return class_methods[cn]
if __name__ == "__main__":
if len(sys.argv[1:]) == 0:
process_direct()
else:
process_tagfile(sys.argv[1])
# exit here to get good timing data
#sys.exit(0)
# for each package, print out the classes and functions in that package
for p in packages:
print 'package %s' % p
for es in packages[p]:
e = packages[p][es]
print ' %s %s' % (e.type, e.prototype())
fn = e.fullname()
# for each class, print out the methods that class provides (either
# implemented directly or inherited from a super class)
if e.type == 'class':
for m in get_methods_for_class(e):
# status marks whether the method is inherited from a
# superclass ('*') or implemented directly
if fn != m.parent:
status = '*'
else:
status = ' '
print ' %s %s' % (status, m.dump())
print ''

39
default.py Normal file
View File

@ -0,0 +1,39 @@
import os
# default callbacks
def none(window):
return None
def last_buffer(window):
bl = window.application.bufferlist
return bl.hidden_buffers[0].name()
def current_buffer(window):
return window.buffer.name()
def last_replace_before(window):
return None #XYZ
if window.application.last_replace_before:
return window.application.last_replace_before
else:
return None
def last_replace_after(window):
return None #XYZ
if window.application.last_replace_after:
return window.application.last_replace_after
else:
return None
def current_working_dir(window):
home = os.getenv('HOME')
cwd = os.getcwd()
if cwd.startswith(home):
cwd = cwd.replace(home, '~', 1)
if not cwd.endswith('/'):
cwd += '/'
return cwd
# default callback builders
def build_constant(c):
return lambda w: c

8
foo.pl Normal file
View File

@ -0,0 +1,8 @@
my $foo = <<EOF;
hi hi hi EOF hi hi EOF
EOF
EOF
print $foo;

14
global.py Normal file
View File

@ -0,0 +1,14 @@
_symbols = {}
def has(name):
return name in _symbols
def get(name):
if name in _symbols:
return _symbols[name]
else:
_symbols[name] = None
return None
def set(name, value):
_symbols[name] = value

431
highlight.py Normal file
View File

@ -0,0 +1,431 @@
import point
# to be clear:
# tokens are generated by the lexer from the buffer, and correspond to lexical
# information about a logical portion of the buffer.
# regions are derived from a combination of the lexical tokens (which correspond
# to the logical buffer) and the physical line endings (i.e. dependent on screen
# width, etc.)
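# e.g. (illustrative) a 100-character string literal is a single lexer token,
# but on an 80-column screen it wraps onto two physical lines and therefore
# contributes two regions.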
class Highlighter:
'''class used by modes to manage syntax highlighting'''
def __init__(self, m):
self.mode = m
self.tokens = None
self.regions = None
def invalidate_tokens(self):
self.tokens = None
self.invalidate_regions()
def invalidate_regions(self):
self.regions = None
def invalidate_token_range(self, start_offset, end_offset, m, n, diff):
# fix all the tokens to update their offsets, and clean out
# a token which spans the change
offset = start_offset
i = 0
last_index_before = None
first_index_after = None
while i < len(self.tokens):
t = self.tokens[i]
t.debug = False
if t.end <= start_offset:
last_index_before = i
i += 1
elif t.start >= end_offset:
if first_index_after is None:
first_index_after = i
t.start += diff
t.end += diff
i += 1
else:
if offset == start_offset:
offset = self.tokens[i].start
del self.tokens[i]
# delete m tokens further forward
        for i in range(0, m):
            if first_index_after is None:
                break
            elif first_index_after < len(self.tokens):
                del self.tokens[first_index_after]
                if first_index_after >= len(self.tokens):
                    first_index_after = None
            else:
                first_index_after = None
# delete n tokens further back
for i in range(0, n):
if last_index_before is None:
break
elif last_index_before > 0:
del self.tokens[last_index_before]
last_index_before -= 1
elif last_index_before == 0:
del self.tokens[0]
last_index_before = None
break
return (last_index_before, first_index_after)
def reparse_region(self, last_index_before, first_index_after):
i = last_index_before
if i is None:
i = 0
tokens_before = False
start_offset = 0
else:
tokens_before = True
start_offset = self.tokens[i].start
j = first_index_after
if j is None or j >= len(self.tokens):
j = -1
tokens_after = False
end_offset = None
else:
tokens_after = True
end_offset = self.tokens[j].end
# FIXME
# new things the strategy should do include:
# 1. not generating the huge "data" string
# 2. really generating the "roll-back" with
# data not just by rolling back the index
# of the lexer
# 3. pass in only as much data as you need
# to do the minimal check, and for the
# "after the change" checking, use append
# to strategically keep the string 1-2
# tokens ahead of where it needs to be
#data = self.mode.window.buffer.make_string()
#self.mode.lexer.lex(data, start_offset)
if len(self.tokens) > i:
buf_index = max(self.tokens[i].start - 100, 0)
else:
buf_index = 0
if end_offset is None:
data = self.mode.window.buffer.make_string(start=buf_index, end=None)
else:
data = self.mode.window.buffer.make_string(start=buf_index,
end=end_offset + 100)
self.mode.lexer.lex(data, start_offset - buf_index, buf_index)
saved_t = False
while True:
if saved_t is True:
                # we want to retry t again
saved_t = False
else:
try:
t = self.mode.lexer.next()
if t is None:
continue
except:
# we have no more tokens, so delete whatever was left and
# then return
if i < len(self.tokens):
del self.tokens[i:]
self.mode.lexer.lex()
return
if i >= len(self.tokens):
# we don't have any old tokens this far out, so just keep
t.debug = True
self.tokens.append(t)
i += 1
elif t.end <= self.tokens[i].start:
# we shouldn't get here if we are before the change
assert not tokens_before
# the token is before our tokens, so we can just add it
t.debug = True
self.tokens.insert(i, t)
i += 1
elif t.start == self.tokens[i].start and \
t.end == self.tokens[i].end and \
t.name == self.tokens[i].name:
# the token is identical to ours, so we can either
# stop if we are after the change, or confirm the
# start point if we are before
if tokens_before:
tokens_before = False
i += 1
else:
self.tokens[i].debug = True
self.mode.lexer.lex()
return
else:
if i < len(self.tokens):
del self.tokens[i]
                if tokens_before and i < 0:
                    raise Exception, "oh no!"
                    # unreachable sliding-window fallback, kept for reference:
                    #i -= 1
                    #start_offset = self.tokens[i].start
                    #self.mode.lexer.lex(data, start_offset)
elif tokens_before:
# ok, now we aren't sliding our window back
# and can proceed normally
tokens_before = False
saved_t = True
else:
# the new token conflicts with the old one, so delete
# the old one and try again
saved_t = True
raise Exception, "we should never get here (dolphin 2)"
def _region_changed_slow(self):
self.invalidate_tokens()
self.get_regions()
return
def _region_added_dumb(self, p, xdiff, ydiff, s):
self.invalidate_regions()
# calculate the start and end offsets of the change, and the
# difference to the length of the whole data string
start_offset = self.mode.window.buffer.get_point_offset(p)
end_offset = start_offset
assert (xdiff > 0 and ydiff >= 0) or ydiff > 0
if ydiff > 0:
p2 = point.Point(p.x + xdiff, p.y + ydiff)
elif ydiff == 0:
p2 = point.Point(p.x + xdiff, p.y)
new_offset = self.mode.window.buffer.get_point_offset(p2)
diff = new_offset - start_offset
assert diff > 0
        # move the tokens' start and end points so that the additions
# (while not being correct) won't break the existing
# highlighting
for t in self.tokens:
t.debug = False
if t.end <= start_offset:
pass
elif t.start >= end_offset:
t.start += diff
t.end += diff
else:
t.end += diff
def _region_added_complex(self, p, xdiff, ydiff, s):
self.invalidate_regions()
# calculate the start and end offsets of the change, and the
# difference to the length of the whole data string
start_offset = self.mode.window.buffer.get_point_offset(p)
end_offset = start_offset
assert ydiff >= 0
if ydiff > 0:
p2 = point.Point(p.x + xdiff, p.y + ydiff)
elif ydiff == 0:
p2 = point.Point(p.x + xdiff, p.y)
new_offset = self.mode.window.buffer.get_point_offset(p2)
diff = new_offset - start_offset
(i, j) = self.invalidate_token_range(start_offset, end_offset, 1, 1, diff)
#(i, j) = self.invalidate_token_range(start_offset, end_offset, 1, 2, diff)
self.reparse_region(i, j)
def region_added(self, p, xdiff, ydiff, s):
        if s == ' ' or s == '\t':
self._region_added_dumb(p, xdiff, ydiff, s)
else:
self._region_added_complex(p, xdiff, ydiff, s)
def _region_removed_dumb(self, p1, p2, s):
self.invalidate_regions()
# calculate the start and end offsets of the change, and the
# difference to the length of the whole data string
#diff = r
diff = len(s)
start_offset = self.mode.window.buffer.get_point_offset(p1)
end_offset = start_offset + diff
        # move the tokens' start and end points so that the additions
# (while not being correct) won't break the existing
# highlighting
i = 0
while i < len(self.tokens):
t = self.tokens[i]
t.debug = False
# if our token contains a trailing newline, certain
# deletions may not match unless we pretend that the end
# is one character earlier
if t.string.endswith('\n'):
t_end = t.end - 1
else:
t_end = t.end
if t_end <= start_offset:
pass
elif t.start >= start_offset and t_end <= end_offset:
del self.tokens[i]
continue
elif t_end >= start_offset and t_end <= end_offset:
t.end = start_offset
elif t.start >= start_offset and t.start <= end_offset:
t.start = end_offset
else:
t.start -= diff
t.end -= diff
if t.start == t.end:
del self.tokens[i]
continue
else:
assert t.start < t.end
i += 1
def _region_removed_complex(self, p1, p2, s):
self.invalidate_regions()
# calculate the start and end offsets of the change, and the
# difference to the length of the whole data string
diff = len(s)
start_offset = self.mode.window.buffer.get_point_offset(p1)
end_offset = start_offset + diff
(i, j) = self.invalidate_token_range(start_offset, end_offset, 1, 1, -diff)
#(i, j) = self.invalidate_token_range(start_offset, end_offset, 1, 2, -diff)
self.reparse_region(i, j)
def region_removed(self, p1, p2, s):
self._region_removed_complex(p1, p2, s)
def get_tokens(self):
if self.tokens is None:
self.lex_buffer()
return self.tokens
def lex_buffer(self):
'''lexes the buffer according to the grammar'''
if not hasattr(self.mode, "grammar") or \
not hasattr(self.mode, "lexer") or \
self.mode.grammar is None or \
self.mode.lexer is None:
self.tokens = []
return
self.mode.lexer.lex(self.mode.window.buffer.make_string())
self.tokens = []
for token in self.mode.lexer:
if token is not None:
self.tokens.append(token)
def get_regions(self):
def endloop(line, pindex, plines):
'''helper method for get_regions'''
self.regions.append([])
o = offset + len(line) + 1
if (pindex < len(plines) and
self.mode.window._physical_lines_cont[pindex]):
# in this case we don't skip the newline
o -= 1
p = pindex + 1
return o, p
self.get_tokens()
if self.regions is None:
plines = self.mode.window.get_physical_lines()
tindex = 0 # token index
offset = 0 # string offset
pindex = 0 # physical index
self.regions = [[]]
# looping over the physical lines
while pindex < len(plines):
last = 0
line = plines[pindex]
# figure out if we have a current token, and if so, which one
if tindex < len(self.tokens):
t = self.tokens[tindex]
else:
t = None
# if the current line doesn't contain a token, then
# make a default color token for that line and
# continue
if type(t) == type(""):
raise Exception, repr(t)
if t is None or t.start >= offset + len(line):
r = Region(0, len(line), self.mode.default_color, line, '', None)
self.regions[-1].append(r)
offset, pindex = endloop(line, pindex, plines)
continue
# looping over the tokens on a physical line
while t is not None and t.start < offset + len(line):
if t.start > offset + last:
assert last <= t.start - offset, \
"iegjeigje (%d <= %d)" % (last, t.start - offset)
# there is uncolored space before/between the token(s)
r = Region(last, t.start - offset,
self.mode.default_color,
line[last:t.start - offset], '', None)
self.regions[-1].append(r)
last = t.start - offset
color = self.mode.colors.get(t.name, self.mode.default_color)
if t.debug:
# this is useful for seeing which places get relexed
#color = self.mode.colors.get('bizzaro', self.mode.default_color)
pass
# in the case of a multiline token, looping over
# the lines it spans and incrementing as in the upper
# loop...
while t.end > offset + len(line):
assert last <= len(line), \
"jjjjccccc (%d <= %d)" % (last, len(line))
r = Region(last, len(line), color, line[last:], t.name, t)
self.regions[-1].append(r)
last = 0
offset, pindex = endloop(line, pindex, plines)
if pindex >= len(plines):
# huh???
raise Exception, "fuck me"
return self.regions
else:
line = plines[pindex]
assert last <= t.end - offset, \
"bbjjgjg (%d <= %d - %d)" % (last, t.end, offset)
r = Region(last, t.end - offset, color, line[last:t.end-offset], t.name, t)
self.regions[-1].append(r)
last = t.end - offset
tindex += 1
if tindex < len(self.tokens):
t = self.tokens[tindex]
else:
t = None
last = self.regions[-1][-1][1]
offset, pindex = endloop(line, pindex, plines)
return self.regions
class Region:
index_to_attr = ['start', 'end', 'attr', 'value', 'name']
def __init__(self, start, end, attr, value, name, token=None):
self.start = start
self.end = end
self.attr = attr
self.value = value
self.name = name
self.token = token
def __getitem__(self, i):
return getattr(self, self.index_to_attr[i])
def __repr__(self):
return '<Region: %r, %r, %r, %r, %r>' % (self.start, self.end, self.attr,
self.value, self.name)
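# Regions support both attribute access and, via index_to_attr, the older
# tuple-style indexing (get_regions itself uses region[1] above); a quick
# illustrative check:
if __name__ == '__main__':
    r = Region(0, 5, 'default', 'hello', 'word')
    assert (r[0], r[1], r[4]) == (r.start, r.end, r.name) == (0, 5, 'word')
    print repr(r)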

80
ispell.py Normal file
View File

@ -0,0 +1,80 @@
import os, popen2
import cache
_speller = None
_can_spell = os.system('which ispell > /dev/null 2>&1') == 0
def can_spell():
global _can_spell
return _can_spell
def get_speller():
global _speller
return _speller
class Speller:
def __init__(self, cmd='ispell'):
self.pipe = None
self.cache = cache.CacheDict()
self.cmd = cmd
self.start()
def start(self):
assert self.pipe is None
self.pipe = popen2.Popen3('%s -a' % self.cmd, 'rw')
self.pipe.childerr.close()
self.pipe.fromchild.readline()
def stop(self):
self.pipe.tochild.close()
self.pipe.fromchild.close()
self.pipe = None
def restart(self):
self.stop()
self.start()
def flush(self, word):
if word in self.cache:
del self.cache[word]
def check(self, word, caps=False, title=True):
# here are some quick checks:
# 1. zero-length words
# 2. all-caps word
# 3. words whose first letter is capitalized
if len(word) == 0:
return True
elif not caps and word.isupper():
return True
elif not title and word[0].isupper():
return True
result = False
if word in self.cache:
result = self.cache[word]
else:
if self.pipe.poll() >= 0:
self.pipe = None
self.start()
self.pipe.tochild.write("%s\n" % (word))
self.pipe.tochild.flush()
l = self.pipe.fromchild.readline()
if l.startswith("*") or l.startswith("+") or l.startswith("-"):
result = True
while True:
l = self.pipe.fromchild.readline()
if l == "\n":
break
self.cache[word] = result
return result
def learn(self, word):
if self.pipe.poll() >= 0:
self.pipe = None
self.start()
self.pipe.tochild.write("*%s\n" % (word))
self.pipe.tochild.flush()
self.flush(word)
if _can_spell:
_speller = Speller()
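# Illustrative usage (requires ispell to be installed, hence the guard):
# check() consults the cache first, skips ALL-CAPS words unless caps=True,
# and skips Titlecased words when title=False.
if __name__ == '__main__':
    if can_spell():
        s = get_speller()
        print s.check('hello')    # expected: True
        print s.check('helllo')   # expected: False
        print s.check('NASA')     # expected: True (all-caps words skipped)
    else:
        print 'ispell not found; skipping demo'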

154
keyinput.py Normal file
View File

@ -0,0 +1,154 @@
import curses, sys, termios
# this is a huge map of ASCII keycode sequences; it should include all
# the "standard" ones for a US 104 key keyboard. this module may need
# to support some kind of subclassing in order to be localizable.
#
# of course, i'd be crazy to try to localize a curses app
MAP = { 0: "C-@",
1: "C-a",
2: "C-b",
3: "C-c",
4: "C-d",
5: "C-e",
6: "C-f",
7: "C-g",
8: "BACKSPACE",
9: "TAB",
10: "RETURN",
11: "C-k",
12: "C-l",
13: "C-m",
14: "C-n",
15: "C-o",
16: "C-p",
17: "C-q",
18: "C-r",
19: "C-s",
20: "C-t",
21: "C-u",
22: "C-v",
23: "C-w",
24: "C-x",
25: "C-y",
26: "C-z",
27: { 79: { 80: "F1",
81: "F2",
82: "F3",
83: "F4" },
91: { 49: { 49: { 126: "F1" },
50: { 126: "F2" },
51: { 126: "F3" },
52: { 126: "F4" },
53: { 126: "F5" },
55: { 126: "F6" },
56: { 126: "F7" },
57: { 126: "F8" },
126: "HOME" },
50: { 48: { 126: "F9" },
49: { 126: "F10" },
51: { 126: "F11" },
52: { 126: "F12" },
126: "INSERT" },
51: { 126: "DELETE" },
52: { 126: "END" },
53: { 126: "PG_UP" },
54: { 126: "PG_DN" },
65: "U_ARROW",
66: "D_ARROW",
67: "R_ARROW",
68: "L_ARROW",
90: "LEFT_TAB",
91: { 65: "F1",
66: "F2",
67: "F3",
68: "F4",
69: "F5" } } },
28: "C-\\",
29: "C-]",
30: "C-30",
31: "C-/",
32: "SPACE",
127: "DELETE" }
# add the meta/control-char combinations
for key in MAP.iterkeys():
if key == 27:
# we don't want to define ESC-ESC
continue
MAP[27][key] = "M-%s" % (MAP[key])
# add meta character stuff
for i in range(33, 126):
if i == 79 or i == 91:
# these keys are used in other sequences
continue
# 7bit meta sequences
MAP[27][i] = "M-%s" % (chr(i))
# 8bit meta characters
MAP[128+i] = "M-%s" % (chr(i))
def disable_control_chars():
#terminal settings are for chumps
attr = termios.tcgetattr(sys.stdin)
global OLD_ATTR
OLD_ATTR = attr
    # disable flow control (START/STOP, i.e. C-s/C-q) so we see those keys
attr[0] = attr[0] & ~(termios.IXON | termios.IXOFF)
    # remove as many signal handlers as we can, leaving C-d (EOF)
    # and C-z (suspend) in place
for pos in range(0,len(attr[6])):
if pos == termios.VEOF or pos == termios.VSUSP:
continue
attr[6][pos] = '\x00'
# write the attributes back
termios.tcsetattr(sys.stdin, termios.TCSANOW, attr)
class Handler:
def __init__(self):
self.tokens = []
self.unset_meta()
def unset_meta(self):
self.meta = MAP
self.has_meta = False
def set_meta(self, d):
self.meta = d
self.has_meta = True
def parse(self, i):
if i == -1:
# this means that no key was actually pressed
return
if i in self.meta:
# we have a special "named" token (a control character or
# a meta sequence)
if type(self.meta[i]) == type({}):
# ok, we need to keep traversing down to finish the
# meta sequence
self.set_meta(self.meta[i])
else:
# ok, we either got a control character or finished a
# meta sequence
self.tokens.append(self.meta[i])
self.unset_meta()
else:
# we got a regular character
# we may have interrupted a meta sequence... so unset it
#if self.has_meta:
# self.tokens.append("ESC")
self.unset_meta()
if(32 < i and i < 127):
# we got a "regular" keycode, so use it
self.tokens.append(chr(i))
else:
# this shouldn't really happen
raise Exception, "strange keycode recieved: %d" % (i)

249
lex.py Executable file
View File

@ -0,0 +1,249 @@
#!/bin/env python
"""
lex - a lexer generator in python.
"""
__author__ = "Dan Williams (dan@osheim.org, dww4s@virginia.edu)"
__copyright__ = "2005"
# std imports
import os.path, re, sys, copy
# 2.3 imports
from optparse import OptionParser
# callbacks
def silent(rule, m, offset):
'''ignore a hit; return None'''
pass
def make_token(rule, m, offset):
'''return a token from a hit'''
return(Token(rule.name, m.start() + offset, m.end() + offset, m.group(0)))
class Token:
'''Used to store an instance of a lexical token'''
def __init__(self, name, start, end, s=None):
self.name = name
self.start = start
self.end = end
self.string = s
self.debug = False
def __repr__(self):
if len(self.string) < 10:
s = self.string
else:
s = self.string[:10] + "..."
return "<Token(%r, %d, %d, %r)>" % (self.name, self.start, self.end, s)
class Rule(object):
"""Defines a rule used by a lexer."""
def __init__(self, name="Unnamed", expr=r"(.|\n)", action=lambda x,y: None):
self.name = name
self.expr = expr
self.re = re.compile(self.expr)
self.action = action
def match(self, *args, **kw):
"""Determine if this rule is matched"""
return self.re.match(*args, **kw)
def act(self, lexer, m, offset=0):
"""Act on this rule"""
return self.action(self, m, offset)
class SubRule(Rule):
"""Defines a rule which parses a region according to its own grammar,
i.e. a sub-grammar with its own rules. This rule may return multiple
tokens and span multiple calls to the next() method of Lexer."""
def __init__(self, name="Unnamed", expr=r"(.|\n)", grammar=None):
self.name = name
self.expr = expr
self.re = re.compile(self.expr)
if grammar is None:
self.grammar = Grammar()
else:
self.grammar = grammar
self.lexer = Lexer(self.grammar)
self.data = None
self.index = None
def match(self, *args, **kw):
"""Determine if this rule is matched"""
m = self.re.match(*args, **kw)
if m is not None:
self.data = args[0][:m.end()]
self.index = args[1]
return m
    def act(self, lexer, m, offset=0):
        """Act on this match (the sub-lexer tracks its own offsets)"""
        self.lexer.lex(self.data, self.index)
try:
v = self.lexer.next()
lexer.sub_lexer = self.lexer
return v
except StopIteration:
lexer.sub_lexer = None
return None
class BalancedExprMatch:
def __init__(self, start, end, data):
self.s = start
self.e = end
self.d = data
def start(self):
return self.s
def end(self):
return self.e
def group(self, i):
if i == 0 or i == 1:
return self.d
else:
raise IndexError, "no such group"
def groupdict(self):
return {}
def groups(self):
return ()
def span(self):
return (self.s, self.e)
class BalancedExprRule(Rule):
"""
    Defines a rule that needs to take into account opening and closing
    expressions, i.e. parentheses, #if and #endif, etc.
"""
def __init__(self, name="Unnamed", start_expr=r"(#if +0)",
enter="#if", leave="#endif", action=lambda x,y: None):
self.name = name
self.start_expr = start_expr
self.start_re = re.compile(self.start_expr)
self.enter = enter
self.leave = leave
self.action = action
def match(self, *args, **kw):
if not self.start_re.match(*args):
return None
stack = []
data = args[0]
index = args[1]
start = index
if data[index:].startswith(self.enter):
stack.append(self.enter)
index += len(self.enter)
while len(stack) > 0 and index < len(data):
if data[index:].startswith(self.enter):
stack.append(self.enter)
index += len(self.enter)
elif data[index:].startswith(self.leave):
stack.pop(-1)
index += len(self.leave)
else:
index += 1
m = BalancedExprMatch(start, index, data[start:index])
return m
    def act(self, lexer, m, offset=0):
        """Act on this rule"""
        return self.action(self, m, offset)
class Grammar(list):
"""
Defines rules for lexing according to a given grammar.
The order of rules in the grammar is their precedence in matching.
"""
GRAMMAR_LIST = [ {'name': 'default'} ]
def __init__(self, *args, **kw):
"""useful values to pass in:
rules -> list of rules (ordered!)
if rules are not supplied, self._default_rules() is used"""
list.__init__(self)
if "rules" in kw:
for r in kw["rules"]:
self.append(r)
else:
self._default_rules()
self._post_init(*args, **kw)
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in self.GRAMMAR_LIST:
self.add_rule(**rdir)
def _post_init(self, *args, **kw):
"""subclasses can override this to enable other behavior"""
pass
def add_rule(self, *args, **kw):
self.append(Rule(*args, **kw))
def clear_rules(self):
while len(self) > 0:
del self[0]
class Lexer(object):
"""Defines a lexer, a generator of lexical tokens, etc."""
def __init__(self, grammar=None, rules=None, data=None, index=0):
"""
If the grammar keyword is provided, then that grammar will be used.
Else, if the rules keyword is provided, that list of rules will be used
Else, the default (boring) grammar will be used.
        Normally, lex(data) is used to (re-)initialize the lexer with data to
lex. If the data keyword is provided, then the lexer is ready to go
on instantiation.
"""
if grammar is not None:
self.grammar = grammar
elif rules is not None:
self.grammar = Grammar(rules=rules)
else:
self.grammar = Grammar()
self.data = data
self.index = index
self.offset = 0
self.sub_lexer = None
def lex(self, data=None, index=0, offset=0):
"""
(re-)initialize the lexer with data to lex, and optionally, an offset
to start at
"""
self.data = data
self.index = index
self.offset = offset
def __iter__(self):
if self.data is None:
raise Exception, "No data to be lexed"
return self
#def append(self, newdata, offset=0):
# self.data += newdata
# self.index += offset
def next(self):
# used for multiple levels of lexing
if self.sub_lexer is not None:
try:
return self.sub_lexer.next()
except StopIteration:
self.sub_lexer = None
if self.index >= len(self.data):
raise StopIteration
for rule in self.grammar:
m = rule.match(self.data, self.index)
if m:
self.index = m.end()
return rule.act(self, m, self.offset)
raise Exception, "Failed to consume last %d characters of input: %r" % \
(len(self.data) - self.index, self.data[self.index:])
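# A minimal usage sketch of the machinery above: rule order in the Grammar is
# match precedence, so the catch-all "default" rule goes last.  Rule names
# here are illustrative only.
if __name__ == "__main__":
    g = Grammar(rules=[
        Rule(name="number", expr=r"[0-9]+", action=make_token),
        Rule(name="word", expr=r"[a-zA-Z]+", action=make_token),
        Rule(name="default", expr=r".|\n", action=silent),
    ])
    l = Lexer(grammar=g)
    l.lex("abc 123")
    for t in l:
        if t is not None:
            print t   # <Token('word', 0, 3, 'abc')>, then <Token('number', 4, 7, '123')>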

216
lex2.py Executable file
View File

@ -0,0 +1,216 @@
import re
class Token:
def __init__(self, rule, y, x, s, role='single'):
self.rule = rule
self.y = y
self.x = x
self.string = s
self.role = role
def add_to_string(self, s):
self.string += s
def __repr__(self):
if len(self.string) < 10:
s = self.string
else:
s = self.string[:10] + '...'
return "<Token(%r, %d, %d, %r)>" % (self.rule, self.y, self.x, s)
class Rule:
def __init__(self):
self.name = 'null'
def add_token(self, lexer, s, role='single'):
t = Token(self, lexer.y, lexer.x, s, role)
lexer.curr_tokens.append(t)
lexer.x += len(s)
def add_to_last_token(self, lexer, s):
assert lexer.curr_tokens
lexer.curr_tokens[-1].add_to_string(s)
lexer.x += len(s)
def match(self):
raise Exception, "not implemented"
class NullRule(Rule):
def __init__(self):
self.name = 'null'
def match(self):
raise Exception, "null rule does not match!"
class NewlineRule(Rule):
def __init__(self):
self.name = 'newline'
def match(self):
raise Exception, "newline rule does not match!"
class ConstantRule(Rule):
def __init__(self, name="unnamed_constant", const="foo"):
self.name = name
self.const = const
def match(self, lexer):
if lexer.lines[lexer.y][lexer.x:].startswith(self.const):
self.add_token(lexer, self.const)
return True
else:
return False
class RegexRule(Rule):
def __init__(self, name="unnamed_regex", expr="[^ ]+"):
self.name = name
self.expr = expr
self.re = re.compile(expr)
def match(self, lexer):
m = self.re.match(lexer.lines[lexer.y], lexer.x)
if m:
self.add_token(lexer, m.group(0))
return True
else:
return False
class RegionRule(Rule):
def __init__(self, name, start, mid, end):
self.name = name
self.start_re = re.compile(start)
self.mid_re = re.compile(mid)
self.end_re = re.compile(end)
def match(self, lexer):
lt = lexer.last_token
l = lexer.lines[lexer.y]
if lt is not None and lt.rule.name == self.name and lt.role != 'end':
saw_mid = False
while lexer.x < len(l):
m_end = self.end_re.match(l, lexer.x)
if m_end:
self.add_token(lexer, m_end.group(0), 'end')
return True
m_mid = self.mid_re.match(l, lexer.x)
if m_mid:
s = m_mid.group(0)
else:
s = l[lexer.x]
if saw_mid:
self.add_to_last_token(lexer, s)
else:
self.add_token(lexer, s, 'mid')
saw_mid = True
return True
else:
m = self.start_re.match(l, lexer.x)
if m:
self.add_token(lexer, m.group(0), 'start')
return True
else:
return False
class DynamicRegionRule(Rule):
def __init__(self, name, start, mid, end_fmt):
self.name = name
self.start_re = re.compile(start)
self.mid_re = re.compile(mid)
self.end_fmt = end_fmt
def add_token(self, lexer, s, role, end_re):
t = Token(self, lexer.y, lexer.x, s, role)
t.end_re = end_re
lexer.curr_tokens.append(t)
lexer.x += len(s)
def match(self, lexer):
lt = lexer.last_token
l = lexer.lines[lexer.y]
if lt is not None and lt.rule.name == self.name and lt.role != 'end':
saw_mid = False
while lexer.x < len(l):
                m_end = lt.end_re.match(l, lexer.x)
if m_end:
self.add_token(lexer, m_end.group(0), 'end', None)
return True
m_mid = self.mid_re.match(l, lexer.x)
if m_mid:
s = m_mid.group(0)
else:
s = l[lexer.x]
if saw_mid:
self.add_to_last_token(lexer, s)
else:
self.add_token(lexer, s, 'mid', lt.end_re)
saw_mid = True
return True
else:
m = self.start_re.match(l, lexer.x)
if m:
end_re = re.compile(self.end_fmt % m.groups())
self.add_token(lexer, m.group(0), 'start', end_re)
return True
else:
return False
class Lexer:
rules = [
        DynamicRegionRule('heredoc', "<< *([a-zA-Z0-9_]+) *;", '.', '^%s$'),
RegionRule('string1', '"', '\\.|.', '"'),
RegexRule('word'),
]
null = NullRule()
newline = NewlineRule()
def __init__(self):
self.lines = None
self.y = 0
self.x = 0
self.last_token = None
self.curr_tokens = []
def lex(self, lines, y=0, x=0, last_token=None, next_token=None):
self.lines = lines
self.y = y
self.x = x
        self.last_token = last_token
self.curr_tokens = []
def __iter__(self):
if self.lines is None:
raise Exception, "no lines to lex"
return self
def match(self):
for rule in self.rules:
match = rule.match(self)
if match:
assert self.curr_tokens
return True
return False
def add_to_null_token(self):
c = self.lines[self.y][self.x]
if self.curr_tokens:
assert self.curr_tokens[0].rule.name == 'null', self.curr_tokens[0].rule.name
self.curr_tokens[0].add_to_string(c)
else:
self.curr_tokens.append(self.make_null_token(c))
self.x += 1
def make_null_token(self, c):
return Token(self.null, self.y, self.x, c)
def make_newline_token(self):
return Token(self.newline, self.y, self.x, '\n')
def pop_curr_token(self):
t = self.curr_tokens.pop(0)
self.last_token = t
return t
def next(self):
if self.curr_tokens:
return self.pop_curr_token()
while self.y < len(self.lines):
while self.x < len(self.lines[self.y]):
t = self.match()
if t:
return self.pop_curr_token()
else:
self.add_to_null_token()
self.y += 1
self.x = 0
#self.curr_tokens.append(self.make_newline_token())
if self.curr_tokens:
return self.pop_curr_token()
raise StopIteration
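# An illustrative run of the line-oriented lexer above, using its default
# rule set: tokens carry (y, x) coordinates rather than flat buffer offsets,
# and characters no rule claims are folded into 'null' tokens.
if __name__ == '__main__':
    lexer = Lexer()
    lexer.lex(['print "hello world"'])
    for token in lexer:
        print token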

198
lex2_perl.py Executable file
View File

@ -0,0 +1,198 @@
from optparse import OptionParser
import lex
# this file appears to be mid-port from the lex module to lex2: the rule
# dictionaries below still use lex's token actions and Grammar machinery,
# so we subclass lex.Grammar for now, with the flag from lex_perl.py
INTERPOLATION_HIGHLIGHTING = False
class PerlGrammar(lex.Grammar):
    GRAMMAR_LIST = [
{'name': 'heredoc',
'expr': r"""<< *([a-zA-Z0-9_]+) *;(?:.*?\n)*?(?:\1|$)""",
'action': lex.make_token},
{'name': 'endblock',
'expr': r"""(?:^|\n)(?:__END__|__DATA__)(?:.|\n)*$""",
'action': lex.make_token},
{'name': 'pod',
'expr': r"""(?:^|(?<=\n))=[a-zA-Z0-9_]+.*(?:\n(?!=cut).*)*(?:\n=cut|$)""",
'action': lex.make_token},
{'name': "comment",
'expr': r'[#].*(?:\n|$)',
'action': lex.make_token},
{'name': "string1",
'expr': r'''"(?:\\(?:.|\n)|[^\\"]|[ \n])*(?:"|.?$)''',
'action': lex.make_token},
{'name': "string2",
'expr': r"""'(?:\\(?:.|\n)|[^\\'])*(?:'|.?$)""",
'action': lex.make_token},
{'name': "evalstring",
'expr': r"""`(?:\\(?:.|\n)|[^\\`])*(?:`|.?$)""",
'action': lex.make_token},
{'name': 'number',
'expr': r"""0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?""",
'action': lex.make_token},
{'name': 'label',
'expr': r"""[a-zA-Z_][a-zA-Z0-9_]*:(?= |\n)""",
'action': lex.make_token},
{'name': 'keyword',
'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
'action': lex.make_token},
{'name': 'hash bareword index',
'expr': r"(?<={)[A-Za-z0-9_]+(?=})",
'action': lex.make_token},
{'name': 'literal hash bareword index',
'expr': r"[A-Za-z0-9_]+(?= *=>)",
'action': lex.make_token},
{'name': 'length scalar',
'expr': r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
'action': lex.make_token},
{'name': 'system scalar',
'expr': r"\$[][><ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])",
'action': lex.make_token},
{'name': 'system array',
'expr': r"@_",
'action': lex.make_token},
{'name': 'scalar',
'expr': r"""\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'array',
'expr': r"""@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'hash',
'expr': r"""%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'dereference',
'expr': r"""[@%\$&\*](?={)""",
'action': lex.make_token},
        # this isn't totally right but it handles q//, q{} and q() which are
# the commonest
{'name': 'quoted region',
'expr': r"""q.\((?:\\.|[^\\\)])*\)|q./(?:\\.|[^\\/])*/|q.\{(?:\\.|[^\\\}])*\}""",
'action': lex.make_token},
# match regexes are annoying: the basic gist is easy, but all the perl
# crap sucks. if the m is not present, you have to use / as the
# delimiter. otherwise, you can use any non-alphanumeric-or-whitespace
# character. if you use <, (, [, or {, you close with the opposite kind
# of thing. we have to special-case those last 4. ugh.
#
# basic gist: /(\\.|[^\\])*?/[a-z]*
{'name': 'match regex',
'expr': r"""(?:(?<==~)|(?<=!~)|(?<=\()) */(?:\\.|[^\\/])*/[a-z]*|m([^<[{(A-Za-z0-9 \t\n])(?:\\.|[^\\])*?\1[a-z]*|m\((?:\\.|[^\\])*?\)[a-z]*|m{(?:\\.|[^\\])*?}[a-z]*|m<(?:\\.|[^\\])*?>[a-z]*|m\[(?:\\.|[^\\])*?\][a-z]*""",
'action': lex.make_token},
# we officially don't support the bullshit s{a}{b} thing perl has going.
# those guys are on crack. we only support things like s#a#b# or s/a/b/.
# same comments as above apply
{'name': 'replace regex',
'expr': r"""(?:y|tr|s)([^<[{(A-Za-z0-9 \t\n])(?:\\.|[^\\])*?\1(?:\\.|[^\\])*?\1[a-z]*""",
'action': lex.make_token},
{'name': 'package',
'expr': r"""(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'use',
'expr': r"""(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'instance method',
'expr': r"""(?<=->)[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'static method',
'expr': r"""&?(?:[a-zA-Z_][a-zA-Z_0-9]*::)+[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'method declaration',
'expr': r"""(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*(?=[ \n]*{)""",
'action': lex.make_token},
{'name': 'built-in method',
'expr': r"""(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])""",
#'expr':r"""(?<!->)&?(?:abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|chmod|chomp|chop|chown|chroot|chr|closedir|close|connect|cos|crypt|dbmclose|dbmopen|defined|delete|die|dump|each|eof|exec|exists|exit|exp|fcntl|fileno|flock|fork|format|formline|getc|getlogin|getpeername|grep|int|join|keys|lc|map|open|pop|print|push|rand|readdir|ref|scalar|select|shift|sort|split|srand|time|uc|unshift|values|wantarray|warn)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': 'method',
'expr': r"""&(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'methodref',
'expr': r"""&\$(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'bareword method',
'expr': r"""(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=[ \n]*(?:\(|->))""",
'action': lex.make_token},
{'name': "delimiter",
'expr': r"""\(|\)|\[|\]|{|}|,|;|=>|=|\?|(?<!:):(?!=:)""",
'action': lex.make_token},
{'name': "unary operator",
'expr': r"""\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*=""",
'action': lex.make_token},
{'name': "operator",
'expr': r"""\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|=~|!~|!=|%|!|\.""",
'action': lex.make_token},
{'name': 'bareword',
'expr': r"""(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'default',
'expr': r""".|\n""",
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
sub_exprs = {}
string_rules = []
for rdir in PerlGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)
if INTERPOLATION_HIGHLIGHTING:
if rdir['name'] in ('scalar', 'system scalar', 'array', 'hash',
'system array'):
rdir2 = rdir.copy()
rdir2['name'] = 'interpolated ' + rdir['name']
string_rules.append(lex.Rule(**rdir2))
elif rdir['name'] in ('heredoc', 'string1', 'string2'):
sub_exprs[rdir['name']] = rdir['expr']
if INTERPOLATION_HIGHLIGHTING:
string_rules.append(lex.Rule(name="default string",
expr=r"""(?:\\.|[^\\\$]|\n)+|\$""",
action=lex.make_token))
string_grammar = lex.Grammar(rules=string_rules)
self.insert(0, lex.SubRule(name='heredoc',
expr=sub_exprs['heredoc'],
grammar=string_grammar))
self.insert(4, lex.SubRule(name="string1",
expr=sub_exprs['string1'],
grammar=string_grammar))
self.insert(5, lex.SubRule(name="string2",
expr=sub_exprs['string2'],
grammar=string_grammar))

16
lex_blame.py Executable file
View File

@ -0,0 +1,16 @@
import lex
class BlameGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'metadata',
'expr': "(?:^|(?<=\n))[0-9.]+ +[a-zA-Z0-9_]+ +[-0-9A-Za-z]+",
'action': lex.make_token},
{'name': 'data',
'expr': ".+(?:$|\n)",
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent},
]

122
lex_c.py Normal file
View File

@ -0,0 +1,122 @@
#!/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class CGrammar(lex.Grammar):
GRAMMAR_LIST = [
# this might not be complete...
# see http://gcc.gnu.org/onlinedocs/gcc-2.95.3/cpp_3.html#SEC44
# we need to absorb the rest of the line cause otherwise shit happens
{'name': 'macro2',
'expr': r"#(?:define|import|include|undef)(?= )",
'action':lex.make_token},
{'name': 'macro1',
'expr': r"#(?:assert|cpu|elif|else|error|endif|error|ident|ifdef|ifndef|if|include_next|line|machine|pragma|pragma_once|system|unassert|warning)(?:[^\n]*\\\n)*[^\n]*?(?=\n)",
'action':lex.make_token},
{'name': 'header',
'expr': r'''(?<=#include) +(?:<[A-Za-z0-9_]+\.h?>|"[A-Za-z0-9_]+\.h")''',
'action': lex.make_token},
{'name': 'constant',
'expr': r'''(?<=#define) +[A-Za-z0-9_]+(?= |\(|\n|$)''',
'action': lex.make_token},
{'name': 'label',
'expr': r"""[a-zA-Z_]+(?=:)""",
'action': lex.make_token},
{'name': "c++ comment",
'expr': r'//.*(?:\n|$)',
'action': lex.make_token},
{'name': "c comment",
'expr': r"/\*(?:.|\n)*?(?:\*/|$)",
'action' : lex.make_token},
{'name': 'control',
'expr': r"(?:break|case|continue|default|do|else|for|goto|if|return|switch|while)(?![a-zA-Z_])",
'action': lex.make_token},
{'name': 'keyword',
'expr': r"(?:auto|break|case|char|const|continue|default|do|double|else|enum|extern|float|for|goto|if|int|long|register|return|short|signed|sizeof|static|struct|switch|typedef|union|unsigned|void|volatile|while)(?![a-zA-z_])",
'action': lex.make_token},
{'name': 'builtin',
'expr': r"(?:NULL|TRUE|FALSE)",
'action': lex.make_token},
{'name': "identifier",
'expr': r"[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "unary operator",
'expr': r"""\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*=""",
'action': lex.make_token},
{'name': "operator",
'expr': r"""\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%""",
'action': lex.make_token},
# this is sketchy as hell
{'name': "delimiter",
'expr': r"""->|\.|\(|\)|\[|\]|{|}|@|,|:|`|;|=|\?""",
'action': lex.make_token},
{'name': "integer",
'expr': r"(?:0(?![x0-9])|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?",
'action': lex.make_token},
{'name': "float",
'expr': r"""[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+""",
'action': lex.make_token},
{'name': "string1",
'expr': r'"(?:\\.|[^"])*(?:"|.?$)',
'action': lex.make_token},
# Doesn't handle octal . . (yeah it does..heh...ughhh)
{'name': "char",
'expr': r"'(?:\\[^']+|[^'])(?:'|.?$)",
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
lex.Grammar._default_rules(self)
self.insert(0, lex.BalancedExprRule(name='macro comment',
start_expr=r"#if +0",
enter="#if",
leave="#endif",
action=lex.make_token))
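# A sanity sketch for the balanced-expression rule installed above: everything
# from "#if 0" through its *matching* "#endif" (nested pairs included) should
# come back as a single 'macro comment' token.  Call this by hand to check:
def _demo_macro_comment():
    src = "#if 0\n#if FOO\nint x;\n#endif\nint y;\n#endif\nint z;\n"
    l = lex.Lexer(grammar=CGrammar())
    l.lex(src)
    for t in l:
        if t is not None and t.name == 'macro comment':
            print repr(t.string)   # the whole '#if 0' ... '#endif' span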
if __name__ == "__main__":
usage = "%%prog <file> [<file> ...]\n\n" \
"Lex one or more files according to the python grammar"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
g = CGrammar()
l = lex.Lexer(grammar=g)
for path in args:
f = open(path, 'r')
data = f.read()
f.close()
print "Lexing %s:" % (path)
l.lex(data)
for t in l:
if t is not None:
print t
#print "%-12s %-40s %d %d" % (t.rule.name, t.string, t.start, t.end)

41
lex_diff.py Executable file
View File

@ -0,0 +1,41 @@
import lex
class DiffGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': "left",
'expr': "(?:^|(?<=\n))\-.*(?:$|\n)",
'action': lex.make_token},
{'name': "right",
'expr': "(?:^|(?<=\n))\+.*(?:$|\n)",
'action': lex.make_token},
#RCS file: /usr/local/cvsroot/TBB_v2/main_application/lib/TBB/EfileServer.pm,v
#retrieving revision 1.57
#diff -u -r1.57 EfileServer.pm
{'name': "cvs metadata",
'expr': "(?:^|(?<=\n))Index: .*\n={67}\nRCS file: .*,v\nretrieving revision [0-9.]+\ndiff -u .*(?:$|\n)",
'action': lex.make_token},
{'name': "svn metadata",
'expr': "(?:^|(?<=\n))Index: .*\n={67}(?:$|\n)",
'action': lex.make_token},
{'name': "location",
'expr': "(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)",
'action': lex.make_token},
{'name': "common",
'expr': "(?:^|(?<=\n)).*(?:$|\n)",
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in DiffGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)

82
lex_javascript.py Executable file
View File

@ -0,0 +1,82 @@
#!/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class JavascriptGrammar(lex.Grammar):
GRAMMAR_LIST = [
## {'name': "import statement",
## 'expr': r"""(?:^|(?<= ))import [ .]*(?=\n)""",
## 'action': lex.make_token},
{'name': "comment",
'expr': r'//.*(?=\n|$)',
'action': lex.make_token},
{'name': "function declaration",
'expr': r"(?<=function ) *[a-zA-Z0-9_]* *(?=\()",
'action': lex.make_token},
{'name': "class declaration",
'expr': r"(?<=class )[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': 'keyword',
'expr': r"""(?:and|break|class|continue|def|del|elif|else|except|exec|finally|for|from|function|global|if|import|in|is|lambda|new|not|or|pass|print|raise|return|try|var|while|yield)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': "pseudo-keyword",
'expr': r"""(?:as|self|True|False|None|Exception)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
## {'name': "built-in method",
## 'expr': r"""(?<!\.)(?:bool|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|iter|len|list|locals|long|map|min|max|object|oct|open|ord|pow|property|range|raw_input|reduce|repr|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)(?![a-zA-Z0-9_])""",
## 'action': lex.make_token},
{'name': "bound method",
'expr': r"(?<=\.)[a-zA-Z_][a-zA-Z0-9_]*(?= *\()",
'action': lex.make_token},
{'name': "identifier",
'expr': r"[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "delimiter",
'expr': r"""\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*=""",
'action': lex.make_token},
{'name': "operator",
'expr': r"""\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%""",
'action': lex.make_token},
{'name': "integer",
'expr': r"(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?",
'action': lex.make_token},
{'name': "float",
'expr': r"""[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+""",
'action': lex.make_token},
{'name': "imaginary",
'expr': r"""[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ]""",
'action': lex.make_token},
{'name': "string1",
'expr': r'"(?:\\.|[^\\"])*(?:"|.?$)',
'action': lex.make_token},
{'name': "string2",
'expr': r"'(?:\\.|[^\\'])*(?:'|.?$)",
'action': lex.make_token},
{'name': "continuation",
'expr': r'\\(?=(?:\n|$))',
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]

81
lex_mutt.py Executable file
View File

@ -0,0 +1,81 @@
import os
import ispell, lex
def make_token(rule, m, offset=0):
    '''return a token from a hit'''
    return lex.Token(rule.name, m.start() + offset, m.end() + offset, m.group(0))
def make_token_spell(rule, m, offset=0):
    '''return a token from a hit, spell-checking the word first'''
# first let's figure out the actual word we need to check
if rule.name == 'continued word':
word = '%s%s' % (m.group(1), m.group(2))
else:
word = m.group(0)
# okay, now we check the spelling; we don't spell-check all caps words
if ispell.can_spell() and \
not ispell.get_speller().check(word, caps=False, title=False):
name = "misspelled %s" % rule.name
else:
name = rule.name
    return lex.Token(name, m.start() + offset, m.end() + offset, m.group(0))
class MuttGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'header',
'expr': r'(?:^|(?<=\n))(?:From|To|Cc|Bcc|Subject|Reply-To|In-Reply-To|Delivered-To|Date):',
'action': make_token,
},
{'name': 'quote1',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){1} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
},
{'name': 'quote2',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){2} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
},
{'name': 'quote3',
'expr': r'(?:^|(?<=\n))(?:(?: *>){3})*(?: *>){3} *(?:[^ >\n][^\n]*)?(?:$|\n)',
'action': make_token,
},
{'name': 'email',
'expr': r'(?:^|(?<=[ :\n]))<?[^<>@\n ]+@(?:[^<>@\.\n ]+\.)*[^<>@\.\n ]+>?',
'action': make_token,
},
{'name': 'url',
'expr': r'(?:^|(?<=[ \n]))(?:http|https|ftp|sftp|file|smtp|smtps|torrent|news|jabber|irc|telnet)://(?:[^\.\n ]+\.)*[^\.\n ]+',
'action': make_token,
},
{'name': 'continued word',
'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
'action': make_token_spell,
},
{'name': 'word',
'expr': r"""(?:[a-zA-Z][-']?)*[a-zA-Z]""",
'action': make_token_spell,
},
{'name': 'stuff',
'expr': r"""[^ \n]+""",
'action': make_token,
},
{'name': "default",
'expr': r'.| |\n',
'action': lex.silent,
},
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in self.GRAMMAR_LIST:
self.add_rule(**rdir)

100
lex_nasm.py Normal file
View File

@ -0,0 +1,100 @@
#!/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class NasmGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'keyword',
'expr': \
r"""(?:section|global|extern)(?![a-zA-Z_])""",
'action': lex.make_token},
{'name': "nasm macros",
'expr': r"%(?:define|undef|assign|strlen|macro|endmacro|if|elif|else|endif|ifdef|ifndef|include|push|pop|stacksize)(?![a-zA-Z_])",
'action': lex.make_token
},
{'name': "instructions",
'expr': \
r"""(?:jeq|jne|ja|jmp|push|pushad|pushfd|call|ret|sub|add|pop|popa|popad|popfd|call|and|cwd|cdq|cmp|cmpxchg|cpuid|div|divpd|enter|leave|fadd|fld|fmul|fsqrt|fsub|hlt|imul|inc|int|int3|lea|mov|movd|mul|neg|not|nop|or|sal|sar|shl|shr|shld|shrd|syscall|sysenter|sysexit|test|xchg|xadd|xor)(?![a-zA-Z_])""",
'action': lex.make_token},
{'name': "registers",
'expr': \
r"""(?:eax|ax|ah|al|ebx|bx|bh|bl|ecx|cx|ch|cl|esi|edi|esp|ebp)""",
'action': lex.make_token},
{'name': "prefix",
'expr': r"(?:dword|word|lock)",
'action': lex.make_token
},
{'name': "label",
'expr': r"[a-zA-Z_.][a-zA-Z0-9_.]*:",
'action': lex.make_token},
{'name': "identifier",
'expr': r"[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "integer",
'expr': r"(0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?",
'action': lex.make_token},
{'name': "float",
'expr': \
r"""[0-9]+\.[0-9]*|\.[0-9]+|([0-9]|
[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+""",
'action': lex.make_token},
{'name': "string3",
'expr': r'"""[.|\n]*?(?:"""|$)',
'action': lex.make_token},
{'name': "string1",
'expr': r'"(?:\\.|[^\\"])*(?:"|$)',
'action': lex.make_token},
{'name': "string2",
'expr': r"'(?:\\.|[^\\'])*(?:'|$)",
'action': lex.make_token},
{'name': "comment",
'expr': r'[;].*(?:\n|$)',
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in NasmGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)
if __name__ == "__main__":
usage = "%%prog <file> [<file> ...]\n\n" \
"Lex one or more files according to the python grammar"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
g = NasmGrammar()
l = lex.Lexer(grammar=g)
for path in args:
f = open(path, 'r')
data = f.read()
f.close()
print "Lexing %s:" % (path)
l.lex(data)
for x in l:
if x is not None:
print x

207
lex_perl.py Executable file
View File

@ -0,0 +1,207 @@
#!/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
# this will support perl's string interpolation; but, it can be slower and also
# possibly buggier
INTERPOLATION_HIGHLIGHTING = False
#INTERPOLATION_HIGHLIGHTING = True
class PerlGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'heredoc',
'expr': r"""<< *([a-zA-Z0-9_]+) *;(?:.*?\n)*?(?:\1|$)""",
'action': lex.make_token},
{'name': 'endblock',
'expr': r"""(?:^|\n)(?:__END__|__DATA__)(?:.|\n)*$""",
'action': lex.make_token},
{'name': 'pod',
'expr': r"""(?:^|(?<=\n))=[a-zA-Z0-9_]+.*(?:\n(?!=cut).*)*(?:\n=cut|$)""",
'action': lex.make_token},
{'name': "comment",
'expr': r'[#].*(?:\n|$)',
'action': lex.make_token},
{'name': "string1",
'expr': r'''"(?:\\(?:.|\n)|[^\\"]|[ \n])*(?:"|.?$)''',
'action': lex.make_token},
{'name': "string2",
'expr': r"""'(?:\\(?:.|\n)|[^\\'])*(?:'|.?$)""",
'action': lex.make_token},
{'name': "evalstring",
'expr': r"""`(?:\\(?:.|\n)|[^\\`])*(?:`|.?$)""",
'action': lex.make_token},
{'name': 'number',
'expr': r"""0?\.[0-9]+|[0-9]+(?:\.[0-9]+)?""",
'action': lex.make_token},
{'name': 'label',
'expr': r"""[a-zA-Z_][a-zA-Z0-9_]*:(?= |\n)""",
'action': lex.make_token},
{'name': 'keyword',
'expr': r"""(?<!->)(?:STDIN|STDERR|STDOUT|and|cmp|continue|do|else|elsif|eq|eval|foreach|for|if|last|my|next|ne|not|or|our|package|require|return|sub|undef|unless|until|use|while)(?![a-zA-Z_])""",
'action': lex.make_token},
{'name': 'hash bareword index',
'expr': r"(?<={)[A-Za-z0-9_]+(?=})",
'action': lex.make_token},
{'name': 'literal hash bareword index',
'expr': r"[A-Za-z0-9_]+(?= *=>)",
'action': lex.make_token},
{'name': 'length scalar',
'expr': r"\$#[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*",
'action': lex.make_token},
{'name': 'system scalar',
'expr': r"\$[][><ab/'\"_@\?#\$!%^|&*()](?![A-Za-z0-9_])",
'action': lex.make_token},
{'name': 'system array',
'expr': r"@_",
'action': lex.make_token},
{'name': 'scalar',
'expr': r"""\$\$*[A-Za-z0-9_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'array',
'expr': r"""@\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'hash',
'expr': r"""%\$*[A-Za-z_](?:[A-Za-z0-9_]|::)*""",
'action': lex.make_token},
{'name': 'dereference',
'expr': r"""[@%\$&\*](?={)""",
'action': lex.make_token},
        # this isn't totally right but it handles q//, q{} and q() which are
# the commonest
{'name': 'quoted region',
'expr': r"""q.\((?:\\.|[^\\\)])*\)|q./(?:\\.|[^\\/])*/|q.\{(?:\\.|[^\\\}])*\}""",
'action': lex.make_token},
# match regexes are annoying: the basic gist is easy, but all the perl
# crap sucks. if the m is not present, you have to use / as the
# delimiter. otherwise, you can use any non-alphanumeric-or-whitespace
# character. if you use <, (, [, or {, you close with the opposite kind
# of thing. we have to special-case those last 4. ugh.
#
# basic gist: /(\\.|[^\\])*?/[a-z]*
{'name': 'match regex',
'expr': r"""(?:(?<==~)|(?<=!~)|(?<=\()) */(?:\\.|[^\\/])*/[a-z]*|m([^<[{(A-Za-z0-9 \t\n])(?:\\.|[^\\])*?\1[a-z]*|m\((?:\\.|[^\\])*?\)[a-z]*|m{(?:\\.|[^\\])*?}[a-z]*|m<(?:\\.|[^\\])*?>[a-z]*|m\[(?:\\.|[^\\])*?\][a-z]*""",
'action': lex.make_token},
# we officially don't support the bullshit s{a}{b} thing perl has going.
# those guys are on crack. we only support things like s#a#b# or s/a/b/.
# same comments as above apply
{'name': 'replace regex',
'expr': r"""(?:y|tr|s)([^<[{(A-Za-z0-9 \t\n])(?:\\.|[^\\])*?\1(?:\\.|[^\\])*?\1[a-z]*""",
'action': lex.make_token},
{'name': 'package',
'expr': r"""(?<=package )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'use',
'expr': r"""(?<=use )(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'instance method',
'expr': r"""(?<=->)[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'static method',
'expr': r"""&?(?:[a-zA-Z_][a-zA-Z_0-9]*::)+[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'method declaration',
'expr': r"""(?<=sub )[a-zA-Z_][a-zA-Z_0-9]*(?=[ \n]*{)""",
'action': lex.make_token},
{'name': 'built-in method',
'expr': r"""(?<!->)&?(?:write|warn|wantarray|waitpid|wait|vec|values|utime|use|untie|unshift|unpack|unlink|undef|umask|ucfirst|uc|truncate|times|time|tied|tie|telldir|tell|syswrite|system|sysseek|sysread|sysopen|syscall|symlink|substr|sub|study|stat|srand|sqrt|sprintf|split|splice|sort|socketpair|socket|sleep|sin|shutdown|shmwrite|shmread|shmget|shmctl|shift|setsockopt|setservent|setpwent|setprotoent|setpriority|setpgrp|setnetent|sethostent|setgrent|send|semop|semget|semctl|select|seekdir|seek|scalar|rmdir|rindex|rewinddir|reverse|return|reset|require|rename|ref|redo|recv|readpipe|readlink|readline|readdir|read|rand|quotemeta|push|prototype|printf|print|pos|pop|pipe|package|pack|our|ord|opendir|open|oct|no|next|my|msgsnd|msgrcv|msgget|msgctl|mkdir|map|lstat|log|lock|localtime|local|listen|link|length|lcfirst|lc|last|kill|keys|join|ioctl|int|index|import|hex|grep|goto|gmtime|glob|getsockopt|getsockname|getservent|getservbyport|getservbyname|getpwuid|getpwnam|getpwent|getprotoent|getprotobynumber|getprotobyname|getpriority|getppid|getpgrp|getpeername|getnetent|getnetbyname|getnetbyaddr|getlogin|gethostent|gethostbyname|gethostbyaddr|getgrnam|getgrgid|getgrent|getc|formline|format|fork|flock|fileno|fcntl|exp|exit|exists|exec|eval|eof|endservent|endpwent|endprotoent|endnetent|endhostent|endgrent|each|dump|do|die|delete|defined|dbmopen|dbmclose|crypt|cos|continue|connect|closedir|close|chroot|chr|chown|chop|chomp|chmod|chdir|caller|bless|binmode|bind|atan2|alarm|accept|abs)(?![a-zA-Z0-9_])""",
#'expr':r"""(?<!->)&?(?:abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|chmod|chomp|chop|chown|chroot|chr|closedir|close|connect|cos|crypt|dbmclose|dbmopen|defined|delete|die|dump|each|eof|exec|exists|exit|exp|fcntl|fileno|flock|fork|format|formline|getc|getlogin|getpeername|grep|int|join|keys|lc|map|open|pop|print|push|rand|readdir|ref|scalar|select|shift|sort|split|srand|time|uc|unshift|values|wantarray|warn)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': 'method',
'expr': r"""&(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'methodref',
'expr': r"""&\$(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'bareword method',
'expr': r"""(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*(?=[ \n]*(?:\(|->))""",
'action': lex.make_token},
{'name': "delimiter",
'expr': r"""\(|\)|\[|\]|{|}|,|;|=>|=|\?|(?<!:):(?!=:)""",
'action': lex.make_token},
{'name': "unary operator",
'expr': r"""\+=|-=|\*=|/=|//=|%=|&=\|\^=|>>=|<<=|\*\*=""",
'action': lex.make_token},
{'name': "operator",
'expr': r"""\+|<=>|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|=~|!~|!=|%|!|\.""",
'action': lex.make_token},
{'name': 'bareword',
'expr': r"""(?:[a-zA-Z_][a-zA-Z_0-9]*::)*[a-zA-Z_][a-zA-Z_0-9]*""",
'action': lex.make_token},
{'name': 'default',
'expr': r""".|\n""",
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
sub_exprs = {}
string_rules = []
for rdir in PerlGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)
if INTERPOLATION_HIGHLIGHTING:
if rdir['name'] in ('scalar', 'system scalar', 'array', 'hash',
'system array'):
rdir2 = rdir.copy()
rdir2['name'] = 'interpolated ' + rdir['name']
string_rules.append(lex.Rule(**rdir2))
elif rdir['name'] in ('heredoc', 'string1', 'string2'):
sub_exprs[rdir['name']] = rdir['expr']
if INTERPOLATION_HIGHLIGHTING:
string_rules.append(lex.Rule(name="default string",
expr=r"""(?:\\.|[^\\\$]|\n)+|\$""",
action=lex.make_token))
string_grammar = lex.Grammar(rules=string_rules)
self.insert(0, lex.SubRule(name='heredoc',
expr=sub_exprs['heredoc'],
grammar=string_grammar))
self.insert(4, lex.SubRule(name="string1",
expr=sub_exprs['string1'],
grammar=string_grammar))
self.insert(5, lex.SubRule(name="string2",
expr=sub_exprs['string2'],
grammar=string_grammar))

102
lex_python.py Executable file
View File

@ -0,0 +1,102 @@
#!/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class PythonGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': "import statement",
'expr': r"""(?:^|(?<= ))import [ .]*(?=\n)""",
'action': lex.make_token},
{'name': "method declaration",
'expr': r"(?<=def )[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "class declaration",
'expr': r"(?<=class )[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': 'keyword',
'expr': r"""(?:and|assert|break|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|not|or|pass|print|raise|return|try|while|yield)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': "pseudo-keyword",
'expr': r"""(?:as|self|True|False|None|Exception)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': "built-in method",
'expr': r"""(?<!\.)(?:bool|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|iter|len|list|locals|long|map|min|max|object|oct|open|ord|pow|property|range|raw_input|reduce|repr|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)(?![a-zA-Z0-9_])""",
'action': lex.make_token},
{'name': "bound method",
'expr': r"(?<=\.)[a-zA-Z_][a-zA-Z0-9_]*(?= *\()",
'action': lex.make_token},
{'name': "system_identifier",
'expr': r"__[a-zA-Z0-9_]*__",
'action': lex.make_token},
{'name': "private_identifier",
'expr': r"__[a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "hidden_identifier",
'expr': r"_[a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "identifier",
'expr': r"[a-zA-Z_][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "delimiter",
'expr': r"""\(|\)|\[|\]|{|}|@|,|:|\.|`|=|;|\+=|-=|\*=|/=|//=|%=|&=|\|=|\^=|>>=|<<=|\*\*=""",
'action': lex.make_token},
{'name': "operator",
'expr': r"""\+|<>|<<|<=|<|-|>>|>=|>|\*\*|&|\*|\||/|\^|==|//|~|!=|%""",
'action': lex.make_token},
{'name': "integer",
'expr': r"(?:0|[1-9][0-9]*|0[0-7]+|0[xX][0-9a-fA-F]+)[lL]?",
'action': lex.make_token},
{'name': "float",
'expr': r"""[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+""",
'action': lex.make_token},
{'name': "imaginary",
'expr': r"""[0-9]+|(?:[0-9]+\.[0-9]*|\.[0-9]+|(?:[0-9]|[0-9]+\.[0-9]*|\.[0-9]+)[eE][\+-]?[0-9]+)[jJ]""",
'action': lex.make_token},
{'name': "string4",
'expr': r'"""(?:.|\n)*?(?:"""|$)',
'action': lex.make_token},
{'name': "string3",
'expr': r"'''(?:.|\n)*?(?:'''|$)",
'action': lex.make_token},
{'name': "string1",
'expr': r'"(?:\\.|[^\\"])*(?:"|.?$)',
'action': lex.make_token},
{'name': "string2",
'expr': r"'(?:\\.|[^\\'])*(?:'|.?$)",
'action': lex.make_token},
{'name': "comment",
'expr': r'[#].*(?=\n|$)',
'action': lex.make_token},
{'name': "continuation",
'expr': r'\\(?=(?:\n|$))',
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
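
Each lexer module in this commit follows the same convention: GRAMMAR_LIST is an ordered list of named regexes, and order matters, since scanning is first-match-wins (note how 'identifier' sits below the keyword rules and 'default' catches whatever is left). The lex module itself is outside this hunk, so purely as an illustrative stand-in, a list like the one above can drive a scanner along these lines:

import re

class Rule:
    # stand-in for lex.Rule: a named, compiled pattern plus an action
    def __init__(self, name, expr, action):
        self.name = name
        self.expr = re.compile(expr)
        self.action = action

def make_token(rule, m):
    # stand-in for lex.make_token: emit (name, start, end, text)
    return (rule.name, m.start(), m.end(), m.group(0))

def silent(rule, m):
    # stand-in for lex.silent: consume the match, emit nothing
    return None

def tokenize(grammar_list, data):
    rules = [Rule(d['name'], d['expr'], d['action']) for d in grammar_list]
    (pos, tokens) = (0, [])
    while pos < len(data):
        for rule in rules:
            m = rule.expr.match(data, pos)
            if m and m.end() > pos:   # first rule to consume text wins
                tok = rule.action(rule, m)
                if tok is not None:
                    tokens.append(tok)
                pos = m.end()
                break
        else:
            pos += 1                  # defensive; 'default' makes this dead
    return tokens

Fed PythonGrammar.GRAMMAR_LIST (assuming lex.make_token shares the (rule, match) signature that make_token_spell uses in lex_text.py), this produces one token per match, with the catch-all 'default' rule guaranteeing the scan always advances.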

85
lex_sh.py Executable file
View File

@ -0,0 +1,85 @@
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class ShGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': "method",
'expr': r"""[a-zA-Z_][a-zA-Z0-9_]*(?=\(\))""",
'action': lex.make_token},
{'name': 'reserved',
'expr': r"""(?:case|done|do|elif|else|esac|fi|for|function|if|in|select|then|until|while|time)(?![a-zA-Z0-9_=])""",
'action': lex.make_token},
{'name': 'builtin',
'expr': r"""(?:source|alias|bg|bind|break|builtin|cd|command|compgen|complete|declare|dirs|disown|echo|enable|eval|exec|exit|export|fc|fg|getops|hash|help|history|jobs|kill|let|local|logout|popd|printf|pushd|pwd|readonly|read|return|set|shift|shopt|suspend|test|times|trap|type|ulimit|umask|unalias|unset|wait)(?![a-zA-Z0-9_=/])""",
'action': lex.make_token},
{'name': 'operator',
'expr': r"""(?:-eq|-ne|-gt|-lt|-ge|-le| = | != )""",
'action': lex.make_token},
## {'name': 'redirection',
## 'expr': r"(?:[1-6] *)?> *(?:&[1-6]|(?:\\.|[^\\\"';| ])+)",
## 'action': lex.make_token},
{'name': 'delimiter',
'expr': """[][\(\);\{\}|&><]""",
'action': lex.make_token},
## {'name': 'variable0',
## 'expr': r"""(?:(?<=\n)|^) *[a-zA-Z_][a-zA-Z0-9_]*(?=\=)""",
## 'action': lex.make_token},
{'name': 'variable0',
'expr': r"""(?:(?<=\n) *|^ *| +)[a-zA-Z_][a-zA-Z0-9_]*(?=\=)""",
'action': lex.make_token},
{'name': "variable1",
'expr': r"\${(?:[a-zA-Z0-9_]+|\?\$)}",
'action': lex.make_token},
{'name': "variable2",
'expr': r"\$[^({][a-zA-Z0-9_]*",
'action': lex.make_token},
{'name': "variable3",
'expr': r"\$(?=\()",
'action': lex.make_token},
{'name': "eval",
'expr': r'`(?:\\.|[^\\`])*(?:`|.?$)',
'action': lex.make_token},
{'name': "string1",
'expr': r'"(?:\\.|[^\\"])*(?:"|.?$)',
'action': lex.make_token},
{'name': "string2",
'expr': r"'(?:\\.|[^\\'])*(?:'|.?$)",
'action': lex.make_token},
{'name': 'continuation',
'expr': r"""\\(?= *(\n|$))""",
'action': lex.make_token},
{'name': "comment",
'expr': r'[#].*(?:\n|$)',
'action': lex.make_token},
{'name': 'bareword',
'expr': r"""[a-zA-Z0-9_-]+""",
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in ShGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)

70
lex_sql.py Executable file
View File

@ -0,0 +1,70 @@
import lex
class SqlGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': "sql comment",
'expr': r'--[^\n]*',
'action': lex.make_token},
{'name': "c comment",
'expr': r'/\*(?:.| |\n)*?(?:\*/|$)',
'action': lex.make_token},
{'name': 'delimiter',
'expr': r'[][();,\.:$]',
'action': lex.make_token},
{'name': 'attribute1',
'expr': r'''(?:CHECK|EXISTS|UNIQUE|NOT NULL|DEFAULT|PRIMARY KEY|MINVALUE|FOREIGN KEY|REFERENCES)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'attribute2',
'expr': r'''(?:check|exists|unique|not null|default|primary key|minvalue|foreign key|references)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'operator1',
'expr': r'''(?:CASE|WHEN|THEN|ELSE|END|NOT|AND|OR|IS NOT|IS|IN|NOT IN)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'operator2',
'expr': r'''(?:case|when|then|else|end|not|and|or|is not|is|in|not in)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'keyword1',
'expr': r'''(?:CREATE DATABASE|CREATE INDEX|CREATE SEQUENCE|CREATE TABLE|CREATE TRIGGER|CREATE VIEW|SELECT|INSERT|UPDATE|DELETE|DROP DATABASE|DROP INDEX|DROP SEQUENCE|DROP TABLE|DROP TRIGGER|DROP VIEW|CREATE USER|ALTER USER|DROP USER|DROP FUNCTION|GRANT|REVOKE|CREATE FUNCTION|CREATE OR REPLACE FUNCTION|CREATE LANGUAGE|CREATE OPERATOR|CREATE TYPE)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'keyword2',
'expr': r'''(?:create database|create index|create sequence|create table|create trigger|create view|select|insert|update|delete|drop database|drop index|drop sequence|drop table|drop trigger|drop view|create user|alter user|drop user|drop function|grant|revoke|create function|create or replace function|create language|create operator|create type)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'pseudo-keyword1',
'expr': r'''(?:RETURNS|LANGUAGE|RIGHT JOIN|LEFT JOIN|INNER JOIN|OUTER JOIN|JOIN|WHERE|NULL|TRUE|FALSE|INTO|VALUES|AS|FROM|ORDER BY|ASC|DESC|LIMIT|DISTINCT|CASCADE|USING|ON)(?![A-Za-z0-9_])''',
'action': lex.make_token},
        {'name': 'pseudo-keyword2',
'expr': r'''(?:returns|language|right join|left join|inner join|outer join|join|where|null|true|false|into|values|as|from|order by|asc|desc|limit|distinct|cascade|using|on)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'type1',
'expr': '(?:VOID|ROW|SERIAL|VARCHAR|FLOAT|INTEGER|INT|TEXT|TIMESTAMPTZ|TIMESTAMP|DATETZ|DATE|TIMETZ|TIME|BOOLEAN|BOOL)(?![A-Za-z0-9_])',
'action': lex.make_token},
{'name': 'type2',
'expr': '(?:void|row|serial|varchar|float|integer|int|text|timestamptz|timestamp|datetz|date|timetz|time|boolean|bool)(?![A-Za-z0-9_])',
'action': lex.make_token},
{'name': 'function',
'expr': r'''(?:nextval|current_timestamp|current_time|current_date)(?![A-Za-z0-9_])''',
'action': lex.make_token},
{'name': 'string',
'expr': r"""'(?:\\.|[^\\'])*(?:'|$)""",
'action': lex.make_token},
{'name': 'quoted',
'expr': r'''"(?:\\.|[^\\"])*(?:"|$)''',
'action': lex.make_token},
{'name': 'bareword',
'expr': r'''[A-Za-z0-9_]+''',
'action': lex.make_token},
{'name': "default",
'expr': r'\\.|.|\n',
'action': lex.silent}
]
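
Every rule above is written twice, once per case ('keyword1'/'keyword2' and so on), which keeps upper- and lowercase SQL separately addressable in mode_sql.py's colors dict. If that distinction were ever dropped, each pair would collapse into one case-insensitive rule; a sketch:

import re

# hypothetical collapsed rule: re.IGNORECASE replaces the
# KEYWORD/keyword pair at the cost of a single shared token name
sql_keyword = re.compile(
    r'(?:select|insert|update|delete|grant|revoke)(?![A-Za-z0-9_])',
    re.IGNORECASE)

assert sql_keyword.match('SELECT')
assert sql_keyword.match('select')
assert not sql_keyword.match('selection')   # trailing \w blocks the match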

39
lex_text.py Executable file
View File

@ -0,0 +1,39 @@
import os
import ispell, lex
def make_token_spell(rule, m):
'''return a token from a hit'''
# first let's figure out the actual word we need to check
if rule.name == 'continued word':
word = '%s%s' % (m.group(1), m.group(2))
else:
word = m.group(0)
# okay, now we check the spelling; we don't spell-check all caps words
if ispell.can_spell() and \
not ispell.get_speller().check(word, caps=False, title=True):
name = "misspelled %s" % rule.name
else:
name = rule.name
return lex.Token(name, m.start(), m.end(), m.group(0))
class TextGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'continued word',
'expr': r"""([a-zA-Z][a-zA-Z-']*[a-zA-Z])-\n *([a-zA-Z][a-zA-Z-]*[a-zA-Z])""",
'action': make_token_spell},
{'name': 'word',
'expr': r"""[a-zA-Z][a-zA-Z-']*[a-zA-Z]""",
'action': make_token_spell},
{'name': "default",
'expr': r'.| |\n',
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in TextGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)
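
make_token_spell shows the action-callback contract these grammars rely on: an action receives the matched rule and the re match object and returns a lex.Token (or None to emit nothing), so arbitrary per-token processing hangs off the lexer. A hypothetical action of the same shape, flagging overlong words rather than misspelled ones:

import lex

MAX_LEN = 40

def make_token_length(rule, m):
    '''hypothetical action with make_token_spell's contract: rename
    tokens whose text exceeds MAX_LEN, pass the rest through'''
    word = m.group(0)
    if len(word) > MAX_LEN:
        name = 'overlong %s' % rule.name
    else:
        name = rule.name
    return lex.Token(name, m.start(), m.end(), word)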

87
lex_tt.py Executable file
View File

@ -0,0 +1,87 @@
#!/usr/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class XMLGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'comment',
'expr': r'''<!--(?:.| |\n)+?(?:-->|$)''',
'action': lex.make_token},
{'name': 'template',
'expr': r'''\[%(?:.| |\n)*?%\]''',
'action': lex.make_token},
{'name': 'ltb',
'expr': r'<![^>]*>',
'action': lex.make_token},
{'name': 'ltq',
'expr': r'<\?',
'action': lex.make_token},
{'name': 'gtq',
'expr': r'\?>',
'action': lex.make_token},
{'name': 'ltc',
'expr': r'</',
'action': lex.make_token},
{'name': 'gtc',
'expr': r'/>',
'action': lex.make_token},
{'name': 'lt',
'expr': r'<',
'action': lex.make_token},
{'name': 'gt',
'expr': r'>',
'action': lex.make_token},
{'name': 'nodevalue',
'expr': r'''(?<=>)(?:[^<]|\n)+?(?=<)''',
'action': lex.make_token},
{'name': 'whitespace',
'expr': r'''(?: |\n)+''',
'action': lex.silent},
{'name': 'namespace',
'expr': r'[a-zA-Z_]+:',
'action': lex.make_token},
#{'name': 'xi',
# 'expr': r'xi:',
# 'action': lex.make_token},
{'name': 'opentag',
'expr': r'(?:(?<=<)|(?<=xi:))[^ >\n/]+',
'action': lex.make_token},
{'name': 'attrvalue',
'expr': r'''(?<==)"(?:\\.|[^"\\])*(?:"|\\?$)|(?<==)'(?:\\.|[^'\\])*(?:'|\\?$)''',
'action': lex.make_token},
{'name': 'attrname',
'expr': r'[^ \n=>]+(?:(?==)|$)',
'action': lex.make_token},
{'name': 'closetag',
'expr': r'[^ =\n<>/]+',
'action': lex.make_token},
{'name': 'default',
'expr': r""".|\n""",
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in XMLGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)

83
lex_xml.py Executable file
View File

@ -0,0 +1,83 @@
#!/usr/bin/env python
# 2.3 imports
from optparse import OptionParser
# our imports
import lex
class XMLGrammar(lex.Grammar):
GRAMMAR_LIST = [
{'name': 'comment',
'expr': r'''<!--(?:.| |\n)+?(?:-->|$)''',
'action': lex.make_token},
{'name': 'ltb',
'expr': r'<![^>]*>',
'action': lex.make_token},
{'name': 'ltq',
'expr': r'<\?',
'action': lex.make_token},
{'name': 'gtq',
'expr': r'\?>',
'action': lex.make_token},
{'name': 'ltc',
'expr': r'</',
'action': lex.make_token},
{'name': 'gtc',
'expr': r'/>',
'action': lex.make_token},
{'name': 'lt',
'expr': r'<',
'action': lex.make_token},
{'name': 'gt',
'expr': r'>',
'action': lex.make_token},
{'name': 'nodevalue',
'expr': r'''(?<=>)(?:[^<]|\n)+?(?=<)''',
'action': lex.make_token},
{'name': 'whitespace',
'expr': r'''(?: |\n)+''',
'action': lex.silent},
{'name': 'namespace',
'expr': r'[a-zA-Z_]+:',
'action': lex.make_token},
#{'name': 'xi',
# 'expr': r'xi:',
# 'action': lex.make_token},
{'name': 'opentag',
'expr': r'(?:(?<=<)|(?<=xi:))[^ >\n/]+',
'action': lex.make_token},
{'name': 'attrvalue',
'expr': r'''(?<==)"(?:\\.|[^"\\])*(?:"|\\?$)|(?<==)'(?:\\.|[^'\\])*(?:'|\\?$)''',
'action': lex.make_token},
{'name': 'attrname',
'expr': r'[^ \n=>]+(?:(?==)|$)',
'action': lex.make_token},
{'name': 'closetag',
'expr': r'[^ =\n<>/]+',
'action': lex.make_token},
{'name': 'default',
'expr': r""".|\n""",
'action': lex.silent}
]
def _default_rules(self):
"""subclasses can override this to define defaults for a grammar"""
for rdir in XMLGrammar.GRAMMAR_LIST:
self.add_rule(**rdir)

1229
method.py Normal file

File diff suppressed because it is too large

24
minibuffer.py Normal file
View File

@ -0,0 +1,24 @@
import buffer
# minibuffer is a singleton
mini = None
class MiniBuffer(buffer.Buffer):
def __new__(cls, *args, **kwargs):
global mini
if mini is None:
            mini = object.__new__(cls, *args, **kwargs)
return mini
# the callback function should take one argument (window)
def __init__(self, func, method=None, tabber=None, modename=None):
buffer.Buffer.__init__(self)
self.callback = func
self.method = method
self.tabber = tabber
self.modename = modename
def name(self):
return "*Minibuffer*"
def do_callback(self):
self.callback(self.make_string())
def close(self):
global mini
mini = None
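
MiniBuffer enforces a single live instance by routing construction through __new__ and a module-level reference, with close() dropping that reference so the next MiniBuffer() starts fresh. The pattern in isolation:

_instance = None

class Singleton(object):
    def __new__(cls, *args, **kwargs):
        global _instance
        if _instance is None:
            _instance = object.__new__(cls)
        return _instance

def close():
    # dropping the module-level reference "resets" the singleton,
    # exactly as MiniBuffer.close() does with mini
    global _instance
    _instance = None

a = Singleton()
assert Singleton() is a
close()
assert Singleton() is not a

One subtlety carries over: __init__ still runs on every construction, even when the cached instance is returned, so a second MiniBuffer(...) call rebinds the callback on the existing object; that appears to be the intended behavior here.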

232
mode.py Normal file
View File

@ -0,0 +1,232 @@
import os
import sets, string
import color, default, highlight, method, point
DEBUG = False
#DEBUG = True
class Handler:
def __init__(self):
self.prefixes = sets.Set(["C-x", "C-c", "C-u"])
self.last_sequence = ''
self.curr_tokens = []
self.bindings = {}
# handle adding and removing actions
def add_action(self, action):
if action.name in self.window.application.methods:
return
        self.window.application.methods[action.name] = action
def del_action(self, name):
#del self.window.application.methods[name]
for binding in self.bindings.keys():
if self.bindings[binding] == name:
del self.bindings[binding]
def add_binding(self, name, sequence):
#assert name in self.actions, "No action called %r found" % name
assert name in self.window.application.methods, \
"No action called %r found" % name
self.bindings[sequence] = name
def add_bindings(self, name, sequences):
for sequence in sequences:
self.add_binding(name, sequence)
def del_binding(self, sequence):
del self.bindings[sequence]
def add_action_and_bindings(self, action, sequences):
self.add_action(action)
for sequence in sequences:
self.add_binding(action.name, sequence)
def handle_token(self, t):
'''self.handle_token(token): returns None, or the action to
take. raises an exception on unknown input'''
self.curr_tokens.append(t)
sequence = " ".join(self.curr_tokens)
if sequence in self.bindings:
name = self.bindings[sequence]
#act = self.actions[name]
act = self.window.application.methods[name]
self.curr_tokens = []
self.last_sequence = sequence
return act
elif t in self.prefixes:
for binding in self.bindings:
if binding.startswith(sequence):
return None
self.curr_tokens = []
self.last_sequence = sequence
raise Exception, "no action defined for %r" % (sequence)
class Fundamental(Handler):
'''This is the default mode'''
def __init__(self, w):
self.window = w
Handler.__init__(self)
#self.actions = {}
self.bindings = {}
self.add_bindings('start-of-line', ('C-a', 'HOME',))
self.add_bindings('end-of-line', ('C-e', 'END',))
self.add_bindings('backward', ('C-b', 'L_ARROW',))
self.add_bindings('forward', ('C-f', 'R_ARROW',))
self.add_bindings('center-view', ('C-l',))
self.add_bindings('next-line', ('C-n', 'D_ARROW',))
self.add_bindings('previous-line', ('C-p', 'U_ARROW',))
self.add_bindings('next-section', ('M-n', 'M-D_ARROW',))
self.add_bindings('previous-section', ('M-p', 'M-U_ARROW',))
self.add_bindings('page-down', ('C-v', 'PG_DN',))
self.add_bindings('page-up', ('M-v', 'PG_UP',))
self.add_bindings('goto-beginning', ('M-<',))
self.add_bindings('goto-end', ('M->',))
self.add_bindings('delete-left', ('DELETE', 'BACKSPACE',))
self.add_bindings('delete-left-word', ('M-DELETE', 'M-BACKSPACE',))
self.add_bindings('delete-right', ('C-d',))
self.add_bindings('delete-right-word', ('M-d',))
self.add_bindings('kill-region', ('C-w',))
self.add_bindings('copy-region', ('M-w',))
self.add_bindings('kill', ('C-k',))
self.add_bindings('copy', ('M-k',))
self.add_bindings('yank', ('C-y',))
self.add_bindings('pop-kill', ('M-y',))
self.add_bindings('right-word', ('M-f',))
self.add_bindings('left-word', ('M-b',))
self.add_bindings('set-mark', ('C-@',))
self.add_bindings('switch-buffer', ('C-x b',))
self.add_bindings('switch-mark', ('C-x C-x',))
self.add_bindings('undo', ('C-/', 'C-x u',))
self.add_bindings('redo', ('M-/', 'M-_', 'C-x r',))
self.add_bindings('goto-line', ('M-g',))
self.add_bindings('forward-chars', ('C-x M-c',))
self.add_bindings('forward-lines', ('C-x M-n',))
self.add_bindings('search', ('C-s',))
self.add_bindings('reverse-search', ('C-r',))
self.add_bindings('toggle-margins', ('M-m',))
self.add_bindings('replace', ('M-%',))
self.add_bindings('open-file', ('C-x C-f',))
self.add_bindings('kill-buffer', ('C-x k',))
self.add_bindings('list-buffers', ('C-x C-b',))
self.add_bindings('meta-x', ('M-x',))
self.add_bindings('wrap-line', ('M-q',))
self.add_bindings('transpose-words', ('M-t',))
self.add_bindings('save-buffer', ('C-x C-s',))
self.add_bindings('save-buffer-as', ('C-x C-w',))
self.add_bindings('relex-buffer', ('M-r',))
self.add_bindings('exit', ('C-x C-c',))
self.add_bindings('exit2', ('C-c C-c',))
self.add_bindings('split-window', ('C-x s',))
self.add_bindings('unsplit-window', ('C-u s',))
self.add_bindings('toggle-window', ('C-x o',))
self.add_bindings('delete-left-whitespace', ('C-c DELETE', 'C-c BACKSPACE',))
self.add_bindings('delete-right-whitespace', ('C-c d',))
self.add_bindings('insert-space', ('SPACE',))
self.add_bindings('insert-tab', ('TAB',))
self.add_bindings('insert-newline', ('RETURN',))
self.add_bindings('comment-region', ('C-c #',))
self.add_bindings('uncomment-region', ('C-u C-c #',))
self.add_bindings('justify-right', ('C-c f',))
self.add_bindings('justify-left', ('C-c b',))
self.add_bindings('indent-block', ('C-c >',))
self.add_bindings('unindent-block', ('C-c <',))
self.add_bindings('code-complete', ('M-c',))
self.add_bindings('shell-cmd', ('C-c !',))
self.add_bindings('open-aes-file', ('C-c a',))
self.add_bindings('open-console', ('M-e',))
self.add_bindings('show-bindings-buffer', ('C-c M-h','C-c M-?',))
self.add_bindings('which-command', ('M-?',))
self.add_bindings('cmd-help-buffer', ('M-h',))
self.add_bindings('set-mode', ('C-x m',))
self.add_bindings('cancel', ('C-]',))
#self.add_bindings('close-paren', (')',))
#self.add_bindings('close-brace', ('}',))
#self.add_bindings('close-bracket', (']',))
# create all the insert actions for the character ranges we like
for c in string.letters + string.digits + string.punctuation:
## closing tags are handled differently
#if c == ')' or c == ']' or c == '}':
# continue
self.add_binding('insert-string-%s' % c, c)
# initialize some stuff
self.tag_matching = False
self.grammar = None
self.lexer = None
self.tabber = None
# initialize the default colors, highlighter, etc.
self.default_color = color.pairs('default', 'default')
self.colors = {}
self.highlighter = highlight.Highlighter(self)
# get mode name
def name(self):
return "Fundamental"
# handle input tokens
def handle_token(self, t):
'''self.handle_token(token): handles input "token"'''
self.window.active_point = None
if DEBUG:
# debug mode is crash prone
act = Handler.handle_token(self, t)
if act is None:
return
else:
self.window.application.clear_error()
act.execute(self.window)
self.window.application.last_action = act.name
else:
# regular mode is hard to get stack traces from
try:
act = Handler.handle_token(self, t)
if act is None:
return
else:
self.window.application.clear_error()
act.execute(self.window)
self.window.application.last_action = act.name
            except Exception, e:
                err = "%s in mode '%s'" % (e, self.name())
                self.window.application.set_error(err)
def invalidate(self):
if self.tabber is not None:
self.tabber.invalidate()
if self.lexer is not None:
self.highlighter.invalidate_regions()
def get_regions(self):
if self.lexer is not None:
return self.highlighter.get_regions()
else:
return [[] for l in self.window.get_physical_lines()]
def visible_regions(self):
i = self.window.visible_offset()
regions = self.get_regions()
return regions[i:i+self.window.height]
def region_added(self, p, xdiff, ydiff, s):
if self.lexer is not None:
            if self.tabber is not None and s != ' ' and s != '\t':
self.tabber.invalidate()
self.highlighter.region_added(p, xdiff, ydiff, s)
def region_removed(self, p1, p2, s):
if self.lexer is not None:
if self.tabber is not None:
self.tabber.invalidate()
self.highlighter.region_removed(p1, p2, s)
def get_indentation_level(self, y):
if self.tabber is None:
return None
else:
return self.tabber.get_indentation_level(y)
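
Handler.handle_token above is a small state machine: tokens accumulate into a space-joined sequence, an exact match in self.bindings fires the bound method, a prefix keeps the sequence alive, and anything else resets and raises. A self-contained sketch of the same dispatch, simplified in that it infers prefixes from the table instead of using a fixed prefix set:

class KeyDispatcher:
    def __init__(self, bindings):
        self.bindings = bindings   # key sequence -> action name
        self.pending = []
    def feed(self, token):
        '''return an action name, or None while a longer binding is
        still possible; raise on a dead-end sequence'''
        self.pending.append(token)
        sequence = ' '.join(self.pending)
        if sequence in self.bindings:
            self.pending = []
            return self.bindings[sequence]
        for binding in self.bindings:
            if binding.startswith(sequence + ' '):
                return None        # valid prefix; wait for more input
        self.pending = []
        raise Exception, "no action defined for %r" % sequence

d = KeyDispatcher({'C-x C-s': 'save-buffer', 'C-a': 'start-of-line'})
assert d.feed('C-a') == 'start-of-line'
assert d.feed('C-x') is None
assert d.feed('C-s') == 'save-buffer'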

16
mode_blame.py Normal file
View File

@ -0,0 +1,16 @@
import color, method, mode, lex, lex_blame, re
class Blame(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_blame.BlameGrammar()
self.lexer = lex.Lexer(self.grammar)
self.colors = {
'metadata': color.build('red', 'default', 'bold'),
#'data': color.build('green', 'default', 'bold'),
}
def name(self):
return "Blame"

41
mode_c.py Normal file
View File

@ -0,0 +1,41 @@
import sets, sys
import color, mode, lex, lex_c, method, tab_c
class C(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.tag_matching = True
self.grammar = lex_c.CGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
self.colors = {
'control': color.build('blue', 'default', 'bold'),
'keyword': color.build('cyan', 'default', 'bold'),
'macro1': color.build('blue', 'default', 'bold'),
'macro2': color.build('blue', 'default', 'bold'),
'constant': color.build('magenta', 'default', 'bold'),
'header': color.build('green', 'default', 'bold'),
'label': color.build('magenta', 'default', 'bold'),
'char': color.build('green', 'default'),
'builtin': color.build('magenta', 'default', 'bold'),
'string1': color.build('green', 'default'),
'c comment': color.build('red', 'default'),
'c++ comment': color.build('red', 'default'),
'macro comment': color.build('red', 'default'),
'function name': color.build('blue', 'default'),
'integer': color.build('green', 'default'),
'float': color.build('green', 'default'),
'bizzaro': color.build('magenta', 'green'),
}
self.tabber = tab_c.CTabber(self)
def name(self):
return "C"

158
mode_console.py Normal file
View File

@ -0,0 +1,158 @@
import code, os, sets, string, StringIO, sys, traceback
import buffer, color, completer, default, highlight, method, mode, point
class Console(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.actions = {}
self.bindings = {}
self.globals = dict(w.application.globals())
self.locals = dict(w.application.locals())
self.saved_input = ""
self.add_bindings('start-of-line', ('C-a', 'HOME',))
self.add_bindings('end-of-line', ('C-e', 'END',))
self.add_bindings('backward', ('C-b', 'L_ARROW',))
self.add_bindings('forward', ('C-f', 'R_ARROW',))
self.add_bindings('delete-left', ('DELETE', 'BACKSPACE',))
self.add_bindings('delete-left-word', ('M-DELETE', 'M-BACKSPACE',))
self.add_bindings('delete-right', ('C-d',))
self.add_bindings('delete-right-word', ('M-d',))
self.add_bindings('kill-region', ('C-w',))
self.add_bindings('copy-region', ('M-w',))
self.add_bindings('kill', ('C-k',))
self.add_bindings('copy', ('M-k',))
self.add_bindings('yank', ('C-y',))
self.add_bindings('pop-kill', ('M-y',))
self.add_bindings('right-word', ('M-f',))
self.add_bindings('left-word', ('M-b',))
self.add_bindings('set-mark', ('C-@',))
self.add_bindings('switch-mark', ('C-x C-x',))
self.add_bindings('undo', ('C-/', 'C-x u',))
self.add_bindings('redo', ('M-/', 'M-_', 'C-x r',))
self.add_bindings('toggle-margins', ('M-m',))
self.add_bindings('transpose-words', ('M-t',))
self.add_bindings('delete-left-whitespace', ('C-c DELETE', 'C-c BACKSPACE',))
self.add_bindings('delete-right-whitespace', ('C-c d',))
self.add_bindings('insert-space', ('SPACE',))
self.add_bindings('insert-tab', ('TAB',))
self.add_action_and_bindings(ConsoleExec(), ('RETURN',))
self.add_action_and_bindings(ConsoleCancel(), ('C-]',))
#self.add_action_and_bindings(ConsoleTab(), ('TAB',))
# create all the insert actions for the character ranges we like
for c in string.letters + string.digits + string.punctuation:
self.add_binding('insert-string-%s' % c, c)
# get mode name
def name(self):
return "Console"
class ConsoleExec(method.Method):
def _execute(self, w, **vargs):
s = w.buffer.make_string()
w.buffer.set_data('')
a = w.application
if not a.has_buffer_name('*Console*'):
a.add_buffer(buffer.ConsoleBuffer())
b = a.bufferlist.get_buffer_by_name('*Console*')
if a.window().buffer is not b:
a.switch_buffer(b)
p = a.get_mini_buffer_prompt()
b.insert_string(b.get_buffer_end(), p + s + '\n', force=True)
if w.mode.saved_input:
s = w.mode.saved_input + '\n' + s
try:
code_obj = code.compile_command(s)
if code_obj is None:
w.mode.saved_input = s
a.set_mini_buffer_prompt(' > ')
output = None
else:
w.mode.saved_input = ''
a.set_mini_buffer_prompt('>>> ')
sys.stdout = code_out = StringIO.StringIO()
sys.stderr = code_err = StringIO.StringIO()
ok = True
try:
#exec code_obj in a.globals(), a.locals()
exec code_obj in w.mode.globals, w.mode.locals
except Exception, e:
ok = False
output = str(e) + '\n'
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
if ok:
output = code_out.getvalue()
code_out.close()
code_err.close()
except (SyntaxError, OverflowError, ValueError), e:
a.set_mini_buffer_prompt('>>> ')
#t = sys.last_traceback
t = sys.exc_traceback
output = str(e) + traceback.format_exc()
#try:
# output = a.eval(s)
#except Exception, e:
# output = str(e)
if output:
b.insert_string(b.get_buffer_end(), output, force=True)
w.goto_end()
class ConsoleCancel(method.Method):
def execute(self, w, **vargs):
w.application.close_mini_buffer()
#class ConsoleTab(method.Method):
# def execute(self, w, **vargs):
# a = w.application
# s = w.buffer.make_string()
#
# if '"' in s or "'" in s or "(" in s or ")" in s or "[" in s or "]" in s:
# return
#
# parts = s.split(".")
# if len(parts) == 0:
# return
#
# v = a.globals()
# v.update(a.locals())
# obj = None
# for part in parts[:-1]:
# if obj is None:
# if part in v:
# obj = v[part]
# else:
# return
# else:
# if hasattr(obj, part):
# obj = getattr(obj, part)
# else:
# return
#
# if obj is None:
# pool = v.keys()
# else:
# pool = dir(obj)
# candidates = [x for x in pool if x.startswith(parts[-1])]
#
# if len(candidates) == 0:
# return
#
# common = completer.find_common_string(candidates)
# s2 = '.'.join(parts[:-1]) + '.' + common
#
# w.buffer.set_data(s2)
#
# if len(candidates) > 1:
# if not a.has_buffer_name('*Console*'):
# a.add_buffer(buffer.ConsoleBuffer())
# b = a.bufferlist.get_buffer_by_name('*Console*')
# b.insert_string(b.get_buffer_end(), repr(candidates) + '\n', force=True)
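
ConsoleExec is built on the stdlib REPL idiom: code.compile_command returns None while a statement is incomplete (so the console saves the input and shows a continuation prompt), returns a code object once it parses, and raises when it never can; output is captured by temporarily rebinding sys.stdout. The same loop with the buffer plumbing stripped away (stderr handling omitted for brevity):

import code, sys, StringIO

def run_console_line(line, saved, globs, locs):
    '''returns (new_saved_input, output_text)'''
    if saved:
        source = saved + '\n' + line
    else:
        source = line
    try:
        code_obj = code.compile_command(source)
    except (SyntaxError, OverflowError, ValueError), e:
        return ('', str(e) + '\n')
    if code_obj is None:
        return (source, '')            # incomplete: wait for more input
    out = StringIO.StringIO()
    old_stdout = sys.stdout
    sys.stdout = out
    try:
        try:
            exec code_obj in globs, locs
        except Exception, e:
            print str(e)
    finally:
        sys.stdout = old_stdout        # always restore stdout
    return ('', out.getvalue())

env = {}
(saved, text) = run_console_line('def f(x):', '', env, env)
(saved, text) = run_console_line('    return x + 1', saved, env, env)
(saved, text) = run_console_line('', saved, env, env)   # blank line ends block
(saved, text) = run_console_line('print f(41)', '', env, env)
assert text == '42\n'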

45
mode_diff.py Normal file
View File

@ -0,0 +1,45 @@
import color, method, mode, lex, lex_diff, re
class Diff(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_diff.DiffGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_action_and_bindings(DiffNextSection(), ('M-n', 'M-D_ARROW',))
self.add_action_and_bindings(DiffPreviousSection(), ('M-p', 'M-U_ARROW',))
self.colors = {
'left': color.build('red', 'default', 'bold'),
'right': color.build('blue', 'default', 'bold'),
'seperator': color.build('magenta', 'default', 'bold'),
'cvs metadata': color.build('magenta', 'default', 'bold'),
'svn metadata': color.build('magenta', 'default', 'bold'),
'location': color.build('magenta', 'default', 'bold'),
}
def name(self):
return "Diff"
class DiffNextSection(method.Method):
re = re.compile("(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y + 1
while i < len(w.buffer.lines):
if self.re.match(w.buffer.lines[i]):
w.goto_line(i)
return
i += 1
class DiffPreviousSection(method.Method):
re = re.compile("(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y - 1
while i >= 0:
if self.re.match(w.buffer.lines[i]):
w.goto_line(i)
return
i -= 1
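
Both navigation methods share one pattern for unified-diff hunk headers; a quick check of what it accepts:

import re

hunk_re = re.compile("(?:^|(?<=\n))@@ [-+0-9a-z, ]* @@(?:$|\n)")

assert hunk_re.match("@@ -10,7 +10,9 @@\n")
assert hunk_re.match("@@ -1 +1 @@\n")
assert not hunk_re.match(" @@ -1 +1 @@\n")   # must start the line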

43
mode_javascript.py Normal file
View File

@ -0,0 +1,43 @@
import re, sets, string, sys
import color, commands, default, lex, lex_javascript, method, mode, point, regex, tab_javascript
class Javascript(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.tag_matching = True
self.grammar = lex_javascript.JavascriptGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build_attr(color.pairs('default', 'default'))
self.colors = {
'keyword' : color.build('cyan', 'default', 'bold'),
'pseudo-keyword' : color.build('cyan', 'default', 'bold'),
'built-in method' : color.build('cyan', 'default', 'bold'),
'function declaration' : color.build('blue', 'default', 'bold'),
'class declaration' : color.build('green', 'default'),
'string4' : color.build('green', 'default'),
'string3' : color.build('green', 'default'),
'string2' : color.build('green', 'default'),
'string1' : color.build('green', 'default'),
'comment' : color.build('red', 'default'),
'continuation' : color.build('red', 'default'),
#'operator' : color.build('yellow', 'default'),
#'delimiter' : color.build('magenta', 'default'),
'system_identifier' : color.build('cyan', 'default', 'bold'),
#'bound method' : color.build('yellow', 'default'),
'import statement' : color.build('magenta', 'green'),
'bizzaro' : color.build('magenta', 'green'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_javascript.JavascriptTabber(self)
def name(self):
return "Javascript"

48
mode_mini.py Normal file
View File

@ -0,0 +1,48 @@
import sets, string
import color, highlight, method, minibuffer, mode, point
class Mini(mode.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode.Fundamental.__init__(self, w)
# delete actions relating to multiple lines
self.del_action('center-view')
self.del_action('next-line')
self.del_action('previous-line')
self.del_action('page-down')
self.del_action('page-up')
self.del_action('goto-beginning')
self.del_action('goto-end')
self.del_action('switch-buffer')
# add some new actions for the minibuffer
self.add_action_and_bindings(MiniCallback(), ('RETURN',))
self.add_action_and_bindings(MiniTabComplete(), ('TAB',))
#self.add_action_and_bindings(MiniCancel(), ('C-]',))
def name(self):
return "Mini"
class MiniCallback(method.Method):
def execute(self, window, **vargs):
window.buffer.do_callback()
class MiniTabComplete(method.Method):
def __init__(self):
self.name = "tab-complete"
self.args = []
def execute(self, window, **vargs):
b = window.buffer
if b.tabber is None:
window.application.set_error("No tab completion")
return
s1 = b.make_string()
s2, exists, complete = b.tabber.tab_string(s1, window)
b.set_data(s2)
#class MiniCancel(method.Method):
# def execute(self, window, **vargs):
# window.application.close_mini_buffer()
# window.application.error_string = "Cancel"

43
mode_mutt.py Normal file
View File

@ -0,0 +1,43 @@
import sets, sys
import color, mode, lex, lex_mutt, method, mode_text
class Mutt(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.add_action_and_bindings(mode_text.LearnWord(), ('C-c l',))
self.add_action_and_bindings(MuttWrapParagraph(), ('M-q',))
#self.add_action_and_bindings(MuttWrapLine(), ('M-q',))
self.add_action_and_bindings(MuttInsertSpace(), ('SPACE',))
self.grammar = lex_mutt.MuttGrammar()
self.lexer = lex.Lexer(self.grammar)
self.default_color = color.build('default', 'default')
self.colors = {
'header': color.build('green', 'default', 'bold'),
'email': color.build('cyan', 'default', 'bold'),
'url': color.build('cyan', 'default', 'bold'),
'misspelled word': color.build('red', 'default', 'bold'),
'misspelled continued word': color.build('red', 'default', 'bold'),
'quote1': color.build('yellow', 'default', 'bold'),
'quote2': color.build('cyan', 'default', 'bold'),
'quote3': color.build('magenta', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
def name(self):
return "Mutt"
class MuttWrapLine(method.WrapLine):
limit = 72
class MuttWrapParagraph(method.WrapParagraph):
wrapper = MuttWrapLine
class MuttInsertSpace(mode_text.TextInsertSpace):
limit = 72
wrapper = MuttWrapParagraph

29
mode_nasm.py Normal file
View File

@ -0,0 +1,29 @@
import sets, sys
import color, mode, lex, lex_nasm
class Nasm(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_nasm.NasmGrammar()
self.lexer = lex.Lexer(self.grammar)
self.default_color = color.build('default', 'default')
self.colors = {
'keyword': color.build('cyan', 'default', 'bold'),
'nasm macros': color.build('blue', 'default', 'bold'),
'string3': color.build('green', 'default'),
'string2': color.build('green', 'default'),
'string1': color.build('green', 'default'),
'comment': color.build('red', 'default'),
'registers': color.build('yellow', 'default'),
'instructions': color.build('magenta', 'default'),
'label': color.build('blue', 'default'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
def name(self):
return "Nasm"

449
mode_perl.py Normal file
View File

@ -0,0 +1,449 @@
import re, sets, string, sys
import color, commands, default, lex, lex_perl, method, mode, point, regex, tab_perl
class Perl(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.tag_matching = True
self.grammar = lex_perl.PerlGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_action_and_bindings(PerlCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PerlHashCleanup(), ('C-c h',))
#self.add_action_and_bindings(PerlHashCleanup2(), ('C-c h',))
self.add_action_and_bindings(PerlViewModulePerldoc(), ('C-c v',))
self.add_action_and_bindings(PerlViewWordPerldoc(), ('C-c p',))
self.add_action_and_bindings(PerlWrapLine(), ('M-q',))
self.add_action_and_bindings(PerlGotoFunction(), ('C-c M-g',))
self.add_action_and_bindings(PerlWhichFunction(), ('C-c w',))
self.add_action_and_bindings(PerlListFunctions(), ('C-c W',))
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
self.colors = {
'heredoc': color.build('green', 'default'),
'endblock': color.build('red', 'default'),
'pod': color.build('red', 'default'),
'comment': color.build('red', 'default'),
'string1': color.build('green', 'default'),
'string2': color.build('green', 'default'),
'evalstring': color.build('cyan', 'default'),
'default string': color.build('green', 'default'),
'keyword': color.build('magenta', 'default'),
'length scalar': color.build('yellow', 'default'),
'system scalar': color.build('yellow', 'default'),
'system array': color.build('yellow', 'default'),
'scalar': color.build('yellow', 'default'),
'dereference': color.build('yellow', 'default'),
'array': color.build('yellow', 'default'),
'hash': color.build('yellow', 'default'),
'hash bareword index': color.build('green', 'default'),
'quoted region': color.build('cyan', 'default'),
'match regex': color.build('cyan', 'default'),
'replace regex': color.build('cyan', 'default'),
'literal hash bareword index': color.build('green', 'default'),
'interpolated scalar': color.build('yellow', 'default'),
'interpolated system scalar': color.build('yellow', 'default'),
'interpolated array': color.build('yellow', 'default'),
'interpolated system array': color.build('yellow', 'default'),
'interpolated hash': color.build('yellow', 'default'),
'label': color.build('cyan', 'default'),
'package': color.build('cyan', 'default'),
'use': color.build('cyan', 'default'),
'method': color.build('cyan', 'default'),
'methodref': color.build('cyan', 'default'),
'method declaration': color.build('cyan', 'default'),
'instance method': color.build('cyan', 'default'),
'static method': color.build('cyan', 'default'),
'built-in method': color.build('magenta', 'default'),
'bareword method': color.build('cyan', 'default'),
#'bareword': color.build('yellow', 'magenta'),
'bizzaro': color.build('magenta', 'green')
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_perl.PerlTabber(self)
self.functions = None
def name(self):
return "Perl"
def build_function_map(self):
b = self.window.buffer
self.functions = {}
for i in range(0, len(b.lines)):
m = regex.perl_function.match(b.lines[i])
if m:
self.functions[m.group(1)] = i
def get_functions(self):
if self.functions is None:
self.build_function_map()
return self.functions
def get_function_names(self):
functions = self.get_functions()
pairs = [[functions[key], key] for key in functions]
pairs.sort()
names = [x[1] for x in pairs]
return names
class PerlWrapLine(method.Method):
'''Wrap lines, comments, POD'''
margin = 80
comment_re = re.compile('^( *)(#+)( *)([^ ].*)$')
def _execute(self, w, **vargs):
pcursor = w.physical_cursor()
r = w.get_region(pcursor)
if r is None:
return
t = r[4]
if t == 'pod':
assert False, 'POD: %s' % repr(r)
elif t == 'comment':
self._wrap_comment(w)
else:
return
def _wrap_comment(self, w):
l = w.logical_cursor()
m = self.comment_re.match(w.buffer.lines[l.y])
if not m:
assert False, 'no match oh geez'
pad = m.group(1) + m.group(2) + m.group(3)
data = m.group(4) + ' '
start = l.y
end = l.y + 1
while end < len(w.buffer.lines):
m = self.comment_re.match(w.buffer.lines[end])
if m:
data += m.group(4) + ' '
end += 1
else:
break
words = [word for word in data.split() if word]
lines = [pad]
for word in words:
if len(lines[-1]) == len(pad):
lines[-1] += word
elif len(lines[-1]) + 1 + len(word) <= self.margin:
lines[-1] += ' ' + word
else:
lines.append(pad + word)
# remove the old text and add the new
start_p = point.Point(0, start)
end_p = point.Point(len(w.buffer.lines[end-1]), end-1)
w.kill(start_p, end_p)
w.insert(start_p, '\n'.join(lines))
class PerlCheckSyntax(method.Method):
'''Check the syntax of a perl file'''
def _args(self):
return [method.Argument("lib", type=type(""), prompt="Location of lib: ",
default=default.build_constant("."))]
def _execute(self, window, **vargs):
a = vargs['lib']
cmd = "perl -c -I '%s' '%s'" % (a, window.buffer.path)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
window.application.set_error("Syntax OK")
window.application.data_buffer("*Perl-Check-Syntax*", output, switch_to=False)
else:
window.application.data_buffer("*Perl-Check-Syntax*", output)
class PerlViewModulePerldoc(method.Method):
'''View documentation about this file using perldoc'''
def _execute(self, w, **vargs):
cmd = "perldoc -t -T '%s'" % w.buffer.path
(status, output) = commands.getstatusoutput(cmd)
w.application.data_buffer("*Perldoc*", output, switch_to=True)
class PerlViewWordPerldoc(method.Method):
'''View documentation about a package or function using perldoc'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
line = w.buffer.lines[cursor.y]
word_chars = string.letters + string.digits + '_:'
if line[cursor.x] not in word_chars:
w.application.set_error('error: no word selected')
return
start = cursor.x
while start > 0 and line[start - 1] in word_chars:
start -= 1
end = cursor.x + 1
        while end < len(line) and line[end] in word_chars:
end += 1
word = line[start:end]
w.application.set_error('the current word is: %r' % word)
ok = False
data = ''
perl_word_re = re.compile('^[a-zA-Z_][a-zA-Z_0-9]*(?:::[a-zA-Z_][a-zA-Z0-9]*)*$')
if not perl_word_re.match(word):
w.application.set_error('invalid word: %r' % word)
return
if '::' in word:
# we are probably dealing with a package
parts = word.split('::')
while len(parts) > 0:
newword = '::'.join(parts)
cmd = "perldoc -t -T '%s'" % newword
(status, data) = commands.getstatusoutput(cmd)
if status == 0:
word = newword
ok = True
break
parts.pop(-1)
elif ':' in word:
w.application.set_error('invalid word2222: %r' % word)
return
else:
cmd = "perldoc -t -T -f '%s'" % word
(status, data) = commands.getstatusoutput(cmd)
if status == 0:
ok = True
else:
cmd = "perldoc -t -T -f '%s'" % word
(status, data) = commands.getstatusoutput(cmd)
ok = status == 0
if not ok:
w.application.set_error('nothing found for %r' % word)
else:
w.application.data_buffer("*Perldoc*", data, switch_to=True)
w.application.set_error('displaying documentation for %r' % word)
class PerlGotoFunction(method.Method):
'''Jump to a function defined in this module'''
def _args(self):
return [method.Argument("name", type=type(""), datatype="perlfunction",
prompt="Goto Function: ")]
def _execute(self, w, **vargs):
name = vargs['name']
functions = w.mode.get_functions()
if name in functions:
number = functions[name]
p = point.Point(0, number)
w.goto(p)
else:
w.application.set_error("Function %r was not found" % name)
class PerlListFunctions(method.Method):
'''Show the user all functions defined in this module'''
def _execute(self, w, **vargs):
names = w.mode.get_function_names()
output = "\n".join(names) + "\n"
w.application.data_buffer("*Perl-List-Functions*", output, switch_to=True)
class PerlWhichFunction(method.Method):
'''Show the user what function they are in'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
i = cursor.y
name = None
while i >= 0 and name is None:
line = w.buffer.lines[i]
m = regex.perl_function.match(line)
if m:
name = m.group(1)
else:
i -= 1
if name is None:
w.application.set_error("None");
else:
w.application.set_error("line %d: %s" % (i, name))
class PerlHashCleanup(method.Method):
'''Correctly align assignment blocks and literal hashes'''
def _execute(self, window, **vargs):
cursor = window.logical_cursor()
b = window.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.perl_hash_cleanup,
regex.perl_assign_cleanup]
# if we aren't in a hash, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this hash block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this hash block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
# remove the old text and add the new
start_p = point.Point(0, start)
end_p = point.Point(0, end + 1)
window.kill(start_p, end_p)
window.insert(start_p, data)
class PerlHashCleanup2(method.Method):
'''Correctly align assignment blocks and literal hashes'''
def process_line2(self, line_regions, sep=None, indent=None):
(pre_toks, sep_tok, post_toks) = ([], None, [])
ok = False
before = True
for r in line_regions:
(start, end, attr, s, name) = r
if name == "":
continue
elif before:
if len(pre_toks) == 0:
pre_toks.append(r)
elif (name == "delimiter" and s == sep or
(sep is None and (s == "=" or s == "=>"))):
sep_tok = r
before = False
else:
pre_toks.append(r)
else:
post_toks.append(r)
ok = True
if ok:
return (True, sep_tok[3], (pre_toks, sep_tok, post_toks))
else:
return (False, "", ([], None, []))
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
b = w.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
all_regions = w.mode.get_regions()
line_regions = all_regions[cursor.y]
(ok, sep, group) = self.process_line2(line_regions)
if not ok:
raise Exception, "Not a perl hash line"
groups_by_line[cursor.y] = group
# find the beginning of this hash block
start = cursor.y
while start >= 0:
(ok2, sep2, group2) = self.process_line2(all_regions[start - 1], sep)
if not ok2:
break
start -= 1
groups_by_line[start] = group2
# find the end of this hash block
end = cursor.y
while end < len(b.lines) - 1:
(ok2, sep2, group2) = self.process_line2(all_regions[end + 1], sep)
if not ok2:
break
end += 1
groups_by_line[end] = group2
# find the minimum indented line
indent_w = None
for k in groups_by_line:
x = groups_by_line[k][0][0].start
if indent_w is None or x < indent_w:
indent_w = x
# find the max key length
key_w = None
for k in groups_by_line:
x = groups_by_line[k][0][-1].end - groups_by_line[k][0][0].start
if key_w is None or x > key_w:
key_w = x
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
line = ' ' * indent_w
l = groups_by_line[i][0][0].start
for t in groups_by_line[i][0]:
line += ' ' * max(0, t.start - l)
line += t.value
l = t.end
line += ' ' * max(0, key_w - l + groups_by_line[i][0][0].start)
line += ' ' + groups_by_line[i][1].value + ' '
l = groups_by_line[i][2][0].start
for t in groups_by_line[i][2]:
line += ' ' * max(0, t.start - l)
line += t.value
l = t.end
data += line + '\n'
# remove the old text and add the new
start_p = point.Point(0, start)
end_p = point.Point(0, end + 1)
w.kill(start_p, end_p)
w.insert(start_p, data)
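
The heart of PerlHashCleanup: capture (indent, key, separator, value) on every line of the block, take the narrowest indent and the widest key, and re-emit each line padded to those widths. The same idea reduced to plain strings, with a simplified stand-in for regex.perl_hash_cleanup:

import re

# simplified stand-in for regex.perl_hash_cleanup
hash_line = re.compile(r"^( *)(\S+) *(=>) *(.*)$")

def align_hash(lines):
    groups = [hash_line.match(l).groups() for l in lines]
    indent_w = min([len(g[0]) for g in groups])
    key_w = max([len(g[1]) for g in groups])
    out = []
    for (indent, key, sep, value) in groups:
        pad = ' ' * (key_w - len(key))
        out.append(' ' * indent_w + key + pad + ' ' + sep + ' ' + value)
    return out

print '\n'.join(align_hash([
    "    foo => 1,",
    "      quux => 'long',",
    "    x => 2,",
]))
# prints:
#     foo  => 1,
#     quux => 'long',
#     x    => 2,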

220
mode_python.py Normal file
View File

@ -0,0 +1,220 @@
import commands, os.path, sets, string, sys
import color, default, mode, lex, lex_python, method, point, regex, tab_python
import ctag_python, completer
class Python(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.tag_matching = True
self.grammar = lex_python.PythonGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_action_and_bindings(PythonCheckSyntax(), ('C-c s',))
self.add_action_and_bindings(PythonDictCleanup(), ('C-c h',))
self.add_action_and_bindings(PythonUpdateTags(), ('C-c t',))
self.add_action_and_bindings(PythonTagComplete(), ('C-c k',))
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
self.colors = {
'keyword' : color.build('cyan', 'default', 'bold'),
'pseudo-keyword' : color.build('cyan', 'default', 'bold'),
'built-in method' : color.build('cyan', 'default', 'bold'),
'method declaration' : color.build('blue', 'default', 'bold'),
'class declaration' : color.build('green', 'default'),
'string4' : color.build('green', 'default'),
'string3' : color.build('green', 'default'),
'string2' : color.build('green', 'default'),
'string1' : color.build('green', 'default'),
'comment' : color.build('red', 'default'),
'continuation' : color.build('red', 'default'),
#'operator' : color.build('yellow', 'default'),
#'delimiter' : color.build('magenta', 'default'),
'system_identifier' : color.build('cyan', 'default', 'bold'),
#'bound method' : color.build('yellow', 'default'),
'import statement' : color.build('magenta', 'green'),
'bizzaro' : color.build('magenta', 'green'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_python.PythonTabber(self)
self.ctagger = ctag_python.PythonCTagger()
def name(self):
return "Python"
class PythonCheckSyntax(method.Method):
'''Check the syntax of the current python file'''
def _args(self):
return [method.Argument("lib", type=type(""), prompt="Python Path: ",
datatype='path',
default=default.build_constant("."))]
def _execute(self, w, **vargs):
a = vargs['lib']
mod = os.path.splitext(os.path.basename(w.buffer.path))[0]
cmd = "PYTHONPATH=%s python -c 'import %s'" % (a, mod)
(status, output) = commands.getstatusoutput(cmd)
if status == 0:
w.application.set_error("Syntax OK")
w.application.data_buffer("python-syntax", output, switch_to=False)
else:
output = output + "\ncommand exit status: %d" % (status)
w.application.data_buffer("python-syntax", output, switch_to=True)
class PythonUpdateTags(method.Method):
'''Update the CTag data associated with a python buffer'''
def _args(self):
return [method.Argument("lib", prompt="Module Base: ", datatype='path',
default=default.build_constant("."))]
def _execute(self, w, **vargs):
w.mode.ctagger = ctag_python.PythonCTagger()
w.mode.ctagger.process_paths([vargs['lib']])
w.application.set_error('Tag data updated')
class PythonTagComplete(method.Method):
'''Complete a symbol using tag data'''
def _execute(self, w, **vargs):
if not w.mode.ctagger.packages:
w.application.methods['python-update-tags'].execute(w)
return
cursor = w.logical_cursor()
b = w.buffer
line = b.lines[cursor.y]
end = cursor.x
start = cursor.x
word_chars = string.letters + string.digits + '_'
#word_chars = string.letters + string.digits + string.punctuation
if start == 0:
w.application.set_error('walrus 1')
return
c = line[start - 1]
if c == '(':
w.application.set_error('goldfinch 1')
return
elif c not in word_chars:
w.application.set_error('walrus 2')
return
while start > 0 and line[start - 1] in word_chars:
start -= 1
if start == end:
w.application.set_error('walrus 3')
return
word = line[start:end]
candidates = []
seen = sets.Set()
for p in w.mode.ctagger.packages.iterkeys():
if p.startswith(word):
if p in seen:
continue
candidates.append(p)
seen.add(p)
for e in w.mode.ctagger.entries.itervalues():
if e.symbol.startswith(word):
if e.symbol in seen:
continue
candidates.append(e.symbol)
seen.add(e.symbol)
if len(candidates) == 0:
w.application.set_error('No match: %r' % word)
return
elif len(candidates) == 1:
newword = candidates[0]
if word == newword:
w.application.set_error('Already completed!')
return
else:
w.application.set_error('Unique match!')
else:
newword = completer.find_common_string(candidates)
w.application.set_error('Ambiguous match: %r' % (candidates))
b.delete_string(point.Point(start, cursor.y), point.Point(end, cursor.y))
b.insert_string(point.Point(start, cursor.y), newword)
class PythonDictCleanup(method.Method):
'''Align assignment blocks and literal dictionaries'''
def _execute(self, w, **vargs):
cursor = w.logical_cursor()
b = w.buffer
# so this is where we will store the groups that we find
groups_by_line = {}
# the regex we will try
regexes = [regex.python_dict_cleanup,
regex.python_assign_cleanup]
# if we aren't in a hash, inform the user and exit
line = b.lines[cursor.y]
myregex = None
for r in regexes:
if r.match(line):
myregex = r
if myregex is None:
raise Exception, "Not a python dict line"
groups_by_line[cursor.y] = myregex.match(line).groups()
# find the beginning of this hash block
start = 0
i = cursor.y - 1
while i >= 0:
line = b.lines[i]
m = myregex.match(line)
if not m:
start = i + 1
break
else:
groups_by_line[i] = m.groups()
i -= 1
# find the end of this hash block
end = len(b.lines) - 1
i = cursor.y + 1
while i < len(b.lines):
line = b.lines[i]
m = myregex.match(line)
if not m:
end = i - 1
break
else:
groups_by_line[i] = m.groups()
i += 1
# assume that the least indented line is correct
indent_w = min([len(groups_by_line[k][0]) for k in groups_by_line])
# find the longest hash key to base all the other padding on
key_w = max([len(groups_by_line[k][1]) for k in groups_by_line])
# for each line, format it correctly
keys = groups_by_line.keys()
keys.sort()
data = ''
for i in keys:
indent_pad = ' ' * indent_w
key = groups_by_line[i][1]
sep = groups_by_line[i][3]
value = groups_by_line[i][5]
key_pad = ' ' * (key_w - len(key))
if sep == '=':
data += indent_pad + key + key_pad + ' ' + sep + ' ' + value + '\n'
else:
data += indent_pad + key + sep + ' ' + key_pad + value + '\n'
# remove the old text and add the new
start_p = point.Point(0, start)
end_p = point.Point(0, end + 1)
w.kill(start_p, end_p)
w.insert(start_p, data)
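
PythonTagComplete collects every package and tag symbol starting with the word under the cursor and, when the match is ambiguous, substitutes the candidates' longest common prefix via completer.find_common_string (defined outside this hunk). Its presumed behavior:

def find_common_string(candidates):
    '''longest common prefix of a non-empty candidate list; presumed
    equivalent of completer.find_common_string, which is not shown here'''
    prefix = candidates[0]
    for s in candidates[1:]:
        while not s.startswith(prefix):
            prefix = prefix[:-1]
    return prefix

assert find_common_string(['set_error', 'set_mark']) == 'set_'
assert find_common_string(['unique']) == 'unique'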

126
mode_replace.py Normal file
View File

@ -0,0 +1,126 @@
import sets, string
import color, highlight, method, minibuffer, mode, point
import random
class Replace(mode.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.actions = {}
self.bindings = {}
default_actions = ((ReplaceAll(), ('a', '!',)),
(ReplaceOne(), ('y', 'SPACE',)),
(SkipReplace(), ('n', 'DELETE',)),
(CancelReplace(), ('q', 'RETURN', 'C-]', 'C-n',
'C-p', 'C-a', 'C-e', 'C-f',
'C-b')))
# add the replace actions
for pair in default_actions:
(action, sequences) = pair
assert type(sequences) == type(()), repr(pair)
self.add_action_and_bindings(action, sequences)
_find_next(w, False)
_set_prompt(w.buffer.method, w.buffer.method.old_window)
def name(self):
return "Replace"
class ReplaceOne(method.Method):
def execute(self, window, **vargs):
m = window.buffer.method
old_window = m.old_window
_replace(m, old_window)
_find_next(window, True)
_finish(m, window, old_window)
class SkipReplace(method.Method):
def execute(self, window, **vargs):
m = window.buffer.method
old_window = m.old_window
_find_next(window, True)
_finish(m, window, old_window)
class ReplaceAll(method.Method):
def execute(self, window, **vargs):
m = window.buffer.method
old_window = m.old_window
while m.p1 is not None:
_replace(m, old_window)
_find_next(window, True)
_end(window)
window.application.set_error("Replace ended")
class CancelReplace(method.Method):
def execute(self, window, **vargs):
_end(window)
window.application.set_error("Replace cancelled")
def _set_prompt(m, window):
i = m.old_window.buffer.get_point_offset(m.p1)
s = m.old_window.buffer.make_string()
count = s[i:].count(m.before)
if count > 1:
        window.application.mini_prompt = 'Replace %r with %r [ynaq] (%d occurrences)?' % (m.before, m.after, count)
else:
        window.application.mini_prompt = 'Replace %r with %r [ynaq] (1 occurrence)?' % (m.before, m.after)
def _replace(m, old_window):
old_window.buffer.delete_string(m.p1, m.p2)
if m.after:
old_window.buffer.insert_string(m.p1, m.after)
def _find_next(window, move=False):
m = window.buffer.method
old_window = m.old_window
b = old_window.buffer
s = m.before
c = old_window.logical_cursor()
(x, y) = (c.x, c.y)
if move:
x += 1
l = b.lines[y][x:]
# for each line available
while y < len(b.lines):
if s in l:
# success
x = x + l.index(s)
x2 = x + len(s)
m.p1 = point.Point(x, y)
m.p2 = point.Point(x2, y)
old_window.goto(m.p1)
old_window.application.clear_highlighted_ranges()
old_window.application.add_highlighted_range(old_window, m.p1, m.p2)
#old_window.application.highlighted_range = [old_window, m.p1, m.p2]
_set_prompt(m, old_window)
return
elif y >= len(b.lines) - 1:
# failure
break
else:
# keep trying
y += 1
l = b.lines[y]
x = 0
m.p1 = None
m.p2 = None
def _finish(m, window, old_window):
if m.p1 is None:
_end(window)
window.application.set_error("Replace ended")
else:
_set_prompt(m, old_window)
def _end(window):
window.application.close_mini_buffer()
#window.application.highlighted_range = []
window.application.clear_highlighted_ranges()
window.buffer.method.old_cursor = None
window.buffer.method.old_window = None
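
_find_next (and its twin in mode_search.py below) is a literal forward scan: the remainder of the cursor's line first, then each following line from column zero, returning the first hit. The scan on its own:

def scan_forward(lines, s, x, y):
    '''first occurrence of s at or after (x, y); None if absent'''
    rest = lines[y][x:]
    while y < len(lines):
        if s in rest:
            return (x + rest.index(s), y)
        y += 1
        if y < len(lines):
            rest = lines[y]
            x = 0
    return None

lines = ['one fish', 'two fish']
assert scan_forward(lines, 'fish', 4, 0) == (4, 0)
assert scan_forward(lines, 'fish', 5, 0) == (4, 1)
assert scan_forward(lines, 'cat', 0, 0) is None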

171
mode_search.py Normal file
View File

@ -0,0 +1,171 @@
import sets, string
import color, highlight, method, minibuffer, mode, point
class Search(mode.Fundamental):
'''This is the default mode'''
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.actions = {}
self.bindings = {}
default_actions = (
(SearchNext(), ('C-s',)),
(SearchPrevious(), ('C-r',)),
(EndSearch(), ('RETURN', 'C-n', 'C-p', 'C-a', 'C-e', 'C-f', 'C-b')),
(CancelSearch(), ('C-]',)),
(DeleteLeft(), ('DELETE', 'BACKSPACE',)),
)
# add the search actions
for pair in default_actions:
(action, sequences) = pair
assert type(sequences) == type(()), repr(pair)
self.add_action_and_bindings(action, sequences)
# create all the insert actions for the character ranges we like
for collection in (string.letters, string.digits, string.punctuation):
for c in collection:
self.add_action_and_bindings(InsertSearchString(c), (c,))
self.add_action_and_bindings(InsertSearchString(' '), ('SPACE',))
def name(self):
return "Search"
class SearchNext(method.Method):
def execute(self, window, **vargs):
window.buffer.method.direction = 'next'
s = window.buffer.make_string()
if not s:
s = window.application.last_search
window.buffer.set_data(s)
else:
old_window = window.buffer.method.old_window
_find_next(old_window, window, move=True)
class SearchPrevious(method.Method):
def execute(self, window, **vargs):
window.buffer.method.direction = 'previous'
s = window.buffer.make_string()
if not s:
return
else:
old_window = window.buffer.method.old_window
_find_previous(old_window, window, move=True)
class EndSearch(method.Method):
def execute(self, window, **vargs):
old_window = window.buffer.method.old_window
old_cursor = window.buffer.method.old_cursor
_end(window)
old_window.set_mark_point(old_cursor)
window.application.set_error("Mark set to search start")
class CancelSearch(method.Method):
def execute(self, window, **vargs):
old_window = window.buffer.method.old_window
old_cursor = window.buffer.method.old_cursor
old_window.goto(old_cursor)
_end(window)
window.application.set_error("Search cancelled")
class DeleteLeft(method.Method):
def execute(self, window, **vargs):
window.left_delete()
old_cursor = window.buffer.method.old_cursor
old_window = window.buffer.method.old_window
old_window.goto(old_cursor)
if window.buffer.method.direction == 'next':
_find_next(old_window, window, move=False)
else:
_find_previous(old_window, window, move=False)
class InsertSearchString(method.Method):
def __init__(self, s):
self.name = 'insert-search-string-%s' % (s)
self.string = s
self.args = []
self.help = None
def execute(self, window, **vargs):
window.insert_string(self.string)
s = window.buffer.make_string()
if not s:
return
else:
old_window = window.buffer.method.old_window
if window.buffer.method.direction == 'next':
_find_next(old_window, window, move=False)
else:
_find_previous(old_window, window, move=False)
def _end(window):
s = window.buffer.make_string()
window.application.last_search = s
window.application.close_mini_buffer()
#window.application.highlighted_range = []
window.application.clear_highlighted_ranges()
window.buffer.method.old_cursor = None
window.buffer.method.old_window = None
def _find_previous(old_window, new_window, move=False):
s = new_window.buffer.make_string()
old_buffer = old_window.buffer
c = old_window.logical_cursor()
(x, y) = (c.x, c.y)
if move:
x -= 1
l = old_buffer.lines[y][:x + len(s)]
# for each line available
while y >= 0:
if s in l:
# success
            x = l.rindex(s)
old_window.goto(point.Point(x, y))
#new_window.application.highlighted_range = [old_window,
# point.Point(x,y),
# point.Point(x + len(s), y)]
old_window.application.clear_highlighted_ranges()
new_window.application.add_highlighted_range(old_window,
point.Point(x,y),
point.Point(x + len(s), y))
break
        elif y <= 0:
# failure
break
else:
# keep trying
y -= 1
l = old_buffer.lines[y]
x = 0
def _find_next(old_window, new_window, move=False):
s = new_window.buffer.make_string()
old_buffer = old_window.buffer
c = old_window.logical_cursor()
(x, y) = (c.x, c.y)
if move:
x += 1
l = old_buffer.lines[y][x:]
# for each line available
while y < len(old_buffer.lines):
if s in l:
# success
x = l.index(s) + x
old_window.goto(point.Point(x, y))
#new_window.application.highlighted_range = [old_window,
# point.Point(x,y),
# point.Point(x + len(s), y)]
old_window.application.clear_highlighted_ranges()
new_window.application.add_highlighted_range(old_window,
point.Point(x,y),
point.Point(x + len(s), y))
break
elif y >= len(old_buffer.lines) - 1:
# failure
break
else:
# keep trying
y += 1
l = old_buffer.lines[y]
x = 0
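
The two _find helpers above are the whole search engine: scan the rest of
the current line, then whole lines in one direction. A minimal standalone
sketch of the same forward scan, with an illustrative name and a plain
list-of-strings interface instead of the editor's buffer:

    def find_next(lines, s, x, y):
        """Return (x, y) of the next match of s at or after (x, y), or None."""
        l = lines[y][x:]
        while y < len(lines):
            if s in l:
                return (l.index(s) + x, y)
            y += 1
            if y < len(lines):
                l = lines[y]
                x = 0
        return None

    find_next(['foo bar', 'baz foo'], 'foo', 1, 0)  # returns (4, 1)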

36
mode_sh.py Normal file

@ -0,0 +1,36 @@
import commands, os.path, sets, sys
import color, default, mode, lex, lex_sh, method, tab_sh
class Sh(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_sh.ShGrammar()
self.lexer = lex.Lexer(self.grammar)
self.colors = {
'builtin': color.build('cyan', 'default', 'bold'),
'method': color.build('magenta', 'default', 'bold'),
'reserved': color.build('magenta', 'default', 'bold'),
#'delimiter': color.build('magenta', 'default', 'bold'),
'delimiter': color.build('default', 'default', 'bold'),
'operator': color.build('magenta', 'default', 'bold'),
'redirection': color.build('blue', 'default', 'bold'),
'string1': color.build('green', 'default'),
'string2': color.build('green', 'default'),
'eval': color.build('cyan', 'default', 'bold'),
'comment': color.build('red', 'default'),
'continuation': color.build('red', 'default'),
'variable0': color.build('yellow', 'default', 'bold'),
'variable1': color.build('yellow', 'default', 'bold'),
'variable2': color.build('yellow', 'default', 'bold'),
'variable3': color.build('yellow', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_sh.ShTabber(self)
def name(self):
return "Sh"

37
mode_sql.py Normal file

@ -0,0 +1,37 @@
import commands, os.path, sets, sys
import color, default, mode, lex, lex_sql, method, tab, tab_sql
class Sql(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.tag_matching = True
self.grammar = lex_sql.SqlGrammar()
self.lexer = lex.Lexer(self.grammar)
self.colors = {
'sql comment': color.build('red', 'default', 'bold'),
'c comment': color.build('red', 'default', 'bold'),
'operator1': color.build('yellow', 'default', 'bold'),
'operator2': color.build('yellow', 'default', 'bold'),
'attribute1': color.build('magenta', 'default', 'bold'),
'attribute2': color.build('magenta', 'default', 'bold'),
'keyword1': color.build('cyan', 'default', 'bold'),
'keyword2': color.build('cyan', 'default', 'bold'),
'pseudo-keyword1': color.build('cyan', 'default', 'bold'),
'pseudo-keyword2': color.build('cyan', 'default', 'bold'),
'type1': color.build('green', 'default', 'bold'),
'type2': color.build('green', 'default', 'bold'),
'function': color.build('yellow', 'default', 'bold'),
'quoted': color.build('yellow', 'default', 'bold'),
'string': color.build('green', 'default', 'bold'),
'bareword': color.build('default', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_sql.SQLTabber(self)
def name(self):
return "Sql"

64
mode_text.py Normal file

@ -0,0 +1,64 @@
import sets, sys
import color, mode, lex, lex_text, method, ispell
class Text(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.add_action_and_bindings(LearnWord(), ('C-c l',))
self.add_action_and_bindings(TextInsertSpace(), ('SPACE',))
self.add_action_and_bindings(method.WrapParagraph(), ('M-q',))
self.grammar = lex_text.TextGrammar()
self.lexer = lex.Lexer(self.grammar)
self.default_color = color.build('default', 'default')
self.colors = {
'misspelled word': color.build('red', 'default', 'bold'),
'misspelled continued word': color.build('red', 'default', 'bold'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
def name(self):
return "Text"
class TextInsertSpace(method.Method):
limit = 80
#wrapper = method.WrapLine
wrapper = method.WrapParagraph
def execute(self, window, **vargs):
window.insert_string(' ')
cursor = window.logical_cursor()
i = cursor.y
if len(window.buffer.lines[i]) > self.limit:
self.wrapper().execute(window)
class LearnWord(method.Method):
def execute(self, window, **vargs):
if window.mode.highlighter.tokens is None:
window.mode.highlighter.lex_buffer()
cursor = window.logical_cursor()
cursor_offset = window.get_cursor_offset()
tok = None
for t in window.mode.highlighter.tokens:
if t.start <= cursor_offset and cursor_offset < t.end:
tok = t
break
if tok:
word = tok.string
if tok.name.startswith('all-caps'):
s = "%r is all-caps" % (word)
elif tok.name.startswith('misspelled'):
ispell.get_speller().learn(word)
window.mode.highlighter.invalidate_tokens()
s = "Added %r to personal dictionary" % (word)
else:
s = "%r is already in the dictionary" % (word)
else:
s = "No word to learn found"
window.application.set_error(s)

31
mode_tt.py Normal file

@ -0,0 +1,31 @@
import sets, sys
import color, commands, default, lex, lex_xml, method, mode, point, regex, tab_xml
class Template(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_xml.XMLGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
self.colors = {
'markup': color.build('red', 'default'),
'namespace': color.build('magenta', 'default'),
'opentag': color.build('blue', 'default'),
'nodevalue': color.build('default', 'default'),
'attrname': color.build('cyan', 'default'),
'attrvalue': color.build('green', 'default'),
'closetag': color.build('blue', 'default'),
'comment': color.build('red', 'default'),
'bizzaro': color.build('magenta', 'green'),
}
self.tabber = tab_xml.XMLTabber(self)
def name(self):
return "XML"

54
mode_which.py Normal file

@ -0,0 +1,54 @@
import sets, string
import color, method, minibuffer, mode, point
import random
class Which(mode.Fundamental):
'''Shows which command the next key sequence is bound to'''
def __init__(self, w):
mode.Fundamental.__init__(self, w)
old_mode = w.buffer.method.old_window.mode
self.actions = dict(old_mode.actions)
self.bindings = dict(old_mode.bindings)
for name in self.actions.keys():
old_method = self.actions[name]
new_method = HelpWrapper(old_method)
self.actions[name] = new_method
def name(self):
return "Which"
def handle_token(self, t):
'''self.handle_token(token): handles input "token"'''
self.window.active_point = None
try:
act = mode.Handler.handle_token(self, t)
if act is None:
return
else:
self.window.application.clear_error()
act.execute(self.window)
self.window.application.last_action = act.name
except Exception, e:
self.window.application.set_error('%r is not bound to anything' % self.last_sequence)
_end(self.window)
class HelpWrapper(method.Method):
def __init__(self, m):
self.name = "help-for-%s" % m.name
self.args = []
self.help = 'provide help for the %r command' % m.name
self._m_name = m.name
self._m_help = m.help
def _execute(self, window, **vargs):
seq = window.mode.last_sequence
s = '%r is %r: %s' % (seq, self._m_name, self._m_help)
window.application.set_error(s)
_end(window)
def _end(window):
window.application.close_mini_buffer()
window.buffer.method.old_cursor = None
window.buffer.method.old_window = None

32
mode_xml.py Normal file

@ -0,0 +1,32 @@
import sets, sys
import color, commands, default, lex, lex_xml, method, mode, point, regex, tab_xml
class XML(mode.Fundamental):
def __init__(self, w):
mode.Fundamental.__init__(self, w)
self.grammar = lex_xml.XMLGrammar()
self.lexer = lex.Lexer(self.grammar)
self.add_bindings('close-paren', (')',))
self.add_bindings('close-brace', ('}',))
self.add_bindings('close-bracket', (']',))
self.default_color = color.build('default', 'default')
self.colors = {
'namespace': color.build('magenta', 'default'),
'opentag': color.build('blue', 'default'),
'nodevalue': color.build('default', 'default'),
'attrname': color.build('cyan', 'default'),
'attrvalue': color.build('green', 'default'),
'closetag': color.build('blue', 'default'),
'comment': color.build('red', 'default'),
'bizzaro': color.build('magenta', 'green'),
}
#self.highlighter.lex_buffer()
#self.get_regions()
self.tabber = tab_xml.XMLTabber(self)
def name(self):
return "XML"

66
point.py Normal file

@ -0,0 +1,66 @@
LOGICAL = "logical"
PHYSICAL = "physical"
class Point:
def __init__(self, x=0, y=0, t=None):
assert t is None or \
t == LOGICAL or \
t == PHYSICAL
self.x = x
self.y = y
self.type = t
def offset(self, x, y, t=None):
return Point(self.x + x, self.y + y, t)
def copy(self):
return Point(self.x, self.y, self.type)
def __cmp__(self, other):
assert self.type == other.type or \
self.type is None or \
other.type is None
c = cmp(self.y, other.y)
if c == 0:
return cmp(self.x, other.x)
else:
return c
def __add__(self, other):
assert self.type == other.type or \
self.type is None or \
other.type is None
return Point(self.x + other.x,
self.y + other.y)
def __iadd__(self, other):
assert self.type == other.type or \
self.type is None or \
other.type is None
self.x += other.x
self.y += other.y
return self
def __sub__(self, other):
assert self.type == other.type or \
self.type is None or \
other.type is None
return Point(self.x - other.x,
self.y - other.y)
def __isub__(self, other):
assert self.type == other.type or \
self.type is None or \
other.type is None
self.x -= other.x
self.y -= other.y
return self
def __abs__(self):
return Point(abs(self.x), abs(self.y))
def __repr__(self):
if self.type is None:
return "Point(%d, %d)" % (self.x, self.y)
else:
return "Point(%s:%d, %d)" % (self.type[0], self.x, self.y)

21
regex.py Normal file

@ -0,0 +1,21 @@
import re
# whitespace regexes
leading_whitespace = re.compile('^ *')
trailing_whitespace = re.compile(' *$')
whitespace = re.compile('^ *$')
# word regexes
word = re.compile('^[A-Za-z0-9_]+$')
word_char = re.compile('^[A-Za-z0-9_]$')
# perl regexes
perl_base = re.compile("^sub ")
perl_hash_cleanup = re.compile("^( *)([^ ]+|'(?:\\.|[^'\\'])*'|\"(?:\\.|[^\\\"]*)\")( *)(=>)( *)([^ ].*)$")
perl_assign_cleanup = re.compile("^( *)((?:my |our )?[^ ]+)( *)(=(?!>))( *)([^ ].*)$")
perl_function = re.compile("^ *sub ([A-Za-z_][A-Za-z0-9_]*)")
# python regexes
python_base = re.compile("^[^ ]")
python_dict_cleanup = re.compile("^( *)((?:[^'\":]|'(?:\\.|[^\\'])*'|\"(?:\\.|[^\\'])*)+?)( *)(:)( *)([^ ].*)$")
python_assign_cleanup = re.compile("^( *)([^ ]+)( *)(=)( *)([^ ].*)$")
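
The word patterns are anchored on both ends, so they test whole strings
rather than searching; for example:

    assert word.match('foo_bar2')            # entirely word characters
    assert not word.match('foo-bar')         # '-' is not a word character
    m = leading_whitespace.match('    x')
    assert m.end() == 4                      # four leading spaces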

10
run.py Executable file

@ -0,0 +1,10 @@
#!/usr/bin/env python
import commands, os, sys
(status, path) = commands.getstatusoutput('which python')
args = ['python', '-O', 'application.py']
args.extend(sys.argv[1:])
os.execv(path, args)

140
tab.py Normal file

@ -0,0 +1,140 @@
import point, regex, util
class Tabber:
def __init__(self, m):
self.mode = m
self.levels = None
def invalidate(self):
self.levels = None
def calculate_tabs(self):
raise Exception, "Unimplemented 22"
def get_indentation_level(self, y):
if self.levels is None:
self.calculate_tabs()
return self.levels[y]
class TokenStackTabber(Tabber):
def __init__(self, m):
Tabber.__init__(self, m)
# state variables for tab processing
self.errors = None
self.y = None
self.index = None
self.tab_stack = None
self.line_depth = None
self.start_offset = None
self.end_offset = None
def stack_append(self, item):
self.tab_stack.append(item)
def stack_pop(self):
assert len(self.tab_stack) > 1, "tab stack underflow"
self.tab_stack.pop(-1)
def handle_token(self, prev_token, token, next_token, y=None):
s = token.string
if s == "(":
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = self.mode.window.buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == ")":
if self.tab_stack[-1][0] == "(":
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
else:
# mismatched tag
if self.errors is False:
err = "line %d: expected %r, got %r" % \
(self.tab_stack[-1][0], s)
self.mode.window.application.set_error(err)
self.errors = True
def base_indentation_level(self, y):
return y == 0
def calculate_tabs(self, start=0, goal=None):
lines = self.mode.window.buffer.lines
tokens = self.mode.highlighter.tokens
buffer = self.mode.window.buffer
if self.levels is None:
self.levels = [None] * (len(lines))
self.errors = False
self.index = 0
self.y = start
self.tab_stack = [(None, 0)]
# we want to process every logical line in the file
while self.y < len(lines):
self.line_depth = self.tab_stack[-1][1]
if self.index >= len(tokens):
self.levels[self.y] = self.line_depth
self.y += 1
continue
line = lines[self.y]
self.start_offset = buffer.get_point_offset(point.Point(0, self.y))
self.end_offset = buffer.get_point_offset(point.Point(len(line), self.y))
# we want to find all the tokens on the line we are currently processing
while self.index < len(tokens):
token = tokens[self.index]
# previous token, or None if token is first on the line
if self.index > 0 and \
tokens[self.index - 1].start > self.start_offset:
prev_token = tokens[self.index - 1]
else:
prev_token = None
# next token, or None if token is last on the line
if self.index < len(tokens) - 1 and \
tokens[self.index + 1].end <= self.end_offset:
next_token = tokens[self.index + 1]
else:
next_token = None
if token.end < self.start_offset:
# token is too far back
pass
elif token.start > self.end_offset:
# token is too far forward
break
else:
self.handle_token(prev_token, token, next_token, self.y)
if token.end >= self.end_offset:
# token continues to the next line
break
else:
self.index += 1
self.levels[self.y] = self.line_depth
self.y += 1
if goal is not None and self.y > goal:
return
def get_indentation_level(self, y):
if self.levels is not None and self.levels[y] is not None:
result = self.levels[y]
else:
i = max(0, y - 1)
while i > 0:
if self.base_indentation_level(i):
break
i -= 1
self.calculate_tabs(i, y)
result = self.levels[y]
if result == -1:
return None
return result
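
The idea behind TokenStackTabber is a stack of (opening tag, column)
pairs: openers push the column to indent to, closers pop, and each line
indents to whatever column is on top when the line starts. A toy version
of that loop over single characters (the function and its interface are
illustrative only):

    def indent_levels(lines):
        stack = [(None, 0)]                   # (opening tag, indent column)
        levels = []
        for line in lines:
            levels.append(stack[-1][1])       # indent to top-of-stack column
            for i, c in enumerate(line):
                if c == '(':
                    stack.append((c, i + 1))  # indent to just past the paren
                elif c == ')' and stack[-1][0] == '(':
                    stack.pop()
        return levels

    indent_levels(['foo(bar,', 'baz)'])       # returns [0, 4]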

86
tab_c.py Normal file

@ -0,0 +1,86 @@
import tab, point
class CTabber(tab.TokenStackTabber):
close_tags = {')': '(',
']': '[',
'}': '{'}
def stack_append_const(self, c):
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_append_unique_const(self, c):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def stack_pop_all_const(self, *c_args):
while self.tab_stack[-1][0] in c_args:
self.stack_pop()
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if name == "c comment":
if self.tab_stack[-1][0] != "c comment":
self.stack_append(("c comment", self.tab_stack[-1][1]))
else:
self.line_depth += 1
p = point.Point(len(buffer.lines[self.y]), self.y)
offset = buffer.get_point_offset(p)
if token.end <= offset or next_token is not None:
self.stack_pop()
elif name == "macro":
self.line_depth -= 4
elif name == "operator" and next_token is None:
self.stack_append_unique_const("cont")
elif name == "label":
self.line_depth -= 4
#self.line_depth = 0
elif name == "keyword":
if (s == "do" or
s == "else" or
s == "for" or
s == "if" or
s == "while"):
self.stack_append_const("block")
elif s == "case":
if prev_token is None:
self.line_depth -= 4
elif name == "delimiter":
if s == "{" or s == "(" or s == "[":
if s == "{":
if prev_token is None and self.tab_stack[-1][0] == "block":
self.line_depth -= 4
self.stack_pop_const("block")
#self.stack_pop_const("block", "cont")
else:
self.stack_pop_const("cont")
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == "}" or s == ")" or s == "]":
if s == "}":
self.stack_pop_all_const("block", "cont")
else:
self.stack_pop_all_const("cont")
if self.tab_stack[-1][0] == self.close_tags[s]:
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif self.errors is False:
err = "tag mismatch, line %d: expected %r, got %r" % \
(self.y, self.tab_stack[-1][0], s)
self.mode.window.application.set_error(err)
self.errors = True
if s == "}":
self.stack_pop_all_const("block", "cont")
elif (s == "=" or s == "?") and next_token is None:
self.stack_append_unique_const("cont")
elif s == ',':
self.stack_pop_all_const("cont")
elif s == ';':
self.stack_pop_all_const("block", "cont")

60
tab_javascript.py Normal file

@ -0,0 +1,60 @@
import tab
class JavascriptTabber(tab.TokenStackTabber):
close_tags = {'}': '{',
')': '(',
']': '['}
def error(self, s):
self.mode.window.application.set_error(s)
self.errors = True
def base_indentation_level(self, y):
if y == 0:
return True
lines = self.mode.window.buffer.lines
if y < len(lines) and lines[y].startswith('function '):
return True
return False
def stack_append_const(self, c):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def stack_pop_all_const(self, *c_args):
while self.tab_stack[-1][0] in c_args:
self.stack_pop()
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if name == "delimiter":
if s == "{" or s == "(" or s == "[":
if prev_token is None:
self.stack_pop_all_const("cont")
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == "}" or s == ")" or s == "]":
self.stack_pop_all_const("cont")
if self.tab_stack[-1][0] == self.close_tags[s]:
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif self.errors is False:
self.error("tag mismatch, line %d: expected %r, got %r" %
(self.y, self.tab_stack[-1][0], s))
if s == "}":
self.stack_pop_all_const("cont")
else:
pass
elif s == "=" and next_token is None:
self.stack_append_const("cont")
elif s == ";":
self.stack_pop_all_const("cont")

85
tab_perl.py Normal file

@ -0,0 +1,85 @@
import tab
class PerlTabber(tab.TokenStackTabber):
close_tags = {'}': '{',
')': '(',
']': '['}
def error(self, s):
self.mode.window.application.set_error(s)
self.errors = True
def base_indentation_level(self, y):
if y == 0:
return True
lines = self.mode.window.buffer.lines
if y < len(lines) and lines[y].startswith('sub '):
return True
return False
def stack_append_const(self, c):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def stack_pop_all_const(self, *c_args):
while self.tab_stack[-1][0] in c_args:
self.stack_pop()
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if name == "delimiter":
if s == "{" or s == "(" or s == "[":
if prev_token is None:
self.stack_pop_all_const("cont")
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == "}" or s == ")" or s == "]":
self.stack_pop_all_const("cont")
if self.tab_stack[-1][0] == self.close_tags[s]:
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif self.errors is False:
self.error("tag mismatch, line %d: expected %r, got %r" %
(self.y, self.tab_stack[-1][0], s))
if s == "}":
self.stack_pop_all_const("cont")
else:
pass
elif s == "=" and next_token is None:
self.stack_append_const("cont")
elif s == ";":
self.stack_pop_all_const("cont")
elif name == "heredoc":
if token.start > self.start_offset:
self.stack_append(('heredoc', -1))
elif token.end <= self.end_offset:
self.stack_pop_all_const("heredoc")
self.stack_pop_all_const("cont")
elif name == "pod":
if token.start >= self.start_offset:
self.stack_append(('pod', -1))
elif token.end <= self.end_offset:
assert self.tab_stack[-1][0] == 'pod', "expected pod on tab stack"
self.stack_pop()
self.line_depth = self.tab_stack[-1][1]
if (name != "heredoc" and
name != "endblock" and
name != "pod" and
name != "comment" and
s != "}" and
s != ";" and
s != "(" and
s != "{" and
s != "[" and
s != ",") and next_token is None:
self.stack_append_const("cont")

141
tab_python.py Normal file

@ -0,0 +1,141 @@
import point, regex, util, tab
class PythonTabber(tab.Tabber):
start_tags = {'(': ')',
'{': '}',
'[': ']'}
close_tags = {')': '(',
'}': '{',
']': '['}
def __init__(self, m):
tab.Tabber.__init__(self, m)
self.y = None
self.index = None
self.tab_stack = None
self.line_depth = None
def stack_append(self, item):
self.tab_stack.append(item)
def stack_pop(self):
self.tab_stack.pop(-1)
def base_indentation_level(self, y):
return y == 0
def calculate_tabs(self, start=0, goal=None):
lines = self.mode.window.buffer.lines
tokens = self.mode.highlighter.tokens
buffer = self.mode.window.buffer
if self.levels is None:
self.levels = [None] * (len(lines))
self.index = 0
self.y = start
self.base = 0
self.tab_stack = []
# we want to process every logical line in the file
while self.y < len(lines):
line = lines[self.y]
start_index = self.index
start_point = point.Point(0, self.y)
start_offset = buffer.get_point_offset(start_point)
end_point = point.Point(len(line), self.y)
end_offset = buffer.get_point_offset(end_point)
# we want to find all the tokens on the line we are currently processing
while self.index < len(tokens):
token = tokens[self.index]
if token.end > end_offset:
break
self.index += 1
self.handle_line(line,
start_offset, start_index,
end_offset, self.index)
self.levels[self.y] = self.line_depth
self.y += 1
if goal is not None and self.y > goal:
break
def get_line_depth(self):
if len(self.tab_stack) > 0:
return self.tab_stack[-1][1]
else:
return self.base
def handle_line(self, line, start_offset, start_index, end_offset, end_index):
self.line_depth = self.get_line_depth()
tokens = self.mode.highlighter.tokens
if start_index >= len(tokens):
return
if regex.whitespace.match(line):
return
if len(self.tab_stack) == 0 and tokens[start_index].start >= start_offset:
self.base = util.count_leading_whitespace(line)
for i in range(start_index, end_index):
token = tokens[i]
s = token.string
if s in self.start_tags:
if i < end_index - 1:
i = tokens[i+1].start - start_offset
elif len(self.tab_stack) > 0:
i = self.tab_stack[-1][1] + 4
else:
i = self.base + 4
self.stack_append((s, i))
elif s in self.close_tags:
assert len(self.tab_stack), "Unbalanced closing tag"
assert self.tab_stack[-1][0] == self.close_tags[s], "Unmatched closing tag"
self.stack_pop()
if i == start_index:
self.line_depth = self.get_line_depth()
if tokens[start_index].start < start_offset:
self.line_depth = -1
prebase = self.base
s = tokens[start_index].string
e = tokens[end_index-1].string
if s == "except" or s == "elif" or s == "else":
if self.y > 0 and self.line_depth == self.levels[self.y - 1]:
self.line_depth = max(0, self.line_depth - 4)
elif (s == "return" or s == "raise" or s == "yield" or s == "break" or
s == "pass" or s == 'continue'):
self.base = max(0, self.base - 4)
if e == "\\":
if len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
pass
else:
self.stack_append(("\\", prebase + 4))
return
elif e == ":":
self.base += 4
elif len(self.tab_stack) and self.tab_stack[-1][0] == "\\":
self.stack_pop()
def get_indentation_level(self, y):
if self.levels is not None and self.levels[y] is not None:
result = self.levels[y]
else:
i = max(0, y - 1)
while i > 0:
if self.base_indentation_level(i):
break
i -= 1
self.calculate_tabs(i, y)
result = self.levels[y]
if result == -1:
return None
return result

55
tab_sh.py Normal file

@ -0,0 +1,55 @@
import tab, point
class ShTabber(tab.TokenStackTabber):
close_tags = {'}': '{',
')': '(',
']': '[',
'done': 'do',
'fi': 'then',
'esac': 'case'}
def stack_append_const(self, c):
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_append_unique_const(self, c):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def stack_pop_all_const(self, *c_args):
while self.tab_stack[-1][0] in c_args:
self.stack_pop()
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if next_token is None and name != "continuation":
self.stack_pop_all_const("continued")
if name == 'reserved' or name == "keyword" or name == "delimiter":
if s == ")" and self.tab_stack[-1][0] == "case":
pass
elif s == "then" or s == "do" or s == "(" or s == "[" or s == "case" or s == "{":
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == "else":
assert self.tab_stack[-1][0] == "if", "bbbsssxxx"
d = self.tab_stack[-1][1] - self.tab_stack[-2][1]
self.line_depth -= d
elif s in self.close_tags:
if self.tab_stack[-1][0] == self.close_tags[s]:
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif self.errors is False:
err = "tag mismatch, line %d: expected %r, got %r" % \
(self.y, self.tab_stack[-1][0], s)
self.mode.window.application.set_error(err)
self.errors = True
elif name == "continuation":
self.stack_append_unique_const("continued")

51
tab_sql.py Normal file

@ -0,0 +1,51 @@
import tab
class SQLTabber(tab.TokenStackTabber):
close_tags = {')': '('}
def error(self, s):
self.mode.window.application.set_error(s)
self.errors = True
def base_indentation_level(self, y):
return y == 0
def stack_append_const(self, c):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + 4))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def stack_pop_all_const(self, *c_args):
while self.tab_stack[-1][0] in c_args:
self.stack_pop()
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if name == 'delimiter':
if s == '(':
if next_token is None:
self.stack_append((s, self.tab_stack[-1][1] + 4))
else:
p = buffer.get_offset_point(next_token.start)
self.stack_append((s, p.x))
elif s == ')':
if self.tab_stack[-1][0] == self.close_tags[s]:
self.stack_pop()
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif self.errors is False:
self.error("tag mismatch, line %d: expected %r, got %r" %
(self.y, self.tab_stack[-1][0], s))
elif s == ',':
pass
elif s == ';':
pass
elif name == 'string':
if token.start > self.start_offset:
self.stack_append(('string', -1))
if token.end <= self.end_offset:
self.stack_pop_all_const("string")

48
tab_xml.py Normal file

@ -0,0 +1,48 @@
import tab
class XMLTabber(tab.TokenStackTabber):
close_tags = {'}': '{',
')': '(',
']': '['}
def stack_append_const(self, c, n):
if self.tab_stack[-1][0] != c:
self.stack_append((c, self.tab_stack[-1][1] + n))
def stack_pop_const(self, *c_args):
if self.tab_stack[-1][0] in c_args:
self.stack_pop()
def base_indentation_level(self, y):
return False
def handle_token(self, prev_token, token, next_token, y=None):
buffer = self.mode.window.buffer
name = token.name
s = token.string
if name == 'opentag':
if next_token is None:
x = len(s) + 2
else:
x = next_token.start - token.start + 1
# x is an offset from the current indentation level
self.stack_append_const('cont', x)
elif name == 'gtc':
self.stack_pop_const('cont')
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
elif name == 'gt':
self.stack_pop_const('cont')
if prev_token is None:
self.line_depth = self.tab_stack[-1][1]
if self.tab_stack[-1][0] == 'close':
self.stack_pop_const('close')
else:
self.stack_append(('tag', self.tab_stack[-1][1] + 4))
elif name == 'ltc':
self.stack_pop_const('cont')
self.stack_pop_const('tag')
l = self.tab_stack[-1][1]
self.stack_append(('close', l))
if prev_token is None:
self.line_depth = l

15
test.py Normal file

@ -0,0 +1,15 @@
import lex2
lines = [
'this is the first line',
'<< EOF;',
' abracadra hocus pocus',
' EOF',
'EOF',
'"this is a double-quoted string" and this is not...',
"we're done",
]
l = lex2.Lexer()
l.lex(lines)
for t in l:
print '%-20r %r' % (t.rule.name, t.string)

52
util.py Normal file

@ -0,0 +1,52 @@
import os, pwd, regex
def expand_tilde(path):
if not path.startswith('~'):
return path
parts = path.split('/', 1)
if parts[0] == '~':
parts[0] = os.getenv('HOME')
elif parts[0].startswith('~'):
users = [x[0] for x in pwd.getpwall()]
if parts[0][1:] in users:
home = pwd.getpwnam(parts[0][1:])[5]
parts[0] = home
if len(parts) > 1:
s = os.path.join(parts[0], parts[1])
else:
s = parts[0]
s = os.path.realpath(s)
if os.path.isdir(s):
s += '/'
return s
def cleanse(s):
s2 = s.replace("\n", "")
return s2
def padtrunc(s, i, c=' '):
assert i >= 0
assert len(c) == 1
l = len(s)
if l < i:
return s + c * (i - l)
elif l > i:
return s[0:i]
else:
return s
def pad(s, i, c=' '):
assert len(c) == 1
l = len(s)
if len(s) < i:
return s + c * (i - l)
else:
return s
def count_leading_whitespace(s):
m = regex.leading_whitespace.match(s)
assert m, "count leading whitespace failed somehow"
return m.end() - m.start()
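
padtrunc always returns exactly i characters while pad only grows; for
example:

    assert padtrunc('abc', 5) == 'abc  '     # padded to width 5
    assert padtrunc('abcdef', 4) == 'abcd'   # truncated to width 4
    assert pad('abc', 2) == 'abc'            # pad never truncates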

569
window.py Normal file

@ -0,0 +1,569 @@
import os.path, string
import buffer, point, regex
WORD_LETTERS = list(string.letters + string.digits)
# note about the cursor: the cursor position will insert in front of
# the character it highlights. to this end, it needs to be able to
# highlight behind the last character on a line. thus, the x
# coordinate of the (logical) cursor can equal the length of lines[y],
# even though lines[y][x] throws an index error. both buffer and
# window need to be aware of this possibility for points.
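# for example, if lines[0] == 'abc' then the legal cursor x values on
# that line are 0, 1, 2 and 3: lines[0][3] raises IndexError, but a
# cursor at Point(3, 0) is where end-of-line insertion happens.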
class Window(object):
def __init__(self, b, a, height=24, width=80, slot='main', mode_name=None):
self.buffer = b
self.application = a
self.buffer.add_window(self, slot)
self.first = point.Point(0, 0, "logical")
self.last = point.Point(0, 0, "logical")
self.cursor = point.Point(0, 0, "logical")
self.mark = None
self.active_point = None
#self.physical_movement = False
self.height = height
self.width = width
self._logical_offsets = None
self._physical_lines = None
self._physical_lines_cont = None
self.input_line = ""
if mode_name is not None:
pass
elif hasattr(self.buffer, 'modename') and self.buffer.modename is not None:
mode_name = self.buffer.modename
elif self.buffer.name() == "*Minibuffer*":
mode_name = 'mini'
elif self.buffer.name() == "*Console*":
#mode_name = "console"
mode_name = "fundamental"
elif hasattr(self.buffer, 'path'):
path = self.buffer.path
basename = os.path.basename(path)
#ext = os.path.splitext(path)[1].lower()
ext = self._get_path_ext(path)
if path in self.application.mode_paths:
mode_name = self.application.mode_paths[path]
elif basename in self.application.mode_basenames:
mode_name = self.application.mode_basenames[basename]
elif ext in self.application.mode_extensions:
mode_name = self.application.mode_extensions[ext]
elif len(self.buffer.lines) > 0 and \
self.buffer.lines[0].startswith('#!'):
line = self.buffer.lines[0]
for word in self.application.mode_detection:
if word in line:
mode_name = self.application.mode_detection[word]
if mode_name is None:
mode_name = "fundamental"
m = self.application.modes[mode_name](self)
self.set_mode(m)
def _get_path_ext(self, path):
name = os.path.basename(path).lower()
tokens = name.split('.')
if len(tokens) > 2 and tokens[-1] in ('gz', 'in', 'zip'):
return '.%s.%s' % (tokens[-2], tokens[-1])
else:
return os.path.splitext(path)[1].lower()
def set_mode(self, m):
self.mode = m
self.redraw()
def get_cursor_offset(self):
cursor = self.logical_cursor()
return self.buffer.get_point_offset(cursor)
# the message is printed when the point is not visible, and the proper
# variable is set
def set_active_point(self, p, use_msg_when_hidden=True,
msg='marking on line %(y)d, character %(x)d'):
self.active_point = p
if not self.point_is_visible(p):
self.application.set_error(msg % {'x': p.x, 'y': p.y})
# cursors
#
# there are three:
# the actual cursor (not good for most things)
# the logical cursor (good for logical actions on buffer)
# the physical cursor (good for drawing)
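# e.g. (illustrative values) with width == 10 and a 15-character line,
# the logical cursor Point(12, 0) maps to the physical cursor
# Point(2, 1): row 1 of the wrapped line, column 2.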
def logical_cursor(self):
y = self.cursor.y
if self.cursor.x <= len(self.buffer.lines[y]):
return self.cursor
else:
return point.Point(len(self.buffer.lines[y]), y, "logical")
def logical_point(self, p):
self.get_physical_lines()
x = 0
y = 0
for i in range(0, p.y):
if self._physical_lines_cont[i]:
x += self.width
else:
x = 0
y += 1
x += p.x
return point.Point(x, y, "logical")
def physical_cursor(self):
p = self.logical_cursor()
#return self.physical_point(p)
self.get_physical_lines()
y = 0
for i in self._logical_offsets[0:p.y]:
y += i
y += p.x / self.width
x = p.x % self.width
# this allows the cursor to be in the right margin, rather than on the
# next line... i.e. not the behavior you want for actual text.
if p.x == len(self.buffer.lines[p.y]) and y > 0 and p.x > 0 and x == 0:
#if y > 0 and p.x > 0 and x == 0:
x = self.width
y -= 1
return point.Point(x, y, "physical")
def physical_point(self, p):
self.get_physical_lines()
y = 0
for i in self._logical_offsets[0:p.y]:
y += i
y += p.x / self.width
x = p.x % self.width
return point.Point(x, y, "physical")
# debug
def get_debug_repr(self):
return ""
def get_physical_lines(self):
if self._physical_lines is None:
self._physical_lines = []
self._physical_lines_cont = []
self._logical_offsets = []
for l in self.buffer.lines:
pl = []
while len(l) > self.width:
pl.append(l[:self.width])
l = l[self.width:]
pl.append(l)
self._logical_offsets.append(len(pl))
self._physical_lines.extend(pl)
for i in range(0, len(pl)-1):
self._physical_lines_cont.append(True)
self._physical_lines_cont.append(False)
return self._physical_lines
# redrawing
def set_size(self, width, height):
self.width = width
self.height = height
self.redraw()
self._invalidate_physical_lines()
self.mode.invalidate()
def _invalidate_physical_lines(self):
self._physical_lines = None
def _region_added(self, p, xdiff, ydiff, str=None):
cursor = self.logical_cursor()
self._invalidate_physical_lines()
if cursor.y > p.y:
self.cursor = cursor.offset(0, ydiff)
elif self.cursor >= p:
self.cursor = cursor.offset(xdiff, ydiff)
else:
pass
self.redraw() # added 2006-5-28
if not self.cursor_is_visible():
self.center_view()
self.mode.region_added(p, xdiff, ydiff, str)
def _region_removed(self, p1, p2, str):
pdelta = p1 - p2
xdiff, ydiff = pdelta.x, pdelta.y
self._invalidate_physical_lines()
if self.cursor.y > p2.y:
self.cursor = self.cursor.offset(0, ydiff)
elif self.cursor > p2:
self.cursor = self.cursor.offset(xdiff, ydiff)
elif self.cursor >= p1:
self.cursor = p1.offset(0, 0)
else:
pass
if not self.cursor_is_visible():
self.center_view()
self.mode.region_removed(p1, p2, str)
def visible_offset(self):
pfirst = self.physical_point(self.first)
return pfirst.y
def visible_cursor(self):
i = self.visible_offset()
return self.physical_cursor().offset(0, -i)
def visible_lines(self):
i = self.visible_offset()
lines = self.get_physical_lines()
return lines[i:i+self.height]
def continued_visible_line(self, i):
return self._physical_lines_cont[i + self.visible_offset()]
def redraw(self):
plines = self.get_physical_lines()
pfirst = self.physical_point(self.first)
py = min(pfirst.y + self.height - 1, len(plines) - 1)
px = min(self.width, len(plines[py]))
plast = point.Point(px, py, "physical")
self.last = self.logical_point(plast)
if self.last < self.first:
raise Exception, "BUGGJGJG:\n%s" % (self.dump())
self._validate_first_last()
def point_is_visible(self, p):
return self.first <= p and p <= self.last
def cursor_is_visible(self):
cursor = self.logical_cursor()
return self.point_is_visible(cursor)
def first_is_visible(self):
first_point = self.buffer.get_buffer_start()
return self.point_is_visible(first_point)
def last_is_visible(self):
last_point = self.buffer.get_buffer_end()
return self.point_is_visible(last_point)
def center_view(self):
pcursor = self.physical_cursor()
x = 0
if self.height == 1:
# we special case this to avoid rounding problems
y = max(0, pcursor.y)
else:
offset = self.height - (self.height / 2)
y = max(0, pcursor.y - offset)
pfirst = point.Point(x, y, "physical")
self.first = self.logical_point(pfirst)
self.redraw()
def relocate_cursor(self):
if not self.cursor_is_visible():
i = self.visible_offset()
pp = point.Point(0, i, "physical")
lp = self.logical_point(pp)
self.goto(lp)
# point validation
def _validate_cursor(self):
self.buffer._validate_point(self.logical_cursor())
def _validate_mark(self):
self.buffer._validate_point(self.mark)
def _validate_first_last(self):
assert self.first <= self.logical_cursor(), "one"
assert (self.first.x % self.width) == 0, "two: %d %% %d != 0 (%d)" % (self.first.x, self.width, self.first.x % self.width)
assert self.first <= self.last, "four"
# moving in buffer
def forward(self):
cursor = self.logical_cursor()
if cursor.x < len(self.buffer.lines[cursor.y]):
self.cursor.x = cursor.x + 1
elif cursor.y < len(self.buffer.lines) - 1:
self.cursor.y = cursor.y + 1
self.cursor.x = 0
if not self.cursor_is_visible():
self.center_view()
def backward(self):
cursor = self.logical_cursor()
if cursor.x > 0:
self.cursor.x = cursor.x - 1
elif self.cursor.y > 0:
self.cursor.y = cursor.y - 1
self.cursor.x = len(self.buffer.lines[self.cursor.y])
if not self.cursor_is_visible():
self.center_view()
def end_of_line(self):
self.cursor.x = len(self.buffer.lines[self.cursor.y])
if not self.cursor_is_visible():
self.center_view()
def start_of_line(self):
self.cursor.x = 0
if not self.cursor_is_visible():
self.center_view()
def previous_line(self):
if self.cursor.y > 0:
self.cursor.y -= 1
if not self.cursor_is_visible():
self.center_view()
def next_line(self):
if self.cursor.y < len(self.buffer.lines) - 1:
self.cursor.y += 1
if not self.cursor_is_visible():
self.center_view()
def pshift(self, p, i):
y = max(0, p.y + i)
y = min(y, len(self._physical_lines) - 1)
x = min(len(self._physical_lines[y]), p.x)
return self.logical_point(point.Point(x, y, "physical"))
# word handling
def find_left_word(self, p=None):
if p is None:
p = self.logical_cursor().offset(0, 0)
start = self.buffer.get_buffer_start()
if p == start:
return
elif p.x == 0:
p.y -= 1
p.x = len(self.buffer.lines[p.y])
else:
p.x -= 1
while p >= start and self.point_char(p) not in WORD_LETTERS:
if p.x == 0:
p.y -= 1
p.x = len(self.buffer.lines[p.y])
else:
p.x -= 1
found_word = False
while p >= start and self.point_char(p) in WORD_LETTERS:
found_word = True
if p.x == 0:
p.y -= 1
p.x = len(self.buffer.lines[p.y])
else:
p.x -= 1
if not found_word:
pass
elif p.x == len(self.buffer.lines[p.y]):
p.x = 0
p.y += 1
else:
p.x += 1
return p
def find_right_word(self, p=None):
if p is None:
p = self.logical_cursor().offset(0, 0)
end = self.buffer.get_buffer_end()
while p < end and self.point_char(p) not in WORD_LETTERS:
if p.x == len(self.buffer.lines[p.y]):
p.x = 0
p.y += 1
else:
p.x += 1
while p < end and self.point_char(p) in WORD_LETTERS:
if p.x == len(self.buffer.lines[p.y]):
p.x = 0
p.y += 1
else:
p.x += 1
return p
def left_word(self):
p = self.find_left_word()
if p is not None:
self.goto(p)
def right_word(self):
p = self.find_right_word()
if p is not None:
self.goto(p)
# page up/down
def page_up(self):
first_point = self.buffer.get_buffer_start()
if self.point_is_visible(first_point):
self.goto_beginning()
return
self.cursor = self.pshift(self.physical_cursor(), 3 - self.height)
if self.first > first_point:
self.first = self.pshift(self.physical_point(self.first), 3 - self.height)
self.redraw()
def page_down(self):
last_point = self.buffer.get_buffer_end()
if self.point_is_visible(last_point):
self.goto_end()
return
self.cursor = self.pshift(self.physical_cursor(), self.height - 3)
if self.last < last_point:
self.first = self.pshift(self.physical_point(self.first), self.height - 3)
self.redraw()
# jumping in buffer
def goto(self, p):
self.buffer._validate_point(p)
self.cursor.x = p.x
self.cursor.y = p.y
if not self.cursor_is_visible():
self.center_view()
def goto_line(self, y):
if y < 0:
y = len(self.buffer.lines) + y + 1
self.buffer._validate_y(y)
self.cursor.y = y
self.cursor.x = 0
if not self.cursor_is_visible():
self.center_view()
def forward_lines(self, n):
assert n > 0, "illegal number of lines: %d" % n
m = 0
p = self.logical_cursor().copy()
while m < n and p.y < len(self.buffer.lines):
p.y += 1
m += 1
self.goto(p)
def forward_chars(self, n):
m = 0
p = self.logical_cursor().copy()
while p < self.last and m < n:
if p.x == len(self.buffer.lines[p.y]):
p.y += 1
p.x = 0
m += 1
else:
p.x += 1
m += 1
self.goto(p)
def goto_char(self, n):
self.goto(point.Point(0, 0))
self.forward_chars(n)
def goto_beginning(self):
self.cursor = self.buffer.get_buffer_start()
self.first = self.buffer.get_buffer_start()
self.redraw()
def goto_end(self):
self.cursor = self.buffer.get_buffer_end()
if not self.cursor_is_visible():
pcursor = self.physical_cursor()
pfirst = pcursor.offset(0, 3 - self.height)
pfirst.x = 0
self.first = self.logical_point(pfirst)
self.redraw()
# mark manipulation
def set_mark_point(self, p):
self.mark = p.offset(0, 0)
def set_mark(self):
cursor = self.logical_cursor()
self.set_mark_point(cursor)
self.application.set_error("Mark set")
def goto_mark(self):
self.goto(self.mark)
def switch_mark(self):
if self.mark:
p = self.mark
cursor = self.logical_cursor()
self.set_mark_point(cursor)
self.goto(p)
# deletion
def left_delete(self):
cursor = self.logical_cursor()
if cursor.x > 0:
self.buffer.delete_character(cursor.offset(-1, 0, "logical"))
elif cursor.y > 0:
self.buffer.delete_character(point.Point(len(self.buffer.lines[cursor.y-1]),
cursor.y - 1,
"logical"))
else:
pass
def right_delete(self):
cursor = self.logical_cursor()
if cursor < self.last:
self.buffer.delete_character(cursor)
else:
pass
# killing
def kill_line(self):
return self.copy_line(kill=True)
def kill_region(self):
return self.copy_region(kill=True)
def kill_left_word(self):
p1 = self.find_left_word()
p2 = self.logical_cursor()
if p1 == p2:
return
return self.kill(p1, p2)
def kill_right_word(self):
p1 = self.logical_cursor()
p2 = self.find_right_word()
if p1 == p2:
return
return self.kill(p1, p2)
def copy_line(self, kill=False):
cursor = self.logical_cursor()
if (cursor.x < len(self.buffer.lines[cursor.y]) and
not regex.whitespace.match(self.buffer.lines[cursor.y][cursor.x:])):
limit = point.Point(len(self.buffer.lines[cursor.y]), cursor.y, "logical")
elif cursor.y < len(self.buffer.lines) - 1:
limit = point.Point(0, cursor.y + 1, "logical")
else:
return
if kill:
return self.kill(cursor, limit)
else:
return self.copy(cursor, limit)
def copy_region(self, kill=False):
cursor = self.logical_cursor()
if cursor < self.mark:
p1 = cursor
p2 = self.mark
elif self.mark < cursor:
p1 = self.mark
p2 = cursor
else:
self.input_line = "Empty kill region"
return
if kill:
return self.kill(p1, p2)
else:
return self.copy(p1, p2)
def kill(self, p1, p2):
killed = self.buffer.get_substring(p1, p2)
self.buffer.delete_string(p1, p2)
self.application.push_kill(killed)
return killed
def copy(self, p1, p2):
copied = self.buffer.get_substring(p1, p2)
self.application.push_kill(copied)
return copied
# insertion
def insert_string(self, s):
cursor = self.logical_cursor()
self.insert(cursor, s)
def insert(self, p, s):
# cursor adjustment happens in _region_added when the buffer notifies us
self.buffer.insert_string(p, s)
def yank(self):
s = self.application.get_kill()
self.insert_string(s)
def pop_kill(self):
return self.application.pop_kill()
# querying
def highlighted_char(self):
cursor = self.logical_cursor()
return self.point_char(cursor)
def point_char(self, p):
if p.x == len(self.buffer.lines[p.y]):
return "\n"
else:
return self.buffer.lines[p.y][p.x]
# region finding (p is a physical point)
def get_region(self, p):
regions = self.mode.get_regions()
assert len(regions[p.y]) > 0, "no regions found; strange"
for r in regions[p.y]:
if r.start <= p.x and r.end >= p.x + 1:
return r
return None