parent 6a79d35968
commit 1856149079
@@ -15,6 +15,7 @@ import mode.blame, mode.diff, mode.dir
 import mode.xml, mode.tt, mode.css, mode.javascript, mode.html
 import mode.text, mode.mutt
 import mode.bds, mode.life
+import mode.rst
 
 def run(buffers, jump_to_line=None, init_mode=None):
     # save terminal state so we can restore it when the program exits
@@ -103,7 +104,8 @@ class Application(object):
             'javascript': mode.javascript.Javascript,
             'sql': mode.sql.Sql,
             'template': mode.tt.Template,
-            'bds': mode.bds.BDS
+            'bds': mode.bds.BDS,
+            'rst': mode.rst.RST,
         }
 
         # these are used in this order to determine which mode to open certain
@@ -135,6 +137,7 @@ class Application(object):
             '.sql': 'sql',
             '.tt': 'template',
             '.css': 'css',
+            '.rst': 'rst',
         }
         self.mode_detection = {
             'python': 'python',
@@ -0,0 +1,56 @@
+
+Control Flow Integrity
+======================
+
+Control-Flow Integrity (CFI) is a technique used to enforce a security
+property, namely that all instructions that alter control flow (i.e.,
+branch instructions) jump only to legitimate targets. To do this, the
+authors use a combination of static analysis and dynamic checks.
+
+They then give a number of examples of how one could use CFI to
+improve other existing control-flow-based tools, including SFI and
+shadow stacks. Finally, they give a brief look at the formal theory
+behind secure control flow.
+
+Problem to be solved
+--------------------
+
+CFI is designed to handle malicious attacks against a program. In
+particular, their threat model gives the adversary total control over
+data memory. This covers a number of practical attacks, including any
+that use a "stack smashing" technique to gain control of the program.
+This includes many (all?) code injection attacks, as well as
+arc-injection attacks.
+
+Contributions
+-------------
+
+To enforce the control-flow integrity policy, they first use static
+analysis to determine the legitimate targets of indirect branches.
+Second, they use binary rewriting to insert dynamic checks that ensure
+each runtime target is one of the acceptable targets. This can be done
+because most functions are well behaved, in that they always return to
+their caller.
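+
+As a rough illustration (my own sketch, not code from the paper; real
+CFI instruments machine code, and the label/checked_call helpers are
+invented names), the inserted check can be thought of as comparing a
+label on the runtime target against the label that static analysis
+assigned to the call site::
+
+    LABELS = {}  # maps function -> its CFI label
+
+    def label(l):
+        # tag a function with the label chosen by static analysis
+        def wrap(f):
+            LABELS[f] = l
+            return f
+        return wrap
+
+    def checked_call(fptr, expected, *args):
+        # dynamic check inserted before an indirect call: the runtime
+        # target must carry the label this call site expects
+        if LABELS.get(fptr) != expected:
+            raise RuntimeError("CFI violation")
+        return fptr(*args)
+
+    @label("cmp")
+    def lt(a, b): return a < b
+
+    @label("cmp")
+    def gt(a, b): return a > b
+
+    checked_call(lt, "cmp", 1, 2)   # passes the check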
+
+Evaluation
+----------
+
+The main strength of this approach is that it offers a practical
+defense against arc-injection attacks. They used "hand examinations"
+of some known Windows arc-injection attacks, specifically a GDI+ JPEG
+flaw.
+
+There are a number of downsides to their CFI prototype. The first is
+the relatively high overhead: 20% on SPEC without perl. Second, it
+seems that there are potential problems with programs that use many
+function pointers to point to a variety of different functions.
+Currently, CFI creates equivalence classes of functions (see the lt
+and gt example).
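+
+Continuing the toy sketch above: because lt and gt must share a single
+label, a call site whose pointer is only ever meant to reach lt will
+also accept gt::
+
+    checked_call(gt, "cmp", 1, 2)   # also passes, even at a call site
+                                    # that should only ever reach lt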
+
+Next Step
+---------
+
+I believe the use of an SDT-based tool instead of the lightweight
+binary instrumentation could address a number of the difficulties,
+notably performance. I also think a better job could be done grouping
+functions, using the control flow information to partition the targets
+of function pointers more precisely.
lex3.py
@@ -75,6 +75,9 @@ class Rule:
         t = Token(name, self, lexer.y, lexer.x, s, None, parent, matchd, link)
         t.color = lexer.get_color(t)
         lexer.x += len(s)
+        if lexer.x >= len(lexer.lines[lexer.y]):
+            lexer.x = 0
+            lexer.y += 1
         return t
     def get_line(self, lexer):
         return lexer.lines[lexer.y] + '\n'
@@ -142,17 +145,23 @@ class PatternGroupRule(PatternRule):
         Rule.__init__(self, name)
         self.pairs = tuple(pairs)
     def match(self, lexer, parent):
-        x = lexer.x
+        (x, y) = (lexer.x, lexer.y)
         matches = []
-        line = self.get_line(lexer)
         for (tokname, tokre) in self.pairs:
-            m = tokre.match(line, x)
+            if y >= len(lexer.lines):
+                return []
+            line = lexer.lines[y] + '\n'
+            m = tokre.match(line, x)
             if m:
                 x += len(m.group(0))
+                if x >= len(line):
+                    x = 0
+                    y += 1
                 matches.append((tokname, m))
             else:
                 return []
         assert len(matches) == len(self.pairs)
+        #(lexer.x, lexer.y) = (x, y)
         return matches
     def lex(self, lexer, parent, matches):
         if matches:
mode/bds.py
@@ -1,5 +1,7 @@
+import commands
 import color, mode2
 from lex3 import Grammar, PatternRule, RegionRule, Grammar
+from method import Method
 from mode.perl import PerlGrammar
 from mode.xml import TagGrammar
 from mode.perl import StringGrammar
@@ -27,10 +29,11 @@ class BDS(mode2.Fundamental):
     closetokens = ('delimiter',)
     closetags = {')': '(', ']': '[', '}': '{'}
     colors = {
         # comments
         'comment.start': ('red', 'default'),
         'comment.null': ('red', 'default'),
         'comment.end': ('red', 'default'),
 
         # xml tag
         'tag.start': ('default', 'default'),
         'tag.namespace': ('magenta', 'default'),
         'tag.name': ('blue', 'default'),
@@ -39,13 +42,13 @@ class BDS(mode2.Fundamental):
         'tag.string.null': ('cyan', 'default'),
         'tag.string.end': ('cyan', 'default'),
         'tag.end': ('default', 'default'),
 
         # strings
         'string.start': ('green', 'default'),
         'string.octal': ('magenta', 'default'),
         'string.escaped': ('magenta', 'default'),
         'string.null': ('green', 'default'),
         'string.end': ('green', 'default'),
 
         # keywords, etc
         'derived': ('yellow', 'default'),
         'question': ('yellow', 'default'),
         'misquoted': ('yellow', 'red'),
@@ -58,5 +61,24 @@ class BDS(mode2.Fundamental):
         self.add_bindings('close-paren', (')',))
         self.add_bindings('close-brace', ('}',))
         self.add_bindings('close-bracket', (']',))
+        self.add_action(BDSMakeInstall())
+        self.add_action(BDSCompileResources())
+        self.add_action(BDSRestart())
     def name(self):
         return "BDS"
+
+class BDSMakeInstall(Method):
+    def _execute(self, w, **vargs):
+        cmd = "perl Makefile.PL && make && make install"
+        (status, output) = commands.getstatusoutput(cmd)
+        if status == 0:
+            w.set_error("make succeeded")
+        else:
+            w.application.data_buffer("*bds-make-install*", output, switch_to=True)
+            w.set_error("make failed with %d" % status)
+class BDSCompileResources(Method):
+    def _execute(self, w, **vargs):
+        pass
+class BDSRestart(Method):
+    def _execute(self, w, **vargs):
+        pass
@@ -0,0 +1,20 @@
+import color, mode2
+from lex3 import Grammar, PatternRule, RegionRule, PatternGroupRule
+
+class RSTGrammar(Grammar):
+    rules = [
+        PatternGroupRule(r'title', r'title', r'^.*?\n', r'titlesep', r'^=+\n'),
+        PatternGroupRule(r'subtitle', r'subtitle', r'^.*?\n', r'subtitlesep', r'^-+\n'),
+        PatternRule(r'line', r'^.*?\n'),
+    ]
+
+class RST(mode2.Fundamental):
+    grammar = RSTGrammar
+    colors = {
+        'title': ('blue', 'default'),
+        'titlesep': ('blue', 'default'),
+        'subtitle': ('cyan', 'default'),
+        'subtitlesep': ('cyan', 'default'),
+    }
+    def name(self):
+        return "RST"