Implement logging presets (#445)

* logging template nits

* moved logging outside Manticore

* naming fix and exposed API call

* logging -> log renaming

* fix for verbosity test

* restored setter/getter for verbosity

* fixes for warnings in 0 mode and extended test_cli_verbosity

* removed print

* relative import, fork message to executor, init function

* verbosity docstring fix

* docstring nit
This commit is contained in:
Theofilos Petsios 2017-08-14 16:05:29 -04:00 committed by GitHub
parent 1d8e051522
commit 09bb702fac
6 changed files with 148 additions and 105 deletions

View File

@ -70,7 +70,6 @@ def main():
args = parse_arguments()
m = Manticore(args.programs[0], args.programs[1:])
m.policy = args.policy
m.args = args

View File

@ -43,7 +43,7 @@ class Policy(object):
super(Policy, self).__init__(*args, **kwargs)
self._executor = executor
self._executor.subscribe('did_add_state', self._add_state_callback)
@contextmanager
def locked_context(self):
''' Policy shared context dictionary '''
@ -55,7 +55,7 @@ class Policy(object):
ctx['policy'] = policy_context
def _add_state_callback(self, state_id, state):
''' Save prepare(state) on policy shared context before
''' Save prepare(state) on policy shared context before
the state is stored
'''
with self.locked_context() as ctx:
@ -84,7 +84,7 @@ class Random(Policy):
class Uncovered(Policy):
def __init__(self, executor, *args, **kwargs):
super(Uncovered, self).__init__(executor, *args, **kwargs)
#hook on the necesary executor signals
#hook on the necesary executor signals
#on callbacks save data in executor.context['policy']
def prepare(self, state):
@ -94,7 +94,7 @@ class Uncovered(Policy):
def choice(self, state_ids):
# Use executor.context['uncovered'] = state_id -> stats
# am
with self._executor.locked_context() as ctx:
with self._executor.locked_context() as ctx:
lastpc = ctx['policy']
visited = ctx.get('visited', ())
interesting = set()
@ -109,7 +109,7 @@ class Uncovered(Policy):
class Executor(Eventful):
'''
The executor guides the execution of an initial state or a paused previous run.
The executor guides the execution of an initial state or a paused previous run.
It handles all exceptional conditions (system calls, memory faults, concretization, etc.)
'''
@ -117,7 +117,7 @@ class Executor(Eventful):
super(Executor, self).__init__(**kwargs)
# Signals / Callbacks handlers will be invoked potentially at different
# Signals / Callbacks handlers will be invoked potentially at different
# worker processes. State provides a local context to save data.
self.subscribe('will_load_state', self._register_state_callbacks)
@ -140,10 +140,10 @@ class Executor(Eventful):
if context is None:
context = {}
self._shared_context = manager.dict(context)
#scheduling priority policy (wip)
#Set policy
policies = {'random': Random,
policies = {'random': Random,
'uncovered': Uncovered
}
self._policy = policies[policy](self)
@ -160,7 +160,7 @@ class Executor(Eventful):
@contextmanager
def locked_context(self):
''' Executor context is a shared memory object. All workers share this.
''' Executor context is a shared memory object. All workers share this.
It needs a lock. Its used like this:
with executor.context() as context:
@ -175,16 +175,16 @@ class Executor(Eventful):
def _register_state_callbacks(self, state, state_id):
'''
Install forwarding callbacks in state so the events can go up.
Install forwarding callbacks in state so the events can go up.
Going up, we prepend state in the arguments.
'''
'''
#Forward all state signals
self.forward_events_from(state, True)
def add(self, state):
'''
Enqueue state.
Save state on storage, assigns an id to it, then add it to the
Save state on storage, assigns an id to it, then add it to the
priority queue
'''
#save the state to secondary storage
@ -194,7 +194,7 @@ class Executor(Eventful):
return state_id
def load_workspace(self):
#Browse and load states in a workspace in case we are trying to
#Browse and load states in a workspace in case we are trying to
# continue from paused run
loaded_state_ids = self._workspace.try_loading_workspace()
if not loaded_state_ids:
@ -237,7 +237,7 @@ class Executor(Eventful):
###############################################
# Priority queue
# Priority queue
@sync
def put(self, state_id):
''' Enqueue it for processing '''
@ -264,7 +264,7 @@ class Executor(Eventful):
#if there is actually some workers running wait for state forks
logger.debug("Waiting for available states")
self._lock.wait()
state_id = self._policy.choice(list(self._states))
del self._states[self._states.index(state_id)]
return state_id
@ -292,20 +292,20 @@ class Executor(Eventful):
'''
Fork state on expression concretizations.
Using policy build a list of solutions for expression.
For the state on each solution setting the new state with setstate
For the state on each solution setting the new state with setstate
For example if expression is a Bool it may have 2 solutions. True or False.
Parent
Parent
(expression = ??)
Child1 Child2
(expression = True) (expression = True)
setstate(True) setstate(False)
The optional setstate() function is supposed to set the concrete value
The optional setstate() function is supposed to set the concrete value
in the child state.
'''
assert isinstance(expression, Expression)
@ -315,9 +315,13 @@ class Executor(Eventful):
#Find a set of solutions for expression
solutions = state.concretize(expression, policy)
logger.info("Forking, about to store. (policy: %s, values: %s)",
policy,
', '.join('0x{:x}'.format(sol) for sol in solutions))
self.publish('will_fork_state', state, expression, solutions, policy)
#Build and enqueue a state for each solution
#Build and enqueue a state for each solution
children = []
for new_value in solutions:
with state as new_state:
@ -334,7 +338,7 @@ class Executor(Eventful):
#maintain a list of childres for logging purpose
children.append(state_id)
logger.debug("Forking current state into states %r",children)
logger.debug("Forking current state into states %r", children)
return None
def run(self):
@ -428,7 +432,7 @@ class Executor(Eventful):
self.publish('will_terminate_state', current_state, current_state_id, 'Exception')
current_state = None
logger.setState(None)
assert current_state is None
#notify siblings we are about to stop this run

View File

@ -24,10 +24,10 @@ from .core.smtlib import solver, ConstraintSet
from .platforms import linux, decree, windows
from .utils.helpers import issymbolic
from .utils.nointerrupt import WithKeyboardInterruptAs
from .utils import log
logger = logging.getLogger('MANTICORE')
def makeDecree(program, concrete_data=''):
constraints = ConstraintSet()
platform = decree.SDecree(constraints, program)
@ -68,7 +68,7 @@ def makeLinux(program, argv, env, symbolic_files, concrete_start = ''):
#set stdin input...
platform.input.write(initial_state.symbolicate_buffer('+'*256, label='STDIN'))
return initial_state
return initial_state
def makeWindows(args):
@ -92,7 +92,7 @@ def makeWindows(args):
buf_str = "".join(platform.current.read_bytes(data_ptr, data_size))
logger.debug('Original buffer: %s', buf_str.encode('hex'))
offset = args.offset
offset = args.offset
concrete_data = args.data.decode('hex')
assert data_size >= offset + len(concrete_data)
size = min(args.maxsymb, data_size - offset - len(concrete_data))
@ -131,6 +131,7 @@ def binary_type(path):
else:
raise NotImplementedError("Binary {} not supported. Magic bytes: 0x{}".format(path, binascii.hexlify(magic)))
class Manticore(object):
'''
The central analysis object.
@ -170,7 +171,6 @@ class Manticore(object):
self._dumpafter = 0
self._maxstates = 0
self._maxstorage = 0
self._verbosity = 0
self._symbolic_files = [] # list of string
self._executor = None
#Executor wide shared context
@ -181,7 +181,7 @@ class Manticore(object):
if self._binary_type == 'ELF':
self._binary_obj = ELFFile(file(self._binary))
self._init_logging()
log.init_logging()
@property
def context(self):
@ -191,11 +191,11 @@ class Manticore(object):
else:
logger.warning("Using shared context without a lock")
return self._executor._shared_context
@contextmanager
def locked_context(self, key=None, default=list):
''' It refers to the manticore shared context
''' It refers to the manticore shared context
It needs a lock. Its used like this:
with m.context() as context:
@ -220,30 +220,6 @@ class Manticore(object):
yield ctx
context[key] = ctx
def _init_logging(self):
def loggerSetState(logger, stateid):
logger.filters[0].stateid = stateid
class ContextFilter(logging.Filter):
'''
This is a filter which injects contextual information into the log.
'''
def filter(self, record):
if hasattr(self, 'stateid') and isinstance(self.stateid, int):
record.stateid = '[%d]' % self.stateid
else:
record.stateid = ''
return True
ctxfilter = ContextFilter()
logging.basicConfig(format='%(asctime)s: [%(process)d]%(stateid)s %(name)s:%(levelname)s: %(message)s', stream=sys.stdout, level=logging.ERROR)
for loggername in ['MANTICORE', 'VISITOR', 'EXECUTOR', 'CPU', 'REGISTERS', 'SMT', 'MEMORY', 'PLATFORM']:
logging.getLogger(loggername).addFilter(ctxfilter)
logging.getLogger(loggername).setState = types.MethodType(loggerSetState, logging.getLogger(loggername))
# XXX(yan): args is a temporary hack to include while we continue moving
# non-Linux platforms to new-style arg handling.
@property
@ -290,40 +266,23 @@ class Manticore(object):
def maxstorage(self):
return self._maxstorage
@maxstorage.setter
def maxstorage(self, max_storage):
self._maxstorage = max_storage
@property
def verbosity(self):
'''
Convenience interface for setting logging verbosity to one of several predefined
logging presets. Valid values: 0-5
'''
return self._verbosity
"""Convenience interface for setting logging verbosity to one of
several predefined logging presets. Valid values: 0-5.
"""
return log.manticore_verbosity
@verbosity.setter
def verbosity(self, setting):
zero = map(lambda x: (x, logging.ERROR),
['MANTICORE', 'VISITOR', 'EXECUTOR', 'CPU', 'REGISTERS', 'SMT', 'MEMORY', 'PLATFORM'])
levels = [zero,
[('MANTICORE', logging.INFO), ('EXECUTOR', logging.INFO)],
[('PLATFORM', logging.DEBUG)],
[('MEMORY', logging.DEBUG), ('CPU', logging.DEBUG)],
[('REGISTERS', logging.DEBUG)],
[('SMT', logging.DEBUG)]]
"""A call used to modify the level of output verbosity
:param int setting: the level of verbosity to be used
"""
log.set_verbosity(setting)
# Takes a value and ensures it's in a certain range
def clamp(val, minimum, maximum):
return sorted((minimum, val, maximum))[1]
clamped = clamp(setting, 0, len(levels) - 1)
if clamped != setting:
logger.debug("%s not between 0 and %d, forcing to %d", setting, len(levels) - 1, clamped)
for level in range(clamped + 1):
for log_type, log_level in levels[level]:
logging.getLogger(log_type).setLevel(log_level)
self._verbosity = clamped
@maxstorage.setter
def maxstorage(self, max_storage):
self._maxstorage = max_storage
def hook(self, pc):
'''
@ -395,7 +354,7 @@ class Manticore(object):
raise NotImplementedError("Binary {} not supported.".format(path))
return state
@property
def policy(self):
@ -461,7 +420,7 @@ class Manticore(object):
else: raise "Unsupported architecture: %s"%(arch, )
return self._arch
def _start_workers(self, num_processes, profiling=False):
assert num_processes > 0, "Must have more than 0 worker processes"
@ -492,7 +451,7 @@ class Manticore(object):
p.start()
def _join_workers(self):
with WithKeyboardInterruptAs(self._executor.shutdown):
with WithKeyboardInterruptAs(self._executor.shutdown):
while len(self._workers) > 0:
w = self._workers.pop().join()
@ -552,7 +511,7 @@ class Manticore(object):
def _terminate_state_callback(self, state, state_id, ex):
#aggregates state statistics into exceutor statistics. FIXME split
logger.info("Terminate state %r %r ", state, state_id)
logger.debug("Terminate state %r %r ", state, state_id)
if state is None:
return
state_visited = state.context.get('visited_since_last_fork', set())
@ -574,10 +533,7 @@ class Manticore(object):
manticore_context['visited'] = manticore_visited.union(state_visited)
state.context['visited_since_last_fork'] = set()
logger.info("Forking, about to store. (policy: %s, values: %s)", policy,
', '.join('0x{:x}'.format(pc) for pc in values))
def _read_register_callback(self, state, reg_name, value):
def _read_register_callback(self, state, reg_name, value):
logger.debug("Read Register %r %r", reg_name, value)
def _write_register_callback(self, state, reg_name, value):
@ -706,12 +662,11 @@ class Manticore(object):
ws_path = None
self._output = ManticoreOutput(ws_path)
self._executor = Executor(initial_state,
workspace=self._output.descriptor,
policy=self._policy,
policy=self._policy,
context=self.context)
#Link Executor events to default callbacks in manticore object

View File

@ -1,4 +1,3 @@
import logging
import inspect
@ -31,7 +30,7 @@ class UnicornEmulator(object):
text.start: (len(text), UC_PROT_READ | UC_PROT_EXEC)
}
# Keep track of all the memory Unicorn needs while executing this
# Keep track of all the memory Unicorn needs while executing this
# instruction
self._should_be_written = {}
@ -50,7 +49,7 @@ class UnicornEmulator(object):
return Uc(UC_ARCH_X86, UC_MODE_32)
elif self._cpu.mode == CS_MODE_64:
return Uc(UC_ARCH_X86, UC_MODE_64)
raise RuntimeError("Unsupported architecture")
@ -87,7 +86,7 @@ class UnicornEmulator(object):
return self._emu.reg_read(UC_X86_REG_EIP)
elif self._cpu.mode == CS_MODE_64:
return self._emu.reg_read(UC_X86_REG_RIP)
def _hook_xfer_mem(self, uc, access, address, size, value, data):
'''
@ -180,7 +179,7 @@ class UnicornEmulator(object):
"Concretizing for emulation")
self._emu.mem_write(address, ''.join(values))
# Try emulation
self._should_try_again = False
@ -214,7 +213,7 @@ class UnicornEmulator(object):
registers |= set(['XMM0', 'XMM1', 'XMM2', 'XMM3', 'XMM4', 'XMM5', 'XMM6', 'XMM7', 'XMM8', 'XMM9', 'XMM10', 'XMM11', 'XMM12', 'XMM13', 'XMM14', 'XMM15'])
# XXX(yan): This concretizes the entire register state. This is overly
# aggressive. Once capstone adds consistent support for accessing
# aggressive. Once capstone adds consistent support for accessing
# referred registers, make this only concretize those registers being
# read from.
for reg in registers:
@ -222,7 +221,7 @@ class UnicornEmulator(object):
if issymbolic(val):
from ..core.cpu.abstractcpu import ConcretizeRegister
raise ConcretizeRegister(self._cpu, reg, "Concretizing for emulation.",
policy='ONE')
policy='ONE')
self._emu.reg_write(self._to_unicorn_id(reg), val)
# Bring in the instruction itself
@ -242,7 +241,7 @@ class UnicornEmulator(object):
try:
self._emu.emu_start(self._cpu.PC, self._cpu.PC+instruction.size, count=1)
except UcError as e:
# We request re-execution by signaling error; if we we didn't set
# We request re-execution by signaling error; if we we didn't set
# _should_try_again, it was likely an actual error
if not self._should_try_again:
raise

80
manticore/utils/log.py Normal file
View File

@ -0,0 +1,80 @@
import logging
import sys
import types
class ContextFilter(logging.Filter):
    """Logging filter that injects execution-state context into records.

    Adds a ``stateid`` attribute to every record: ``'[<id>]'`` when an
    integer state id has been assigned to this filter, an empty string
    otherwise. The format string installed by init_logging() consumes it.
    """

    def filter(self, record):
        stateid = getattr(self, 'stateid', None)
        if isinstance(stateid, int):
            record.stateid = '[%d]' % stateid
        else:
            record.stateid = ''
        # Always let the record through; this filter only annotates.
        return True
# Names of every project logger whose level is managed by this module.
loggers = ['MANTICORE',
           'VISITOR',
           'EXECUTOR',
           'CPU',
           'REGISTERS',
           'SMT',
           'MEMORY',
           'PLATFORM']

# Currently active verbosity preset (0-5); updated by set_verbosity().
manticore_verbosity = 0


def init_logging():
    """Configure the root logging format and prepare all project loggers.

    Attaches a shared ContextFilter to every managed logger (so records
    carry a ``stateid`` field consumed by the format string) and binds a
    ``setState`` method to each logger for tagging records with the
    currently executing state id.
    """
    ctxfilter = ContextFilter()
    logfmt = ("%(asctime)s: [%(process)d]%(stateid)s %(name)s:%(levelname)s:"
              " %(message)s")
    logging.basicConfig(format=logfmt, stream=sys.stdout, level=logging.ERROR)
    for name in loggers:
        log = logging.getLogger(name)
        log.setLevel(logging.WARNING)
        log.addFilter(ctxfilter)
        log.setState = types.MethodType(loggerSetState, log)


def loggerSetState(logger, stateid):
    """Record *stateid* on the logger's context filter so that subsequent
    log records are tagged with it (see ContextFilter.filter).
    """
    logger.filters[0].stateid = stateid


def set_verbosity(setting):
    """Set the verbosity of all manticore loggers to one of several
    predefined presets.

    :param int setting: the verbosity preset to apply (0-5); values
        outside that range are clamped to the nearest valid preset.
    """
    global manticore_verbosity
    # Preset 0 resets every managed logger to WARNING; higher presets are
    # applied cumulatively on top of the lower ones, so switching from a
    # high preset down to a lower one fully resets the noisier loggers.
    levels = [
        # 0
        [(name, logging.WARNING) for name in loggers],
        # 1
        [('MANTICORE', logging.INFO)],
        # 2 (-v)
        [('EXECUTOR', logging.INFO),
         ('PLATFORM', logging.DEBUG)],
        # 3 (-vv)
        [('CPU', logging.DEBUG)],
        # 4 (-vvv)
        [('MEMORY', logging.DEBUG),
         ('CPU', logging.DEBUG),
         ('REGISTERS', logging.DEBUG)],
        # 5 (-vvvv)
        [('MANTICORE', logging.DEBUG),
         ('SMT', logging.DEBUG)],
    ]

    # Clamp the requested setting into the supported range.
    clamped = min(max(setting, 0), len(levels) - 1)

    # Apply presets cumulatively from 0 up to the requested level.
    for log_type, log_level in (pair for lvl in range(clamped + 1)
                                for pair in levels[lvl]):
        logging.getLogger(log_type).setLevel(log_level)

    manticore_verbosity = clamped

View File

@ -61,7 +61,7 @@ class IntegrationTest(unittest.TestCase):
'--procs', '4',
filename,
'+++++++++'], stdout=output)
self.assertTrue(time.time()-t < 20)
def test_cli_verbosity(self):
@ -72,7 +72,13 @@ class IntegrationTest(unittest.TestCase):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'binaries/basic_linux_amd64')
output = subprocess.check_output(['python', '-m', 'manticore', filename])
self.assertLessEqual(len(output.splitlines()), 25)
self.assertLessEqual(len(output.splitlines()), 60)
output = subprocess.check_output(['python', '-m', 'manticore', filename, "-v"])
self.assertLessEqual(len(output.splitlines()), 80)
output = subprocess.check_output(['python', '-m', 'manticore', filename, "-vvv"])
self.assertGreaterEqual(len(output.splitlines()), 435000)
output = subprocess.check_output(['python', '-m', 'manticore', filename, "-vvvv"])
self.assertGreaterEqual(len(output.splitlines()), 950000)
def testArgumentsAssertions(self):
dirname = os.path.dirname(__file__)