r"""Logging Facilities
This module provides some simple default logging facilities including
a rotating set of log files.
Usage
=====
Components of the :mod:`mmf` package should always use the logging facilities to
emit messages to the user. The name of the logger should be as local as
possible to enable filtering (either the complete class name, or the module name
for example). The levels should be interpreted as follows:
`debug` :
   Use for debugging information that should not be displayed during
   normal runs.
`info` :
   Use for information that should be logged to file, but not
   necessarily displayed on the console.
`warning` :
   Use for warnings that do not need a full stack trace and that should
   be repeated.
`error` :
   Use for errors that need to be taken seriously.
`critical` :
   Use for mission critical problems that will terminate the
   program.
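
For example, a component might obtain a module-local logger and use the
levels as above (an illustrative sketch; the logger name is hypothetical)::

    import logging
    logger = logging.getLogger("mmf.examples.demo")
    logger.debug("Detailed state, normally hidden.")
    logger.info("Progress information for the log file.")
    logger.warning("Something suspicious; execution continues.")
    logger.error("A serious problem occurred.")
    logger.critical("Fatal problem; the program will terminate.")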

Note: also consider using real warnings by calling the :func:`warnings.warn`
function from the :mod:`warnings` module.  This has the following advantages:
1) :func:`warnings.warn` can generate a stack trace to show exactly where
   the warning was issued.  It also accepts a `stacklevel=2` argument to
   allow you to report the trace from the calling function (or higher).
2) The warnings can be set to display only once per run (or once in each
   module, etc.).  (This is the default behaviour.)
3) Warnings can be turned into exceptions by calling
   ``warnings.simplefilter('error', UserWarning)``.

Basically, use :func:`warnings.warn` for warnings about coding problems (like
a module not being able to be imported, or a function being deprecated) that
should only be issued once, or for warnings that need a traceback; see the
sketch below.
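
For example (an illustrative sketch, not part of this module)::

    import warnings

    def old_function():
        # stacklevel=2 attributes the warning to the caller of old_function.
        warnings.warn("old_function is deprecated", stacklevel=2)

    # Turn this category of warning into an exception for strict runs.
    warnings.simplefilter('error', UserWarning)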
"""
from __future__ import division, absolute_import
import os
import sys
import string
import cStringIO
import types
import traceback
import glob
import logging
from logging import getLogger, Filter
import logging.handlers
import tempfile
import threading
import warnings
_HANDLERS = {}

_FORMAT_FILE = " - ".join(["%(id)06i:",
                           "%(asctime)s",
                           "%(process)d:%(threadName)s:%(name)s:%(levelname)s",
                           "%(message)s"])
_FORMAT_STREAM = "%(id)06i:%(name)s:%(levelname)s - %(message)s"
def log_to_file(filename, mode='w',
                level=logging.DEBUG,
                stack=False, stack_limit=None, **kwargs):
    r"""Start logging to the specified file and return handler."""
    global _HANDLERS
    global _FORMAT_FILE
    global _FORMAT_STREAM
    logger = logging.getLogger()
    if filename in _HANDLERS:
        logger.removeHandler(_HANDLERS[filename])
        del _HANDLERS[filename]
    handler = logging.FileHandler(filename, mode=mode, **kwargs)
    formatter = MMFFormatter(_FORMAT_FILE,
                             stack=stack, stack_limit=stack_limit)
    handler.setFormatter(formatter)
    handler.setLevel(level)
    _HANDLERS[filename] = handler
    logger.addHandler(handler)
    logging.getLogger("mmf").info(
        "Started logging to file: '%s'" % (filename,))
    return handler
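# Illustrative usage of log_to_file (a sketch kept as a comment so it is not
# executed on import; the file name below is hypothetical):
#
#     handler = log_to_file("/tmp/mmf_run.log", level=logging.DEBUG)
#     logging.getLogger("mmf.example").debug("Recorded in the log file.")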
def log_to_stream(stream=None):
    r"""Start logging to `stream` and return handler."""
    global _HANDLERS
    logger = logging.getLogger("mmf")
    if stream in _HANDLERS:
        handler = _HANDLERS[stream]
    else:
        handler = logging.StreamHandler(stream)
        formatter = logging.Formatter(_FORMAT_STREAM)
        handler.setFormatter(formatter)
        handler.setLevel(logging.INFO)
        _HANDLERS[stream] = handler
    logger.addHandler(handler)
    return handler
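# Illustrative usage of log_to_stream (a sketch kept as a comment; the default
# stream for logging.StreamHandler is stderr):
#
#     import sys
#     handler = log_to_stream(sys.stdout)
#     logging.getLogger("mmf").info("Also echoed to stdout.")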
# Much of this code is borrowed from python's logging/__init__.py module and
# warnings.py module.
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
if hasattr(sys, 'frozen'):          # support for py2exe
    _srcfile = "logging%s" % (__file__[-4:])
elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
_srcfile = os.path.normcase(_srcfile)
class LoggerTraceback(Exception):
    r"""Used for printing tracebacks in log messages."""
class MMFTracebackType:
    r"""We need to put the frame in a :class:`TracebackType` object, but can't
    create one so we fake one."""
    def __init__(self, frame, lineno):
        self.tb_frame = frame
        self.tb_lineno = lineno
        self.tb_next = None
class MMFLogger(logging.getLoggerClass()):
    r"""Custom logger that also provides traceback information.

    Differs from the standard logger in the following ways:

    1) Packs `kwargs` into the `extra` argument for processing.  (Uses
       `stacklevel` for example, and :meth:`warn` accepts `category` so that
       exceptions can be raised later.)
    2) Adds a unique `id` to each record.
    3) Adds a `stack` frame to each record if :attr:`with_stack` is True.

    .. note:: The `stacklevel` argument is applied after the trace is backed
       out of the logging modules.  This means that the value you would have
       to pass to :func:`warnings.warn` might differ (in my experience, I
       need to add one more level when using :mod:`warnings`).

    .. attribute:: debug_level

       Will run :func:`pdb.set_trace` and drop into the python debugger if
       the level is at or above :attr:`debug_level`.
    """
    _parent_class = logging.getLoggerClass()
    _id = 0
    debug_level = None

    def __init__(self, name):
        self._parent_class.__init__(self, name)
        self.with_stack = True
    def _log(self, level, msg, args, exc_info=None, extra=None, **kw):
        # Pack any remaining keyword arguments into `extra` so that they end
        # up as attributes on the LogRecord.
        if extra is None:
            extra = {}
        extra.update(kw)
        MMFLogger._parent_class._log(
            self, level, msg, args, exc_info, extra)
        if self.debug_level is not None and level >= self.debug_level:
            import pdb
            pdb.set_trace()

    def warn(self, msg, *arg, **kw):
        kw['category'] = kw.get('category', UserWarning)
        return MMFLogger._parent_class.warn(self, msg, *arg, **kw)
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None,
                   extra=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.  Here we also add a traceback.
        """
        MMFLogger._id += 1
        if extra is None:
            extra = {}
        frame = None
        func = ""
        fn = ""
        lno = 1
        if self.with_stack:
            frame = logging.currentframe()
            if frame is not None:
                frame = frame.f_back
            while hasattr(frame, "f_code"):
                # Go up until we are out of the logging modules.
                co = frame.f_code
                filename = os.path.normcase(co.co_filename)
                if filename in [_srcfile, logging._srcfile]:
                    frame = frame.f_back
                    continue
                break
            # Back out additional levels if the caller asked for it.
            stacklevel = extra.get('stacklevel', 0)
            while stacklevel > 0 and hasattr(frame, 'f_back'):
                frame = frame.f_back
                stacklevel -= 1
            if hasattr(frame, "f_code"):
                func = frame.f_code.co_name
                fn = os.path.normcase(frame.f_code.co_filename)
                lno = frame.f_lineno
        rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if frame is not None:
            rv.stack = frame
        rv.id = MMFLogger._id
        if extra is not None:
            for key in extra:
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError(
                        "Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv
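# Illustrative usage of MMFLogger (a sketch kept as a comment; it assumes
# start_logging() has already installed MMFLogger as the logger class, so the
# extra keyword arguments are packed into the record):
#
#     logger = logging.getLogger("mmf.example")
#     logger.warn("deprecated usage", stacklevel=1)  # record carries category
#     MMFLogger.debug_level = logging.ERROR   # drop into pdb on error records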
BASIC_FORMAT = "%(id)06i:" + logging.BASIC_FORMAT
def setLoggerClass(klass):
    r"""Set the default logger class and also monkeypatch the root logger."""
    logging.setLoggerClass(klass)
    old_root = logging.root
    root = logging.getLoggerClass()(name="root")
    root.level = old_root.level
    root.handlers = old_root.handlers
    root.propagate = old_root.propagate
    root.disabled = old_root.disabled
    logging.root = root
    logging.Logger.root = root
    logging.Logger.manager.root = root
def start_logging(tmp_dir="/tmp",
                  format=BASIC_FORMAT,
                  interactive=None,
                  log_warnings=True,
                  level=logging.INFO,
                  files=None,
                  debug_level=None):
    r"""Start a logging handler and set up log files.

    This is called when the :mod:`mmf` module is imported if the
    `logging_options` configuration value is `True`.  It uses
    :func:`logging.basicConfig` to set up a default stream handler so that if
    a handler has already been defined, then it will not be changed.
    Depending on options, file handlers may also be set up.

    Parameters
    ----------
    files : list of str or (str, kwargs)
        Names of files to use for logging.  The `dict` `kwargs` can have any
        arguments supported by :func:`log_to_file`.
    tmp_dir : str
        If specified, then this directory will be used for a rotating
        set of logfiles in `tmp_dir/pymmf` with names
        `<pid>_<unique>.log`.  (Not yet supported.)
    interactive : bool, 'reset'
        If `True`, then the log messages will also be directed to
        `sys.stdout`, even if `filename` is specified.  This will use
        :func:`logging.basicConfig` unless passed the string `'reset'`, in
        which case the root logger will be completely reset (to be used if a
        root logger has already been set up, because in this case
        :func:`logging.basicConfig` will do nothing).
    log_warnings : bool, None
        If `True`, then capture warnings from the :mod:`warnings` module and
        redirect these to the logger.  If `None`, then leave the setting
        unchanged.
    """
    if interactive is None:
        if files is None:
            interactive = True
        else:
            interactive = False
    MMFLogger.debug_level = debug_level
    setLoggerClass(MMFLogger)
    root = logging.getLogger()
    root.level = logging.DEBUG
    if interactive:
        if 'reset' == interactive:
            # Iterate over a copy since removeHandler mutates the list.
            for _h in list(root.handlers):
                root.removeHandler(_h)
        if not root.handlers:
            fmt = MMFFormatter(fmt=format,
                               stack=False,
                               stack_limit=None)
            hdlr = logging.StreamHandler()
            hdlr.setFormatter(fmt)
            hdlr.level = level
            root.addHandler(hdlr)
    _files = {}
    if files is not None:
        for _f in files:
            if isinstance(_f, tuple):
                _files[_f[0]] = _f[1]
            else:
                _files[_f] = dict(level=level, stack=False)
    for _f in _files:
        try:
            handler = log_to_file(_f, **_files[_f])
        except Exception, err:
            root.error("Error logging to file '%s':\n%s" %
                       (_f, str(err)))
    if log_warnings is not None:
        captureWarnings(log_warnings)
        warnings.simplefilter('default')
# if tmp_dir is not None:
# try:
# os.makedirs(tmp_dir)
# except OSError, err:
# warnings.warn("\n".join(
# ["mmf.utils.logging: Could not make `tmp_dir`=%s"
# % tmp_dir,
# str(err)
# ]))
# (logfile, logfilename) = tempfile.mkstemp(
# suffix='log', prefix='pymmf_', dir=tmp_dir, text=True)
# # Add the log message handler to the logger
# handler = logging.handlers.RotatingFileHandler(
# LOG_FILENAME, maxBytes=20, backupCount=5)
# _LOGGER.info("Started logging to file: '%s'" % (logfilename,))
# my_logger.addHandler(handler)
# # Log some messages
# for i in range(20):
# my_logger.debug('i = %d' % i)
# # See what files are created
# logfiles = glob.glob('%s*' % LOG_FILENAME)
# for filename in logfiles:
# print filename
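# Illustrative call to start_logging (a sketch kept as a comment; the file
# name is hypothetical):
#
#     start_logging(interactive=True,
#                   files=[('debug.log', dict(level=logging.DEBUG))],
#                   log_warnings=True)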
# Warnings integration
# This is taken from python 2.7 and slightly modified.
_warnings_showwarning = None
class NullHandler(logging.Handler):
    def emit(self, record):
        pass
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """
    Implementation of showwarning which redirects to logging.  It will first
    check to see if the `file` parameter is None.  If a file is specified, it
    will delegate to the original warnings implementation of showwarning.
    Otherwise, it will format the warning and log the resulting string to the
    "mmf" logger with level logging.WARNING.
    """
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno,
                                  file, line)
    else:
        # s = warnings.formatwarning(message, category, filename,
        #                            lineno, line)
        # We don't include the traceback here.  It will be printed later.
        s = "%s:%s: %s: %s" % (filename, lineno, category.__name__, message)
        # line = linecache.getline(filename, lineno) if line is None else line
        # if line:
        #     line = line.strip()
        #     s += "  %s\n" % line
        logger = logging.getLogger("mmf")
        if not logger.handlers:
            logger.addHandler(NullHandler())
        logger.warning("%s", s)
def captureWarnings(capture):
    """
    If capture is True, redirect all warnings to the logging
    package.  If capture is False, ensure that warnings are not
    redirected to logging but to their original destinations.
    """
    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = _showwarning
    else:
        if _warnings_showwarning is not None:
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None
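# Illustrative usage of captureWarnings (a sketch kept as a comment): once
# warnings are captured, warnings.warn() messages are routed through the
# "mmf" logger instead of being written directly to stderr.
#
#     captureWarnings(True)
#     warnings.warn("now routed through the 'mmf' logger")
#     captureWarnings(False)    # restore the original warning display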