python logging to database

Python logging to a database with a backup logger


Problem

I ran into the same problem while running a Django project on a server, since I sometimes need to check the logs remotely.


Solution

First, we need a handler for the logger that inserts log records into the database. Since my SQL is not great, I use an ORM for the database layer; I chose SQLAlchemy.

The model:

# models.py
from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.orm import declarative_base  # moved out of sqlalchemy.ext.declarative in SQLAlchemy 1.4
import datetime

base = declarative_base()


class Log(base):
    __tablename__ = "log"
    id = Column(Integer, primary_key=True, autoincrement=True)
    time = Column(DateTime, nullable=False, default=datetime.datetime.now)
    level_name = Column(String(10), nullable=True)
    module = Column(String(200), nullable=True)
    thread_name = Column(String(200), nullable=True)
    file_name = Column(String(200), nullable=True)
    func_name = Column(String(200), nullable=True)
    line_no = Column(Integer, nullable=True)
    process_name = Column(String(200), nullable=True)
    message = Column(Text)
    last_line = Column(Text)

This is the CRUD class used to insert records into the database:

#crud.py
import sqlalchemy
import sqlalchemy.orm  # 'import sqlalchemy' alone does not guarantee the orm subpackage is loaded
from .models import base
from traceback import print_exc


class Crud:
    def __init__(self, connection_string='sqlite:///log_db.sqlite3',
                 encoding='utf-8',
                 pool_size=10,
                 max_overflow=20,
                 pool_recycle=3600):

        self.connection_string = connection_string
        # 'encoding' is kept for backwards compatibility only; the matching
        # create_engine() argument was removed in SQLAlchemy 2.0
        self.encoding = encoding
        self.pool_size = pool_size
        self.max_overflow = max_overflow
        self.pool_recycle = pool_recycle
        self.engine = None
        self.session = None

    def initiate(self):
        self.create_engine()
        self.create_session()
        self.create_tables()

    def create_engine(self):
        # The pool settings apply to pooled engines (e.g. PostgreSQL/MySQL);
        # drop them on setups where SQLAlchemy defaults to NullPool, such as
        # file-based SQLite on older SQLAlchemy versions
        self.engine = sqlalchemy.create_engine(self.connection_string,
                                               pool_size=self.pool_size,
                                               max_overflow=self.max_overflow,
                                               pool_recycle=self.pool_recycle)

    def create_session(self):
        self.session = sqlalchemy.orm.Session(bind=self.engine)

    def create_tables(self):
        base.metadata.create_all(self.engine)

    def insert(self, instance):
        try:
            self.session.add(instance)
            # commit() flushes automatically; no separate flush() is needed
            self.session.commit()
        except Exception:
            self.session.rollback()
            raise

    def __del__(self):
        self.close_session()
        self.close_all_connections()

    def close_session(self):
        try:
            self.session.close()
        except Exception:
            print_exc()
        else:
            self.session = None

    def close_all_connections(self):
        try:
            self.engine.dispose()
        except Exception:
            print_exc()
        else:
            self.engine = None

The handler:

# handler.py
from logging import Handler, getLogger
from traceback import print_exc
from .crud import Crud
from .models import Log


my_crud = Crud(
    connection_string='<connection string to reach your db>',
    encoding='utf-8',
    pool_size=10,
    max_overflow=20,
    pool_recycle=3600)

my_crud.initiate()


class DBHandler(Handler):
    backup_logger = None

    def __init__(self, level=0, backup_logger_name=None):
        super().__init__(level)
        if backup_logger_name:
            self.backup_logger = getLogger(backup_logger_name)

    def emit(self, record):
        try:
            message = self.format(record)
            try:
                last_line = message.rsplit('\n', 1)[-1]
            except Exception:
                last_line = None

            try:
                new_log = Log(module=record.module,
                              thread_name=record.threadName,
                              file_name=record.filename,
                              func_name=record.funcName,
                              level_name=record.levelname,
                              line_no=record.lineno,
                              process_name=record.processName,
                              message=message,
                              last_line=last_line)
                # raise  # uncomment to exercise the backup-logger path

                my_crud.insert(instance=new_log)
            except Exception:
                # Database insertion failed; fall back to the backup logger
                if self.backup_logger:
                    try:
                        getattr(self.backup_logger, record.levelname.lower())(record.message)
                    except Exception:
                        print_exc()
                else:
                    print_exc()

        except Exception:
            print_exc()

A test to check the logger:

# test.py
from logging import basicConfig, getLogger, DEBUG, FileHandler, Formatter
from .handler import DBHandler  # the handler module shown above

basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            datefmt='%d-%b-%y %H:%M:%S',
            level=DEBUG)
formatter = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')  # avoid shadowing the built-in format

backup_logger = getLogger('backup_logger')
file_handler = FileHandler('file.log')
file_handler.setLevel(DEBUG)
file_handler.setFormatter(formatter)
backup_logger.addHandler(file_handler)

db_logger = getLogger('logger')
db_handler = DBHandler(backup_logger_name='backup_logger')
db_handler.setLevel(DEBUG)
db_handler.setFormatter(formatter)
db_logger.addHandler(db_handler)

if __name__ == "__main__":
    db_logger.debug('debug: hello world!')
    db_logger.info('info: hello world!')
    db_logger.warning('warning: hello world!')
    db_logger.error('error: hello world!')
    db_logger.critical('critical: hello world!!!!')

As you can see, the handler accepts a backup logger that it falls back to when the database insertion fails.

A good improvement would be to perform the database writes on a separate thread.
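
A minimal sketch of that idea using the standard library's QueueHandler and QueueListener (available since Python 3.2), assuming the DBHandler from above: the caller's thread only enqueues the record, and a listener thread drains the queue and performs the slow database insert.

# threaded_logging.py (sketch; assumes the DBHandler defined above)
import queue
from logging import getLogger, DEBUG
from logging.handlers import QueueHandler, QueueListener

from .handler import DBHandler

log_queue = queue.Queue(-1)  # unbounded queue between callers and the writer

db_handler = DBHandler(backup_logger_name='backup_logger')

# The logger only pays for a cheap queue.put(); the QueueListener's
# worker thread performs the actual (slow) database insert.
db_logger = getLogger('logger')
db_logger.setLevel(DEBUG)
db_logger.addHandler(QueueHandler(log_queue))

listener = QueueListener(log_queue, db_handler)
listener.start()
# ... at shutdown:
# listener.stop()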


I am digging this out again.

There is a solution using SQLAlchemy (Pyramid is NOT required for this recipe):

https://docs.pylonsproject.org/projects/pyramid-cookbook/en/latest/logging/sqlalchemy_logger.html

And you could improve logging by adding extra fields; here is a guide: https://stackoverflow.com/a/17558764/1115187
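
For illustration, a minimal example of the extra-fields mechanism described in that guide: every key passed in the extra dict becomes an attribute of the LogRecord, so a custom column (here the made-up client_ip) can be pulled out by a handler or formatter.

import logging

formatter = logging.Formatter('%(asctime)s %(client_ip)s %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)

logger = logging.getLogger('extra_demo')
logger.addHandler(handler)

# 'client_ip' is a hypothetical field; every record logged through this
# formatter must supply it via extra=, otherwise formatting fails.
logger.error('login failed', extra={'client_ip': '203.0.113.7'})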

Fallback to FS

Not sure that this is 100% correct, but you could have 2 handlers:

  1. database handler (write to DB)
  2. file handler (write to file or stream)

Just wrap the DB commit in a try-except, as in the sketch below. But be aware: the file will contain ALL log entries, not only those whose DB insert failed.
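
A minimal sketch of that two-handler setup, assuming a hypothetical insert_log_row() function for the actual database write: the DB handler swallows its own failures, while the file handler independently receives every record.

import logging


def insert_log_row(record):
    """Hypothetical database write; replace with your real insert."""
    raise NotImplementedError


class QuietDBHandler(logging.Handler):
    def emit(self, record):
        try:
            insert_log_row(record)  # the DB commit, wrapped in try-except
        except Exception:
            pass  # DB failed; the file handler below still has the record


logger = logging.getLogger('dual')
logger.setLevel(logging.DEBUG)
logger.addHandler(QuietDBHandler())                     # 1. database handler
logger.addHandler(logging.FileHandler('fallback.log'))  # 2. file handler

logger.warning('this goes to the file, and to the DB when it is up')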


I recently wrote my own database logger in Python. Since I couldn't find any example, I thought I'd post mine here. It works with MS SQL.

The database table could look like this:

CREATE TABLE [db_name].[log](
    [id] [bigint] IDENTITY(1,1) NOT NULL,
    [log_level] [int] NULL,
    [log_levelname] [char](32) NULL,
    [log] [char](2048) NOT NULL,
    [created_at] [datetime2](7) NOT NULL,
    [created_by] [char](32) NOT NULL
) ON [PRIMARY]

The class itself:

import logging
import time

import pymssql


class LogDBHandler(logging.Handler):
    '''
    Customized logging handler that puts logs to the database.
    pymssql required
    '''
    def __init__(self, sql_conn, sql_cursor, db_tbl_log):
        logging.Handler.__init__(self)
        self.sql_cursor = sql_cursor
        self.sql_conn = sql_conn
        self.db_tbl_log = db_tbl_log

    def emit(self, record):
        # Set current time
        tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        # Render the full log message (applies %-style args, if any)
        log_msg = record.getMessage().strip()
        # Build a parameterized insert; pymssql handles the quoting, so there
        # is no need to escape the message by hand
        sql = ('INSERT INTO ' + self.db_tbl_log +
               ' (log_level, log_levelname, log, created_at, created_by) '
               'VALUES (%s, %s, %s, convert(datetime2(7), %s), %s)')
        try:
            self.sql_cursor.execute(
                sql,
                (record.levelno, record.levelname, log_msg, tm, record.name))
            self.sql_conn.commit()
        # If error - print it out on screen. Since DB is not working - there's
        # no point making a log about it to the database :)
        except pymssql.Error:
            print(sql)
            print('CRITICAL DB ERROR! Logging to database not possible!')

And a usage example:

import pymssql
import time
import logging

db_server = 'servername'
db_user = 'db_user'
db_password = 'db_pass'
db_dbname = 'db_name'
db_tbl_log = 'log'

log_file_path = 'C:\\Users\\Yourname\\Desktop\\test_log.txt'
log_error_level = 'DEBUG'           # level for MY_LOGGER (setLevel accepts level names)
log_to_db = True                    # LOG to database?

class LogDBHandler(logging.Handler):
    [...]

# Main settings for the database logging use
if log_to_db:
    # Make the connection to database for the logger
    log_conn = pymssql.connect(db_server, db_user, db_password, db_dbname, 30)
    log_cursor = log_conn.cursor()
    logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)

# Set logger
logging.basicConfig(filename=log_file_path)

# Set db handler for root logger
if log_to_db:
    logging.getLogger('').addHandler(logdb)
# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)

# Example variable
test_var = 'This is test message'

# Log the variable contents as an error
log.error('This error occurred: %s', test_var)  # let logging do the lazy formatting

The above will log both to the database and to the file. If the file is not needed, skip the 'logging.basicConfig(filename=log_file_path)' line. Everything logged through 'log' will be recorded as MY_LOGGER. If some external error appears (e.g. in an imported module), the error will appear as 'root', since the 'root' logger is also active and is using the database handler.
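
For example, a log call made on the root logger, which is what happens when third-party code calls logging.error() directly, is picked up by the same database handler and stored with created_by = 'root':

# Goes through the root logger, so the DB handler records it as 'root'
logging.error('failure raised somewhere in an imported module')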


Write yourself a handler that directs the logs to the database in question. When it fails, you can remove it from the logger's handler list. There are many ways to deal with the failure modes.
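
A minimal sketch of that idea, assuming a hypothetical write_to_db() function: the handler detaches itself from its logger the first time the database write fails, so later records skip the broken database entirely.

import logging


def write_to_db(record):
    """Hypothetical database insert; replace with your real one."""
    raise ConnectionError('database unreachable')


class SelfRemovingDBHandler(logging.Handler):
    def __init__(self, logger_name):
        super().__init__()
        self.logger_name = logger_name

    def emit(self, record):
        try:
            write_to_db(record)
        except Exception:
            # Database failed: remove this handler from the logger so
            # subsequent log calls no longer attempt the broken DB.
            logging.getLogger(self.logger_name).removeHandler(self)


logger = logging.getLogger('app')
logger.addHandler(SelfRemovingDBHandler('app'))
logger.error('first failure detaches the DB handler')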