logging
Logging package for Python. Based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
1# Copyright 2001-2022 by Vinay Sajip. All Rights Reserved. 2# 3# Permission to use, copy, modify, and distribute this software and its 4# documentation for any purpose and without fee is hereby granted, 5# provided that the above copyright notice appear in all copies and that 6# both that copyright notice and this permission notice appear in 7# supporting documentation, and that the name of Vinay Sajip 8# not be used in advertising or publicity pertaining to distribution 9# of the software without specific, written prior permission. 10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING 11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL 12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR 13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER 14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 17""" 18Logging package for Python. Based on PEP 282 and comments thereto in 19comp.lang.python. 20 21Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved. 22 23To use, simply 'import logging' and log away! 24""" 25 26import sys, os, time, io, re, traceback, warnings, weakref, collections.abc 27 28from types import GenericAlias 29from string import Template 30from string import Formatter as StrFormatter 31 32 33__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 34 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 35 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 36 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 37 'captureWarnings', 'critical', 'debug', 'disable', 'error', 38 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 39 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown', 40 'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory', 41 'lastResort', 'raiseExceptions', 'getLevelNamesMapping', 42 'getHandlerByName', 'getHandlerNames'] 43 44import threading 45 46__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" 47__status__ = "production" 48# The following module attributes are no longer updated. 49__version__ = "0.5.1.2" 50__date__ = "07 February 2010" 51 52#--------------------------------------------------------------------------- 53# Miscellaneous module data 54#--------------------------------------------------------------------------- 55 56# 57#_startTime is used as the base when calculating the relative time of events 58# 59_startTime = time.time_ns() 60 61# 62#raiseExceptions is used to see if exceptions during handling should be 63#propagated 64# 65raiseExceptions = True 66 67# 68# If you don't want threading information in the log, set this to False 69# 70logThreads = True 71 72# 73# If you don't want multiprocessing information in the log, set this to False 74# 75logMultiprocessing = True 76 77# 78# If you don't want process information in the log, set this to False 79# 80logProcesses = True 81 82# 83# If you don't want asyncio task information in the log, set this to False 84# 85logAsyncioTasks = True 86 87#--------------------------------------------------------------------------- 88# Level related stuff 89#--------------------------------------------------------------------------- 90# 91# Default levels and level names, these can be replaced with any positive set 92# of values having corresponding names. 
There is a pseudo-level, NOTSET, which 93# is only really there as a lower limit for user-defined levels. Handlers and 94# loggers are initialized with NOTSET so that they will log all messages, even 95# at user-defined levels. 96# 97 98CRITICAL = 50 99FATAL = CRITICAL 100ERROR = 40 101WARNING = 30 102WARN = WARNING 103INFO = 20 104DEBUG = 10 105NOTSET = 0 106 107_levelToName = { 108 CRITICAL: 'CRITICAL', 109 ERROR: 'ERROR', 110 WARNING: 'WARNING', 111 INFO: 'INFO', 112 DEBUG: 'DEBUG', 113 NOTSET: 'NOTSET', 114} 115_nameToLevel = { 116 'CRITICAL': CRITICAL, 117 'FATAL': FATAL, 118 'ERROR': ERROR, 119 'WARN': WARNING, 120 'WARNING': WARNING, 121 'INFO': INFO, 122 'DEBUG': DEBUG, 123 'NOTSET': NOTSET, 124} 125 126def getLevelNamesMapping(): 127 return _nameToLevel.copy() 128 129def getLevelName(level): 130 """ 131 Return the textual or numeric representation of logging level 'level'. 132 133 If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, 134 INFO, DEBUG) then you get the corresponding string. If you have 135 associated levels with names using addLevelName then the name you have 136 associated with 'level' is returned. 137 138 If a numeric value corresponding to one of the defined levels is passed 139 in, the corresponding string representation is returned. 140 141 If a string representation of the level is passed in, the corresponding 142 numeric value is returned. 143 144 If no matching numeric or string value is passed in, the string 145 'Level %s' % level is returned. 146 """ 147 # See Issues #22386, #27937 and #29220 for why it's this way 148 result = _levelToName.get(level) 149 if result is not None: 150 return result 151 result = _nameToLevel.get(level) 152 if result is not None: 153 return result 154 return "Level %s" % level 155 156def addLevelName(level, levelName): 157 """ 158 Associate 'levelName' with 'level'. 159 160 This is used when converting levels to text during message formatting. 161 """ 162 with _lock: 163 _levelToName[level] = levelName 164 _nameToLevel[levelName] = level 165 166if hasattr(sys, "_getframe"): 167 currentframe = lambda: sys._getframe(1) 168else: #pragma: no cover 169 def currentframe(): 170 """Return the frame object for the caller's stack frame.""" 171 try: 172 raise Exception 173 except Exception as exc: 174 return exc.__traceback__.tb_frame.f_back 175 176# 177# _srcfile is used when walking the stack to check when we've got the first 178# caller stack frame, by skipping frames whose filename is that of this 179# module's source. It therefore should contain the filename of this module's 180# source file. 181# 182# Ordinarily we would use __file__ for this, but frozen modules don't always 183# have __file__ set, for some reason (see Issue #21736). Thus, we get the 184# filename from a handy code object from a function defined in this module. 185# (There's no particular reason for picking addLevelName.) 186# 187 188_srcfile = os.path.normcase(addLevelName.__code__.co_filename) 189 190# _srcfile is only used in conjunction with sys._getframe(). 191# Setting _srcfile to None will prevent findCaller() from being called. This 192# way, you can avoid the overhead of fetching caller information. 193 194# The following is based on warnings._is_internal_frame. It makes sure that 195# frames of the import mechanism are skipped when logging at module level and 196# using a stacklevel value greater than one. 
197def _is_internal_frame(frame): 198 """Signal whether the frame is a CPython or logging module internal.""" 199 filename = os.path.normcase(frame.f_code.co_filename) 200 return filename == _srcfile or ( 201 "importlib" in filename and "_bootstrap" in filename 202 ) 203 204 205def _checkLevel(level): 206 if isinstance(level, int): 207 rv = level 208 elif str(level) == level: 209 if level not in _nameToLevel: 210 raise ValueError("Unknown level: %r" % level) 211 rv = _nameToLevel[level] 212 else: 213 raise TypeError("Level not an integer or a valid string: %r" 214 % (level,)) 215 return rv 216 217#--------------------------------------------------------------------------- 218# Thread-related stuff 219#--------------------------------------------------------------------------- 220 221# 222#_lock is used to serialize access to shared data structures in this module. 223#This needs to be an RLock because fileConfig() creates and configures 224#Handlers, and so might arbitrary user threads. Since Handler code updates the 225#shared dictionary _handlers, it needs to acquire the lock. But if configuring, 226#the lock would already have been acquired - so we need an RLock. 227#The same argument applies to Loggers and Manager.loggerDict. 228# 229_lock = threading.RLock() 230 231def _prepareFork(): 232 """ 233 Prepare to fork a new child process by acquiring the module-level lock. 234 235 This should be used in conjunction with _afterFork(). 236 """ 237 # Wrap the lock acquisition in a try-except to prevent the lock from being 238 # abandoned in the event of an asynchronous exception. See gh-106238. 239 try: 240 _lock.acquire() 241 except BaseException: 242 _lock.release() 243 raise 244 245def _afterFork(): 246 """ 247 After a new child process has been forked, release the module-level lock. 248 249 This should be used in conjunction with _prepareFork(). 250 """ 251 _lock.release() 252 253 254# Prevent a held logging lock from blocking a child from logging. 255 256if not hasattr(os, 'register_at_fork'): # Windows and friends. 257 def _register_at_fork_reinit_lock(instance): 258 pass # no-op when os.register_at_fork does not exist. 259else: 260 # A collection of instances with a _at_fork_reinit method (logging.Handler) 261 # to be called in the child after forking. The weakref avoids us keeping 262 # discarded Handler instances alive. 263 _at_fork_reinit_lock_weakset = weakref.WeakSet() 264 265 def _register_at_fork_reinit_lock(instance): 266 with _lock: 267 _at_fork_reinit_lock_weakset.add(instance) 268 269 def _after_at_fork_child_reinit_locks(): 270 for handler in _at_fork_reinit_lock_weakset: 271 handler._at_fork_reinit() 272 273 # _prepareFork() was called in the parent before forking. 274 # The lock is reinitialized to unlocked state. 275 _lock._at_fork_reinit() 276 277 os.register_at_fork(before=_prepareFork, 278 after_in_child=_after_at_fork_child_reinit_locks, 279 after_in_parent=_afterFork) 280 281 282#--------------------------------------------------------------------------- 283# The logging record 284#--------------------------------------------------------------------------- 285 286class LogRecord(object): 287 """ 288 A LogRecord instance represents an event being logged. 289 290 LogRecord instances are created every time something is logged. They 291 contain all the information pertinent to the event being logged. The 292 main information passed in is in msg and args, which are combined 293 using str(msg) % args to create the message field of the record. 
The 294 record also includes information such as when the record was created, 295 the source line where the logging call was made, and any exception 296 information to be logged. 297 """ 298 def __init__(self, name, level, pathname, lineno, 299 msg, args, exc_info, func=None, sinfo=None, **kwargs): 300 """ 301 Initialize a logging record with interesting information. 302 """ 303 ct = time.time_ns() 304 self.name = name 305 self.msg = msg 306 # 307 # The following statement allows passing of a dictionary as a sole 308 # argument, so that you can do something like 309 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 310 # Suggested by Stefan Behnel. 311 # Note that without the test for args[0], we get a problem because 312 # during formatting, we test to see if the arg is present using 313 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 314 # and if the passed arg fails 'if self.args:' then no formatting 315 # is done. For example, logger.warning('Value is %d', 0) would log 316 # 'Value is %d' instead of 'Value is 0'. 317 # For the use case of passing a dictionary, this should not be a 318 # problem. 319 # Issue #21172: a request was made to relax the isinstance check 320 # to hasattr(args[0], '__getitem__'). However, the docs on string 321 # formatting still seem to suggest a mapping object is required. 322 # Thus, while not removing the isinstance check, it does now look 323 # for collections.abc.Mapping rather than, as before, dict. 324 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 325 and args[0]): 326 args = args[0] 327 self.args = args 328 self.levelname = getLevelName(level) 329 self.levelno = level 330 self.pathname = pathname 331 try: 332 self.filename = os.path.basename(pathname) 333 self.module = os.path.splitext(self.filename)[0] 334 except (TypeError, ValueError, AttributeError): 335 self.filename = pathname 336 self.module = "Unknown module" 337 self.exc_info = exc_info 338 self.exc_text = None # used to cache the traceback text 339 self.stack_info = sinfo 340 self.lineno = lineno 341 self.funcName = func 342 self.created = ct / 1e9 # ns to float seconds 343 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 344 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 345 # Convert to float by adding 0.0 for historical reasons. See gh-89047 346 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 347 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 348 # ns -> sec conversion can round up, e.g: 349 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 350 self.msecs = 0.0 351 352 self.relativeCreated = (ct - _startTime) / 1e6 353 if logThreads: 354 self.thread = threading.get_ident() 355 self.threadName = threading.current_thread().name 356 else: # pragma: no cover 357 self.thread = None 358 self.threadName = None 359 if not logMultiprocessing: # pragma: no cover 360 self.processName = None 361 else: 362 self.processName = 'MainProcess' 363 mp = sys.modules.get('multiprocessing') 364 if mp is not None: 365 # Errors may occur if multiprocessing has not finished loading 366 # yet - e.g. if a custom import hook causes third-party code 367 # to run when multiprocessing calls import. 
See issue 8200 368 # for an example 369 try: 370 self.processName = mp.current_process().name 371 except Exception: #pragma: no cover 372 pass 373 if logProcesses and hasattr(os, 'getpid'): 374 self.process = os.getpid() 375 else: 376 self.process = None 377 378 self.taskName = None 379 if logAsyncioTasks: 380 asyncio = sys.modules.get('asyncio') 381 if asyncio: 382 try: 383 self.taskName = asyncio.current_task().get_name() 384 except Exception: 385 pass 386 387 def __repr__(self): 388 return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, 389 self.pathname, self.lineno, self.msg) 390 391 def getMessage(self): 392 """ 393 Return the message for this LogRecord. 394 395 Return the message for this LogRecord after merging any user-supplied 396 arguments with the message. 397 """ 398 msg = str(self.msg) 399 if self.args: 400 msg = msg % self.args 401 return msg 402 403# 404# Determine which class to use when instantiating log records. 405# 406_logRecordFactory = LogRecord 407 408def setLogRecordFactory(factory): 409 """ 410 Set the factory to be used when instantiating a log record. 411 412 :param factory: A callable which will be called to instantiate 413 a log record. 414 """ 415 global _logRecordFactory 416 _logRecordFactory = factory 417 418def getLogRecordFactory(): 419 """ 420 Return the factory to be used when instantiating a log record. 421 """ 422 423 return _logRecordFactory 424 425def makeLogRecord(dict): 426 """ 427 Make a LogRecord whose attributes are defined by the specified dictionary, 428 This function is useful for converting a logging event received over 429 a socket connection (which is sent as a dictionary) into a LogRecord 430 instance. 431 """ 432 rv = _logRecordFactory(None, None, "", 0, "", (), None, None) 433 rv.__dict__.update(dict) 434 return rv 435 436 437#--------------------------------------------------------------------------- 438# Formatter classes and functions 439#--------------------------------------------------------------------------- 440_str_formatter = StrFormatter() 441del StrFormatter 442 443 444class PercentStyle(object): 445 446 default_format = '%(message)s' 447 asctime_format = '%(asctime)s' 448 asctime_search = '%(asctime)' 449 validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I) 450 451 def __init__(self, fmt, *, defaults=None): 452 self._fmt = fmt or self.default_format 453 self._defaults = defaults 454 455 def usesTime(self): 456 return self._fmt.find(self.asctime_search) >= 0 457 458 def validate(self): 459 """Validate the input format, ensure it matches the correct style""" 460 if not self.validation_pattern.search(self._fmt): 461 raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0])) 462 463 def _format(self, record): 464 if defaults := self._defaults: 465 values = defaults | record.__dict__ 466 else: 467 values = record.__dict__ 468 return self._fmt % values 469 470 def format(self, record): 471 try: 472 return self._format(record) 473 except KeyError as e: 474 raise ValueError('Formatting field not found in record: %s' % e) 475 476 477class StrFormatStyle(PercentStyle): 478 default_format = '{message}' 479 asctime_format = '{asctime}' 480 asctime_search = '{asctime' 481 482 fmt_spec = re.compile(r'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$', re.I) 483 field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$') 484 485 def _format(self, record): 486 if defaults := self._defaults: 487 values = defaults | record.__dict__ 
488 else: 489 values = record.__dict__ 490 return self._fmt.format(**values) 491 492 def validate(self): 493 """Validate the input format, ensure it is the correct string formatting style""" 494 fields = set() 495 try: 496 for _, fieldname, spec, conversion in _str_formatter.parse(self._fmt): 497 if fieldname: 498 if not self.field_spec.match(fieldname): 499 raise ValueError('invalid field name/expression: %r' % fieldname) 500 fields.add(fieldname) 501 if conversion and conversion not in 'rsa': 502 raise ValueError('invalid conversion: %r' % conversion) 503 if spec and not self.fmt_spec.match(spec): 504 raise ValueError('bad specifier: %r' % spec) 505 except ValueError as e: 506 raise ValueError('invalid format: %s' % e) 507 if not fields: 508 raise ValueError('invalid format: no fields') 509 510 511class StringTemplateStyle(PercentStyle): 512 default_format = '${message}' 513 asctime_format = '${asctime}' 514 asctime_search = '${asctime}' 515 516 def __init__(self, *args, **kwargs): 517 super().__init__(*args, **kwargs) 518 self._tpl = Template(self._fmt) 519 520 def usesTime(self): 521 fmt = self._fmt 522 return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_search) >= 0 523 524 def validate(self): 525 pattern = Template.pattern 526 fields = set() 527 for m in pattern.finditer(self._fmt): 528 d = m.groupdict() 529 if d['named']: 530 fields.add(d['named']) 531 elif d['braced']: 532 fields.add(d['braced']) 533 elif m.group(0) == '$': 534 raise ValueError('invalid format: bare \'$\' not allowed') 535 if not fields: 536 raise ValueError('invalid format: no fields') 537 538 def _format(self, record): 539 if defaults := self._defaults: 540 values = defaults | record.__dict__ 541 else: 542 values = record.__dict__ 543 return self._tpl.substitute(**values) 544 545 546BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" 547 548_STYLES = { 549 '%': (PercentStyle, BASIC_FORMAT), 550 '{': (StrFormatStyle, '{levelname}:{name}:{message}'), 551 '$': (StringTemplateStyle, '${levelname}:${name}:${message}'), 552} 553 554class Formatter(object): 555 """ 556 Formatter instances are used to convert a LogRecord to text. 557 558 Formatters need to know how a LogRecord is constructed. They are 559 responsible for converting a LogRecord to (usually) a string which can 560 be interpreted by either a human or an external system. The base Formatter 561 allows a formatting string to be specified. If none is supplied, the 562 style-dependent default value, "%(message)s", "{message}", or 563 "${message}", is used. 564 565 The Formatter can be initialized with a format string which makes use of 566 knowledge of the LogRecord attributes - e.g. the default value mentioned 567 above makes use of the fact that the user's message and arguments are pre- 568 formatted into a LogRecord's message attribute. 
Currently, the useful 569 attributes in a LogRecord are described by: 570 571 %(name)s Name of the logger (logging channel) 572 %(levelno)s Numeric logging level for the message (DEBUG, INFO, 573 WARNING, ERROR, CRITICAL) 574 %(levelname)s Text logging level for the message ("DEBUG", "INFO", 575 "WARNING", "ERROR", "CRITICAL") 576 %(pathname)s Full pathname of the source file where the logging 577 call was issued (if available) 578 %(filename)s Filename portion of pathname 579 %(module)s Module (name portion of filename) 580 %(lineno)d Source line number where the logging call was issued 581 (if available) 582 %(funcName)s Function name 583 %(created)f Time when the LogRecord was created (time.time_ns() / 1e9 584 return value) 585 %(asctime)s Textual time when the LogRecord was created 586 %(msecs)d Millisecond portion of the creation time 587 %(relativeCreated)d Time in milliseconds when the LogRecord was created, 588 relative to the time the logging module was loaded 589 (typically at application startup time) 590 %(thread)d Thread ID (if available) 591 %(threadName)s Thread name (if available) 592 %(taskName)s Task name (if available) 593 %(process)d Process ID (if available) 594 %(processName)s Process name (if available) 595 %(message)s The result of record.getMessage(), computed just as 596 the record is emitted 597 """ 598 599 converter = time.localtime 600 601 def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *, 602 defaults=None): 603 """ 604 Initialize the formatter with specified format strings. 605 606 Initialize the formatter either with the specified format string, or a 607 default as described above. Allow for specialized date formatting with 608 the optional datefmt argument. If datefmt is omitted, you get an 609 ISO8601-like (or RFC 3339-like) format. 610 611 Use a style parameter of '%', '{' or '$' to specify that you want to 612 use one of %-formatting, :meth:`str.format` (``{}``) formatting or 613 :class:`string.Template` formatting in your format string. 614 615 .. versionchanged:: 3.2 616 Added the ``style`` parameter. 617 """ 618 if style not in _STYLES: 619 raise ValueError('Style must be one of: %s' % ','.join( 620 _STYLES.keys())) 621 self._style = _STYLES[style][0](fmt, defaults=defaults) 622 if validate: 623 self._style.validate() 624 625 self._fmt = self._style._fmt 626 self.datefmt = datefmt 627 628 default_time_format = '%Y-%m-%d %H:%M:%S' 629 default_msec_format = '%s,%03d' 630 631 def formatTime(self, record, datefmt=None): 632 """ 633 Return the creation time of the specified LogRecord as formatted text. 634 635 This method should be called from format() by a formatter which 636 wants to make use of a formatted time. This method can be overridden 637 in formatters to provide for any specific requirement, but the 638 basic behaviour is as follows: if datefmt (a string) is specified, 639 it is used with time.strftime() to format the creation time of the 640 record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. 641 The resulting string is returned. This function uses a user-configurable 642 function to convert the creation time to a tuple. By default, 643 time.localtime() is used; to change this for a particular formatter 644 instance, set the 'converter' attribute to a function with the same 645 signature as time.localtime() or time.gmtime(). To change it for all 646 formatters, for example if you want all logging times to be shown in GMT, 647 set the 'converter' attribute in the Formatter class. 
648 """ 649 ct = self.converter(record.created) 650 if datefmt: 651 s = time.strftime(datefmt, ct) 652 else: 653 s = time.strftime(self.default_time_format, ct) 654 if self.default_msec_format: 655 s = self.default_msec_format % (s, record.msecs) 656 return s 657 658 def formatException(self, ei): 659 """ 660 Format and return the specified exception information as a string. 661 662 This default implementation just uses 663 traceback.print_exception() 664 """ 665 sio = io.StringIO() 666 tb = ei[2] 667 # See issues #9427, #1553375. Commented out for now. 668 #if getattr(self, 'fullstack', False): 669 # traceback.print_stack(tb.tb_frame.f_back, file=sio) 670 traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio) 671 s = sio.getvalue() 672 sio.close() 673 if s[-1:] == "\n": 674 s = s[:-1] 675 return s 676 677 def usesTime(self): 678 """ 679 Check if the format uses the creation time of the record. 680 """ 681 return self._style.usesTime() 682 683 def formatMessage(self, record): 684 return self._style.format(record) 685 686 def formatStack(self, stack_info): 687 """ 688 This method is provided as an extension point for specialized 689 formatting of stack information. 690 691 The input data is a string as returned from a call to 692 :func:`traceback.print_stack`, but with the last trailing newline 693 removed. 694 695 The base implementation just returns the value passed in. 696 """ 697 return stack_info 698 699 def format(self, record): 700 """ 701 Format the specified record as text. 702 703 The record's attribute dictionary is used as the operand to a 704 string formatting operation which yields the returned string. 705 Before formatting the dictionary, a couple of preparatory steps 706 are carried out. The message attribute of the record is computed 707 using LogRecord.getMessage(). If the formatting string uses the 708 time (as determined by a call to usesTime(), formatTime() is 709 called to format the event time. If there is exception information, 710 it is formatted using formatException() and appended to the message. 711 """ 712 record.message = record.getMessage() 713 if self.usesTime(): 714 record.asctime = self.formatTime(record, self.datefmt) 715 s = self.formatMessage(record) 716 if record.exc_info: 717 # Cache the traceback text to avoid converting it multiple times 718 # (it's constant anyway) 719 if not record.exc_text: 720 record.exc_text = self.formatException(record.exc_info) 721 if record.exc_text: 722 if s[-1:] != "\n": 723 s = s + "\n" 724 s = s + record.exc_text 725 if record.stack_info: 726 if s[-1:] != "\n": 727 s = s + "\n" 728 s = s + self.formatStack(record.stack_info) 729 return s 730 731# 732# The default formatter to use when no other is specified 733# 734_defaultFormatter = Formatter() 735 736class BufferingFormatter(object): 737 """ 738 A formatter suitable for formatting a number of records. 739 """ 740 def __init__(self, linefmt=None): 741 """ 742 Optionally specify a formatter which will be used to format each 743 individual record. 744 """ 745 if linefmt: 746 self.linefmt = linefmt 747 else: 748 self.linefmt = _defaultFormatter 749 750 def formatHeader(self, records): 751 """ 752 Return the header string for the specified records. 753 """ 754 return "" 755 756 def formatFooter(self, records): 757 """ 758 Return the footer string for the specified records. 759 """ 760 return "" 761 762 def format(self, records): 763 """ 764 Format the specified records and return the result as a string. 
765 """ 766 rv = "" 767 if len(records) > 0: 768 rv = rv + self.formatHeader(records) 769 for record in records: 770 rv = rv + self.linefmt.format(record) 771 rv = rv + self.formatFooter(records) 772 return rv 773 774#--------------------------------------------------------------------------- 775# Filter classes and functions 776#--------------------------------------------------------------------------- 777 778class Filter(object): 779 """ 780 Filter instances are used to perform arbitrary filtering of LogRecords. 781 782 Loggers and Handlers can optionally use Filter instances to filter 783 records as desired. The base filter class only allows events which are 784 below a certain point in the logger hierarchy. For example, a filter 785 initialized with "A.B" will allow events logged by loggers "A.B", 786 "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If 787 initialized with the empty string, all events are passed. 788 """ 789 def __init__(self, name=''): 790 """ 791 Initialize a filter. 792 793 Initialize with the name of the logger which, together with its 794 children, will have its events allowed through the filter. If no 795 name is specified, allow every event. 796 """ 797 self.name = name 798 self.nlen = len(name) 799 800 def filter(self, record): 801 """ 802 Determine if the specified record is to be logged. 803 804 Returns True if the record should be logged, or False otherwise. 805 If deemed appropriate, the record may be modified in-place. 806 """ 807 if self.nlen == 0: 808 return True 809 elif self.name == record.name: 810 return True 811 elif record.name.find(self.name, 0, self.nlen) != 0: 812 return False 813 return (record.name[self.nlen] == ".") 814 815class Filterer(object): 816 """ 817 A base class for loggers and handlers which allows them to share 818 common code. 819 """ 820 def __init__(self): 821 """ 822 Initialize the list of filters to be an empty list. 823 """ 824 self.filters = [] 825 826 def addFilter(self, filter): 827 """ 828 Add the specified filter to this handler. 829 """ 830 if not (filter in self.filters): 831 self.filters.append(filter) 832 833 def removeFilter(self, filter): 834 """ 835 Remove the specified filter from this handler. 836 """ 837 if filter in self.filters: 838 self.filters.remove(filter) 839 840 def filter(self, record): 841 """ 842 Determine if a record is loggable by consulting all the filters. 843 844 The default is to allow the record to be logged; any filter can veto 845 this by returning a false value. 846 If a filter attached to a handler returns a log record instance, 847 then that instance is used in place of the original log record in 848 any further processing of the event by that handler. 849 If a filter returns any other true value, the original log record 850 is used in any further processing of the event by that handler. 851 852 If none of the filters return false values, this method returns 853 a log record. 854 If any of the filters return a false value, this method returns 855 a false value. 856 857 .. versionchanged:: 3.2 858 859 Allow filters to be just callables. 860 861 .. versionchanged:: 3.12 862 Allow filters to return a LogRecord instead of 863 modifying it in place. 
864 """ 865 for f in self.filters: 866 if hasattr(f, 'filter'): 867 result = f.filter(record) 868 else: 869 result = f(record) # assume callable - will raise if not 870 if not result: 871 return False 872 if isinstance(result, LogRecord): 873 record = result 874 return record 875 876#--------------------------------------------------------------------------- 877# Handler classes and functions 878#--------------------------------------------------------------------------- 879 880_handlers = weakref.WeakValueDictionary() #map of handler names to handlers 881_handlerList = [] # added to allow handlers to be removed in reverse of order initialized 882 883def _removeHandlerRef(wr): 884 """ 885 Remove a handler reference from the internal cleanup list. 886 """ 887 # This function can be called during module teardown, when globals are 888 # set to None. It can also be called from another thread. So we need to 889 # pre-emptively grab the necessary globals and check if they're None, 890 # to prevent race conditions and failures during interpreter shutdown. 891 handlers, lock = _handlerList, _lock 892 if lock and handlers: 893 with lock: 894 try: 895 handlers.remove(wr) 896 except ValueError: 897 pass 898 899def _addHandlerRef(handler): 900 """ 901 Add a handler to the internal cleanup list using a weak reference. 902 """ 903 with _lock: 904 _handlerList.append(weakref.ref(handler, _removeHandlerRef)) 905 906 907def getHandlerByName(name): 908 """ 909 Get a handler with the specified *name*, or None if there isn't one with 910 that name. 911 """ 912 return _handlers.get(name) 913 914 915def getHandlerNames(): 916 """ 917 Return all known handler names as an immutable set. 918 """ 919 return frozenset(_handlers) 920 921 922class Handler(Filterer): 923 """ 924 Handler instances dispatch logging events to specific destinations. 925 926 The base handler class. Acts as a placeholder which defines the Handler 927 interface. Handlers can optionally use Formatter instances to format 928 records as desired. By default, no formatter is specified; in this case, 929 the 'raw' message as determined by record.message is logged. 930 """ 931 def __init__(self, level=NOTSET): 932 """ 933 Initializes the instance - basically setting the formatter to None 934 and the filter list to empty. 935 """ 936 Filterer.__init__(self) 937 self._name = None 938 self.level = _checkLevel(level) 939 self.formatter = None 940 self._closed = False 941 # Add the handler to the global _handlerList (for cleanup on shutdown) 942 _addHandlerRef(self) 943 self.createLock() 944 945 def get_name(self): 946 return self._name 947 948 def set_name(self, name): 949 with _lock: 950 if self._name in _handlers: 951 del _handlers[self._name] 952 self._name = name 953 if name: 954 _handlers[name] = self 955 956 name = property(get_name, set_name) 957 958 def createLock(self): 959 """ 960 Acquire a thread lock for serializing access to the underlying I/O. 961 """ 962 self.lock = threading.RLock() 963 _register_at_fork_reinit_lock(self) 964 965 def _at_fork_reinit(self): 966 self.lock._at_fork_reinit() 967 968 def acquire(self): 969 """ 970 Acquire the I/O thread lock. 971 """ 972 if self.lock: 973 self.lock.acquire() 974 975 def release(self): 976 """ 977 Release the I/O thread lock. 978 """ 979 if self.lock: 980 self.lock.release() 981 982 def setLevel(self, level): 983 """ 984 Set the logging level of this handler. level must be an int or a str. 
985 """ 986 self.level = _checkLevel(level) 987 988 def format(self, record): 989 """ 990 Format the specified record. 991 992 If a formatter is set, use it. Otherwise, use the default formatter 993 for the module. 994 """ 995 if self.formatter: 996 fmt = self.formatter 997 else: 998 fmt = _defaultFormatter 999 return fmt.format(record) 1000 1001 def emit(self, record): 1002 """ 1003 Do whatever it takes to actually log the specified logging record. 1004 1005 This version is intended to be implemented by subclasses and so 1006 raises a NotImplementedError. 1007 """ 1008 raise NotImplementedError('emit must be implemented ' 1009 'by Handler subclasses') 1010 1011 def handle(self, record): 1012 """ 1013 Conditionally emit the specified logging record. 1014 1015 Emission depends on filters which may have been added to the handler. 1016 Wrap the actual emission of the record with acquisition/release of 1017 the I/O thread lock. 1018 1019 Returns an instance of the log record that was emitted 1020 if it passed all filters, otherwise a false value is returned. 1021 """ 1022 rv = self.filter(record) 1023 if isinstance(rv, LogRecord): 1024 record = rv 1025 if rv: 1026 with self.lock: 1027 self.emit(record) 1028 return rv 1029 1030 def setFormatter(self, fmt): 1031 """ 1032 Set the formatter for this handler. 1033 """ 1034 self.formatter = fmt 1035 1036 def flush(self): 1037 """ 1038 Ensure all logging output has been flushed. 1039 1040 This version does nothing and is intended to be implemented by 1041 subclasses. 1042 """ 1043 pass 1044 1045 def close(self): 1046 """ 1047 Tidy up any resources used by the handler. 1048 1049 This version removes the handler from an internal map of handlers, 1050 _handlers, which is used for handler lookup by name. Subclasses 1051 should ensure that this gets called from overridden close() 1052 methods. 1053 """ 1054 #get the module data lock, as we're updating a shared structure. 1055 with _lock: 1056 self._closed = True 1057 if self._name and self._name in _handlers: 1058 del _handlers[self._name] 1059 1060 def handleError(self, record): 1061 """ 1062 Handle errors which occur during an emit() call. 1063 1064 This method should be called from handlers when an exception is 1065 encountered during an emit() call. If raiseExceptions is false, 1066 exceptions get silently ignored. This is what is mostly wanted 1067 for a logging system - most users will not care about errors in 1068 the logging system, they are more interested in application errors. 1069 You could, however, replace this with a custom handler if you wish. 1070 The record which was being processed is passed in to this method. 1071 """ 1072 if raiseExceptions and sys.stderr: # see issue 13807 1073 exc = sys.exception() 1074 try: 1075 sys.stderr.write('--- Logging error ---\n') 1076 traceback.print_exception(exc, limit=None, file=sys.stderr) 1077 sys.stderr.write('Call stack:\n') 1078 # Walk the stack frame up until we're out of logging, 1079 # so as to print the calling context. 
1080 frame = exc.__traceback__.tb_frame 1081 while (frame and os.path.dirname(frame.f_code.co_filename) == 1082 __path__[0]): 1083 frame = frame.f_back 1084 if frame: 1085 traceback.print_stack(frame, file=sys.stderr) 1086 else: 1087 # couldn't find the right stack frame, for some reason 1088 sys.stderr.write('Logged from file %s, line %s\n' % ( 1089 record.filename, record.lineno)) 1090 # Issue 18671: output logging message and arguments 1091 try: 1092 sys.stderr.write('Message: %r\n' 1093 'Arguments: %s\n' % (record.msg, 1094 record.args)) 1095 except RecursionError: # See issue 36272 1096 raise 1097 except Exception: 1098 sys.stderr.write('Unable to print the message and arguments' 1099 ' - possible formatting error.\nUse the' 1100 ' traceback above to help find the error.\n' 1101 ) 1102 except OSError: #pragma: no cover 1103 pass # see issue 5971 1104 finally: 1105 del exc 1106 1107 def __repr__(self): 1108 level = getLevelName(self.level) 1109 return '<%s (%s)>' % (self.__class__.__name__, level) 1110 1111class StreamHandler(Handler): 1112 """ 1113 A handler class which writes logging records, appropriately formatted, 1114 to a stream. Note that this class does not close the stream, as 1115 sys.stdout or sys.stderr may be used. 1116 """ 1117 1118 terminator = '\n' 1119 1120 def __init__(self, stream=None): 1121 """ 1122 Initialize the handler. 1123 1124 If stream is not specified, sys.stderr is used. 1125 """ 1126 Handler.__init__(self) 1127 if stream is None: 1128 stream = sys.stderr 1129 self.stream = stream 1130 1131 def flush(self): 1132 """ 1133 Flushes the stream. 1134 """ 1135 with self.lock: 1136 if self.stream and hasattr(self.stream, "flush"): 1137 self.stream.flush() 1138 1139 def emit(self, record): 1140 """ 1141 Emit a record. 1142 1143 If a formatter is specified, it is used to format the record. 1144 The record is then written to the stream with a trailing newline. If 1145 exception information is present, it is formatted using 1146 traceback.print_exception and appended to the stream. If the stream 1147 has an 'encoding' attribute, it is used to determine how to do the 1148 output to the stream. 1149 """ 1150 try: 1151 msg = self.format(record) 1152 stream = self.stream 1153 # issue 35046: merged two stream.writes into one. 1154 stream.write(msg + self.terminator) 1155 self.flush() 1156 except RecursionError: # See issue 36272 1157 raise 1158 except Exception: 1159 self.handleError(record) 1160 1161 def setStream(self, stream): 1162 """ 1163 Sets the StreamHandler's stream to the specified value, 1164 if it is different. 1165 1166 Returns the old stream, if the stream was changed, or None 1167 if it wasn't. 1168 """ 1169 if stream is self.stream: 1170 result = None 1171 else: 1172 result = self.stream 1173 with self.lock: 1174 self.flush() 1175 self.stream = stream 1176 return result 1177 1178 def __repr__(self): 1179 level = getLevelName(self.level) 1180 name = getattr(self.stream, 'name', '') 1181 # bpo-36015: name can be an int 1182 name = str(name) 1183 if name: 1184 name += ' ' 1185 return '<%s %s(%s)>' % (self.__class__.__name__, name, level) 1186 1187 __class_getitem__ = classmethod(GenericAlias) 1188 1189 1190class FileHandler(StreamHandler): 1191 """ 1192 A handler class which writes formatted logging records to disk files. 1193 """ 1194 def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None): 1195 """ 1196 Open the specified file and use it as the stream for logging. 
1197 """ 1198 # Issue #27493: add support for Path objects to be passed in 1199 filename = os.fspath(filename) 1200 #keep the absolute path, otherwise derived classes which use this 1201 #may come a cropper when the current directory changes 1202 self.baseFilename = os.path.abspath(filename) 1203 self.mode = mode 1204 self.encoding = encoding 1205 if "b" not in mode: 1206 self.encoding = io.text_encoding(encoding) 1207 self.errors = errors 1208 self.delay = delay 1209 # bpo-26789: FileHandler keeps a reference to the builtin open() 1210 # function to be able to open or reopen the file during Python 1211 # finalization. 1212 self._builtin_open = open 1213 if delay: 1214 #We don't open the stream, but we still need to call the 1215 #Handler constructor to set level, formatter, lock etc. 1216 Handler.__init__(self) 1217 self.stream = None 1218 else: 1219 StreamHandler.__init__(self, self._open()) 1220 1221 def close(self): 1222 """ 1223 Closes the stream. 1224 """ 1225 with self.lock: 1226 try: 1227 if self.stream: 1228 try: 1229 self.flush() 1230 finally: 1231 stream = self.stream 1232 self.stream = None 1233 if hasattr(stream, "close"): 1234 stream.close() 1235 finally: 1236 # Issue #19523: call unconditionally to 1237 # prevent a handler leak when delay is set 1238 # Also see Issue #42378: we also rely on 1239 # self._closed being set to True there 1240 StreamHandler.close(self) 1241 1242 def _open(self): 1243 """ 1244 Open the current base file with the (original) mode and encoding. 1245 Return the resulting stream. 1246 """ 1247 open_func = self._builtin_open 1248 return open_func(self.baseFilename, self.mode, 1249 encoding=self.encoding, errors=self.errors) 1250 1251 def emit(self, record): 1252 """ 1253 Emit a record. 1254 1255 If the stream was not opened because 'delay' was specified in the 1256 constructor, open it before calling the superclass's emit. 1257 1258 If stream is not open, current mode is 'w' and `_closed=True`, record 1259 will not be emitted (see Issue #42378). 1260 """ 1261 if self.stream is None: 1262 if self.mode != 'w' or not self._closed: 1263 self.stream = self._open() 1264 if self.stream: 1265 StreamHandler.emit(self, record) 1266 1267 def __repr__(self): 1268 level = getLevelName(self.level) 1269 return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level) 1270 1271 1272class _StderrHandler(StreamHandler): 1273 """ 1274 This class is like a StreamHandler using sys.stderr, but always uses 1275 whatever sys.stderr is currently set to rather than the value of 1276 sys.stderr at handler construction time. 1277 """ 1278 def __init__(self, level=NOTSET): 1279 """ 1280 Initialize the handler. 1281 """ 1282 Handler.__init__(self, level) 1283 1284 @property 1285 def stream(self): 1286 return sys.stderr 1287 1288 1289_defaultLastResort = _StderrHandler(WARNING) 1290lastResort = _defaultLastResort 1291 1292#--------------------------------------------------------------------------- 1293# Manager classes and functions 1294#--------------------------------------------------------------------------- 1295 1296class PlaceHolder(object): 1297 """ 1298 PlaceHolder instances are used in the Manager logger hierarchy to take 1299 the place of nodes for which no loggers have been defined. This class is 1300 intended for internal use only and not as part of the public API. 1301 """ 1302 def __init__(self, alogger): 1303 """ 1304 Initialize with the specified logger being a child of this placeholder. 
1305 """ 1306 self.loggerMap = { alogger : None } 1307 1308 def append(self, alogger): 1309 """ 1310 Add the specified logger as a child of this placeholder. 1311 """ 1312 if alogger not in self.loggerMap: 1313 self.loggerMap[alogger] = None 1314 1315# 1316# Determine which class to use when instantiating loggers. 1317# 1318 1319def setLoggerClass(klass): 1320 """ 1321 Set the class to be used when instantiating a logger. The class should 1322 define __init__() such that only a name argument is required, and the 1323 __init__() should call Logger.__init__() 1324 """ 1325 if klass != Logger: 1326 if not issubclass(klass, Logger): 1327 raise TypeError("logger not derived from logging.Logger: " 1328 + klass.__name__) 1329 global _loggerClass 1330 _loggerClass = klass 1331 1332def getLoggerClass(): 1333 """ 1334 Return the class to be used when instantiating a logger. 1335 """ 1336 return _loggerClass 1337 1338class Manager(object): 1339 """ 1340 There is [under normal circumstances] just one Manager instance, which 1341 holds the hierarchy of loggers. 1342 """ 1343 def __init__(self, rootnode): 1344 """ 1345 Initialize the manager with the root node of the logger hierarchy. 1346 """ 1347 self.root = rootnode 1348 self.disable = 0 1349 self.emittedNoHandlerWarning = False 1350 self.loggerDict = {} 1351 self.loggerClass = None 1352 self.logRecordFactory = None 1353 1354 @property 1355 def disable(self): 1356 return self._disable 1357 1358 @disable.setter 1359 def disable(self, value): 1360 self._disable = _checkLevel(value) 1361 1362 def getLogger(self, name): 1363 """ 1364 Get a logger with the specified name (channel name), creating it 1365 if it doesn't yet exist. This name is a dot-separated hierarchical 1366 name, such as "a", "a.b", "a.b.c" or similar. 1367 1368 If a PlaceHolder existed for the specified name [i.e. the logger 1369 didn't exist but a child of it did], replace it with the created 1370 logger and fix up the parent/child references which pointed to the 1371 placeholder to now point to the logger. 1372 """ 1373 rv = None 1374 if not isinstance(name, str): 1375 raise TypeError('A logger name must be a string') 1376 with _lock: 1377 if name in self.loggerDict: 1378 rv = self.loggerDict[name] 1379 if isinstance(rv, PlaceHolder): 1380 ph = rv 1381 rv = (self.loggerClass or _loggerClass)(name) 1382 rv.manager = self 1383 self.loggerDict[name] = rv 1384 self._fixupChildren(ph, rv) 1385 self._fixupParents(rv) 1386 else: 1387 rv = (self.loggerClass or _loggerClass)(name) 1388 rv.manager = self 1389 self.loggerDict[name] = rv 1390 self._fixupParents(rv) 1391 return rv 1392 1393 def setLoggerClass(self, klass): 1394 """ 1395 Set the class to be used when instantiating a logger with this Manager. 1396 """ 1397 if klass != Logger: 1398 if not issubclass(klass, Logger): 1399 raise TypeError("logger not derived from logging.Logger: " 1400 + klass.__name__) 1401 self.loggerClass = klass 1402 1403 def setLogRecordFactory(self, factory): 1404 """ 1405 Set the factory to be used when instantiating a log record with this 1406 Manager. 1407 """ 1408 self.logRecordFactory = factory 1409 1410 def _fixupParents(self, alogger): 1411 """ 1412 Ensure that there are either loggers or placeholders all the way 1413 from the specified logger to the root of the logger hierarchy. 
1414 """ 1415 name = alogger.name 1416 i = name.rfind(".") 1417 rv = None 1418 while (i > 0) and not rv: 1419 substr = name[:i] 1420 if substr not in self.loggerDict: 1421 self.loggerDict[substr] = PlaceHolder(alogger) 1422 else: 1423 obj = self.loggerDict[substr] 1424 if isinstance(obj, Logger): 1425 rv = obj 1426 else: 1427 assert isinstance(obj, PlaceHolder) 1428 obj.append(alogger) 1429 i = name.rfind(".", 0, i - 1) 1430 if not rv: 1431 rv = self.root 1432 alogger.parent = rv 1433 1434 def _fixupChildren(self, ph, alogger): 1435 """ 1436 Ensure that children of the placeholder ph are connected to the 1437 specified logger. 1438 """ 1439 name = alogger.name 1440 namelen = len(name) 1441 for c in ph.loggerMap.keys(): 1442 #The if means ... if not c.parent.name.startswith(nm) 1443 if c.parent.name[:namelen] != name: 1444 alogger.parent = c.parent 1445 c.parent = alogger 1446 1447 def _clear_cache(self): 1448 """ 1449 Clear the cache for all loggers in loggerDict 1450 Called when level changes are made 1451 """ 1452 1453 with _lock: 1454 for logger in self.loggerDict.values(): 1455 if isinstance(logger, Logger): 1456 logger._cache.clear() 1457 self.root._cache.clear() 1458 1459#--------------------------------------------------------------------------- 1460# Logger classes and functions 1461#--------------------------------------------------------------------------- 1462 1463class Logger(Filterer): 1464 """ 1465 Instances of the Logger class represent a single logging channel. A 1466 "logging channel" indicates an area of an application. Exactly how an 1467 "area" is defined is up to the application developer. Since an 1468 application can have any number of areas, logging channels are identified 1469 by a unique string. Application areas can be nested (e.g. an area 1470 of "input processing" might include sub-areas "read CSV files", "read 1471 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1472 channel names are organized into a namespace hierarchy where levels are 1473 separated by periods, much like the Java or Python package namespace. So 1474 in the instance given above, channel names might be "input" for the upper 1475 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1476 There is no arbitrary limit to the depth of nesting. 1477 """ 1478 _tls = threading.local() 1479 1480 def __init__(self, name, level=NOTSET): 1481 """ 1482 Initialize the logger with a name and an optional level. 1483 """ 1484 Filterer.__init__(self) 1485 self.name = name 1486 self.level = _checkLevel(level) 1487 self.parent = None 1488 self.propagate = True 1489 self.handlers = [] 1490 self.disabled = False 1491 self._cache = {} 1492 1493 def setLevel(self, level): 1494 """ 1495 Set the logging level of this logger. level must be an int or a str. 1496 """ 1497 self.level = _checkLevel(level) 1498 self.manager._clear_cache() 1499 1500 def debug(self, msg, *args, **kwargs): 1501 """ 1502 Log 'msg % args' with severity 'DEBUG'. 1503 1504 To pass exception information, use the keyword argument exc_info with 1505 a true value, e.g. 1506 1507 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1508 """ 1509 if self.isEnabledFor(DEBUG): 1510 self._log(DEBUG, msg, args, **kwargs) 1511 1512 def info(self, msg, *args, **kwargs): 1513 """ 1514 Log 'msg % args' with severity 'INFO'. 1515 1516 To pass exception information, use the keyword argument exc_info with 1517 a true value, e.g. 
1518 1519 logger.info("Houston, we have a %s", "notable problem", exc_info=True) 1520 """ 1521 if self.isEnabledFor(INFO): 1522 self._log(INFO, msg, args, **kwargs) 1523 1524 def warning(self, msg, *args, **kwargs): 1525 """ 1526 Log 'msg % args' with severity 'WARNING'. 1527 1528 To pass exception information, use the keyword argument exc_info with 1529 a true value, e.g. 1530 1531 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True) 1532 """ 1533 if self.isEnabledFor(WARNING): 1534 self._log(WARNING, msg, args, **kwargs) 1535 1536 def warn(self, msg, *args, **kwargs): 1537 warnings.warn("The 'warn' method is deprecated, " 1538 "use 'warning' instead", DeprecationWarning, 2) 1539 self.warning(msg, *args, **kwargs) 1540 1541 def error(self, msg, *args, **kwargs): 1542 """ 1543 Log 'msg % args' with severity 'ERROR'. 1544 1545 To pass exception information, use the keyword argument exc_info with 1546 a true value, e.g. 1547 1548 logger.error("Houston, we have a %s", "major problem", exc_info=True) 1549 """ 1550 if self.isEnabledFor(ERROR): 1551 self._log(ERROR, msg, args, **kwargs) 1552 1553 def exception(self, msg, *args, exc_info=True, **kwargs): 1554 """ 1555 Convenience method for logging an ERROR with exception information. 1556 """ 1557 self.error(msg, *args, exc_info=exc_info, **kwargs) 1558 1559 def critical(self, msg, *args, **kwargs): 1560 """ 1561 Log 'msg % args' with severity 'CRITICAL'. 1562 1563 To pass exception information, use the keyword argument exc_info with 1564 a true value, e.g. 1565 1566 logger.critical("Houston, we have a %s", "major disaster", exc_info=True) 1567 """ 1568 if self.isEnabledFor(CRITICAL): 1569 self._log(CRITICAL, msg, args, **kwargs) 1570 1571 def fatal(self, msg, *args, **kwargs): 1572 """ 1573 Don't use this method, use critical() instead. 1574 """ 1575 self.critical(msg, *args, **kwargs) 1576 1577 def log(self, level, msg, *args, **kwargs): 1578 """ 1579 Log 'msg % args' with the integer severity 'level'. 1580 1581 To pass exception information, use the keyword argument exc_info with 1582 a true value, e.g. 1583 1584 logger.log(level, "We have a %s", "mysterious problem", exc_info=True) 1585 """ 1586 if not isinstance(level, int): 1587 if raiseExceptions: 1588 raise TypeError("level must be an integer") 1589 else: 1590 return 1591 if self.isEnabledFor(level): 1592 self._log(level, msg, args, **kwargs) 1593 1594 def findCaller(self, stack_info=False, stacklevel=1): 1595 """ 1596 Find the stack frame of the caller so that we can note the source 1597 file name, line number and function name. 1598 """ 1599 f = currentframe() 1600 #On some versions of IronPython, currentframe() returns None if 1601 #IronPython isn't run with -X:Frames. 1602 if f is None: 1603 return "(unknown file)", 0, "(unknown function)", None 1604 while stacklevel > 0: 1605 next_f = f.f_back 1606 if next_f is None: 1607 ## We've got options here. 
1608 ## If we want to use the last (deepest) frame: 1609 break 1610 ## If we want to mimic the warnings module: 1611 #return ("sys", 1, "(unknown function)", None) 1612 ## If we want to be pedantic: 1613 #raise ValueError("call stack is not deep enough") 1614 f = next_f 1615 if not _is_internal_frame(f): 1616 stacklevel -= 1 1617 co = f.f_code 1618 sinfo = None 1619 if stack_info: 1620 with io.StringIO() as sio: 1621 sio.write("Stack (most recent call last):\n") 1622 traceback.print_stack(f, file=sio) 1623 sinfo = sio.getvalue() 1624 if sinfo[-1] == '\n': 1625 sinfo = sinfo[:-1] 1626 return co.co_filename, f.f_lineno, co.co_name, sinfo 1627 1628 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1629 func=None, extra=None, sinfo=None): 1630 """ 1631 A factory method which can be overridden in subclasses to create 1632 specialized LogRecords. 1633 """ 1634 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1635 sinfo) 1636 if extra is not None: 1637 for key in extra: 1638 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1639 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1640 rv.__dict__[key] = extra[key] 1641 return rv 1642 1643 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, 1644 stacklevel=1): 1645 """ 1646 Low-level logging routine which creates a LogRecord and then calls 1647 all the handlers of this logger to handle the record. 1648 """ 1649 sinfo = None 1650 if _srcfile: 1651 #IronPython doesn't track Python frames, so findCaller raises an 1652 #exception on some versions of IronPython. We trap it here so that 1653 #IronPython can use logging. 1654 try: 1655 fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel) 1656 except ValueError: # pragma: no cover 1657 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1658 else: # pragma: no cover 1659 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1660 if exc_info: 1661 if isinstance(exc_info, BaseException): 1662 exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 1663 elif not isinstance(exc_info, tuple): 1664 exc_info = sys.exc_info() 1665 record = self.makeRecord(self.name, level, fn, lno, msg, args, 1666 exc_info, func, extra, sinfo) 1667 self.handle(record) 1668 1669 def handle(self, record): 1670 """ 1671 Call the handlers for the specified record. 1672 1673 This method is used for unpickled records received from a socket, as 1674 well as those created locally. Logger-level filtering is applied. 1675 """ 1676 if self._is_disabled(): 1677 return 1678 1679 self._tls.in_progress = True 1680 try: 1681 maybe_record = self.filter(record) 1682 if not maybe_record: 1683 return 1684 if isinstance(maybe_record, LogRecord): 1685 record = maybe_record 1686 self.callHandlers(record) 1687 finally: 1688 self._tls.in_progress = False 1689 1690 def addHandler(self, hdlr): 1691 """ 1692 Add the specified handler to this logger. 1693 """ 1694 with _lock: 1695 if not (hdlr in self.handlers): 1696 self.handlers.append(hdlr) 1697 1698 def removeHandler(self, hdlr): 1699 """ 1700 Remove the specified handler from this logger. 1701 """ 1702 with _lock: 1703 if hdlr in self.handlers: 1704 self.handlers.remove(hdlr) 1705 1706 def hasHandlers(self): 1707 """ 1708 See if this logger has any handlers configured. 1709 1710 Loop through all handlers for this logger and its parents in the 1711 logger hierarchy. Return True if a handler was found, else False. 
1712 Stop searching up the hierarchy whenever a logger with the "propagate" 1713 attribute set to zero is found - that will be the last logger which 1714 is checked for the existence of handlers. 1715 """ 1716 c = self 1717 rv = False 1718 while c: 1719 if c.handlers: 1720 rv = True 1721 break 1722 if not c.propagate: 1723 break 1724 else: 1725 c = c.parent 1726 return rv 1727 1728 def callHandlers(self, record): 1729 """ 1730 Pass a record to all relevant handlers. 1731 1732 Loop through all handlers for this logger and its parents in the 1733 logger hierarchy. If no handler was found, output a one-off error 1734 message to sys.stderr. Stop searching up the hierarchy whenever a 1735 logger with the "propagate" attribute set to zero is found - that 1736 will be the last logger whose handlers are called. 1737 """ 1738 c = self 1739 found = 0 1740 while c: 1741 for hdlr in c.handlers: 1742 found = found + 1 1743 if record.levelno >= hdlr.level: 1744 hdlr.handle(record) 1745 if not c.propagate: 1746 c = None #break out 1747 else: 1748 c = c.parent 1749 if (found == 0): 1750 if lastResort: 1751 if record.levelno >= lastResort.level: 1752 lastResort.handle(record) 1753 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1754 sys.stderr.write("No handlers could be found for logger" 1755 " \"%s\"\n" % self.name) 1756 self.manager.emittedNoHandlerWarning = True 1757 1758 def getEffectiveLevel(self): 1759 """ 1760 Get the effective level for this logger. 1761 1762 Loop through this logger and its parents in the logger hierarchy, 1763 looking for a non-zero logging level. Return the first one found. 1764 """ 1765 logger = self 1766 while logger: 1767 if logger.level: 1768 return logger.level 1769 logger = logger.parent 1770 return NOTSET 1771 1772 def isEnabledFor(self, level): 1773 """ 1774 Is this logger enabled for level 'level'? 1775 """ 1776 if self._is_disabled(): 1777 return False 1778 1779 try: 1780 return self._cache[level] 1781 except KeyError: 1782 with _lock: 1783 if self.manager.disable >= level: 1784 is_enabled = self._cache[level] = False 1785 else: 1786 is_enabled = self._cache[level] = ( 1787 level >= self.getEffectiveLevel() 1788 ) 1789 return is_enabled 1790 1791 def getChild(self, suffix): 1792 """ 1793 Get a logger which is a descendant to this one. 1794 1795 This is a convenience method, such that 1796 1797 logging.getLogger('abc').getChild('def.ghi') 1798 1799 is the same as 1800 1801 logging.getLogger('abc.def.ghi') 1802 1803 It's useful, for example, when the parent logger is named using 1804 __name__ rather than a literal string. 1805 """ 1806 if self.root is not self: 1807 suffix = '.'.join((self.name, suffix)) 1808 return self.manager.getLogger(suffix) 1809 1810 def getChildren(self): 1811 1812 def _hierlevel(logger): 1813 if logger is logger.manager.root: 1814 return 0 1815 return 1 + logger.name.count('.') 1816 1817 d = self.manager.loggerDict 1818 with _lock: 1819 # exclude PlaceHolders - the last check is to ensure that lower-level 1820 # descendants aren't returned - if there are placeholders, a logger's 1821 # parent field might point to a grandparent or ancestor thereof. 
1822 return set(item for item in d.values() 1823 if isinstance(item, Logger) and item.parent is self and 1824 _hierlevel(item) == 1 + _hierlevel(item.parent)) 1825 1826 def _is_disabled(self): 1827 # We need to use getattr as it will only be set the first time a log 1828 # message is recorded on any given thread 1829 return self.disabled or getattr(self._tls, 'in_progress', False) 1830 1831 def __repr__(self): 1832 level = getLevelName(self.getEffectiveLevel()) 1833 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1834 1835 def __reduce__(self): 1836 if getLogger(self.name) is not self: 1837 import pickle 1838 raise pickle.PicklingError('logger cannot be pickled') 1839 return getLogger, (self.name,) 1840 1841 1842class RootLogger(Logger): 1843 """ 1844 A root logger is not that different to any other logger, except that 1845 it must have a logging level and there is only one instance of it in 1846 the hierarchy. 1847 """ 1848 def __init__(self, level): 1849 """ 1850 Initialize the logger with the name "root". 1851 """ 1852 Logger.__init__(self, "root", level) 1853 1854 def __reduce__(self): 1855 return getLogger, () 1856 1857_loggerClass = Logger 1858 1859class LoggerAdapter(object): 1860 """ 1861 An adapter for loggers which makes it easier to specify contextual 1862 information in logging output. 1863 """ 1864 1865 def __init__(self, logger, extra=None, merge_extra=False): 1866 """ 1867 Initialize the adapter with a logger and a dict-like object which 1868 provides contextual information. This constructor signature allows 1869 easy stacking of LoggerAdapters, if so desired. 1870 1871 You can effectively pass keyword arguments as shown in the 1872 following example: 1873 1874 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1875 1876 By default, LoggerAdapter objects will drop the "extra" argument 1877 passed on the individual log calls to use its own instead. 1878 1879 Initializing it with merge_extra=True will instead merge both 1880 maps when logging, the individual call extra taking precedence 1881 over the LoggerAdapter instance extra 1882 1883 .. versionchanged:: 3.13 1884 The *merge_extra* argument was added. 1885 """ 1886 self.logger = logger 1887 self.extra = extra 1888 self.merge_extra = merge_extra 1889 1890 def process(self, msg, kwargs): 1891 """ 1892 Process the logging message and keyword arguments passed in to 1893 a logging call to insert contextual information. You can either 1894 manipulate the message itself, the keyword args or both. Return 1895 the message and kwargs modified (or not) to suit your needs. 1896 1897 Normally, you'll only need to override this one method in a 1898 LoggerAdapter subclass for your specific needs. 1899 """ 1900 if self.merge_extra and "extra" in kwargs: 1901 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1902 else: 1903 kwargs["extra"] = self.extra 1904 return msg, kwargs 1905 1906 # 1907 # Boilerplate convenience methods 1908 # 1909 def debug(self, msg, *args, **kwargs): 1910 """ 1911 Delegate a debug call to the underlying logger. 1912 """ 1913 self.log(DEBUG, msg, *args, **kwargs) 1914 1915 def info(self, msg, *args, **kwargs): 1916 """ 1917 Delegate an info call to the underlying logger. 1918 """ 1919 self.log(INFO, msg, *args, **kwargs) 1920 1921 def warning(self, msg, *args, **kwargs): 1922 """ 1923 Delegate a warning call to the underlying logger. 
1924 """ 1925 self.log(WARNING, msg, *args, **kwargs) 1926 1927 def warn(self, msg, *args, **kwargs): 1928 warnings.warn("The 'warn' method is deprecated, " 1929 "use 'warning' instead", DeprecationWarning, 2) 1930 self.warning(msg, *args, **kwargs) 1931 1932 def error(self, msg, *args, **kwargs): 1933 """ 1934 Delegate an error call to the underlying logger. 1935 """ 1936 self.log(ERROR, msg, *args, **kwargs) 1937 1938 def exception(self, msg, *args, exc_info=True, **kwargs): 1939 """ 1940 Delegate an exception call to the underlying logger. 1941 """ 1942 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1943 1944 def critical(self, msg, *args, **kwargs): 1945 """ 1946 Delegate a critical call to the underlying logger. 1947 """ 1948 self.log(CRITICAL, msg, *args, **kwargs) 1949 1950 def log(self, level, msg, *args, **kwargs): 1951 """ 1952 Delegate a log call to the underlying logger, after adding 1953 contextual information from this adapter instance. 1954 """ 1955 if self.isEnabledFor(level): 1956 msg, kwargs = self.process(msg, kwargs) 1957 self.logger.log(level, msg, *args, **kwargs) 1958 1959 def isEnabledFor(self, level): 1960 """ 1961 Is this logger enabled for level 'level'? 1962 """ 1963 return self.logger.isEnabledFor(level) 1964 1965 def setLevel(self, level): 1966 """ 1967 Set the specified level on the underlying logger. 1968 """ 1969 self.logger.setLevel(level) 1970 1971 def getEffectiveLevel(self): 1972 """ 1973 Get the effective level for the underlying logger. 1974 """ 1975 return self.logger.getEffectiveLevel() 1976 1977 def hasHandlers(self): 1978 """ 1979 See if the underlying logger has any handlers. 1980 """ 1981 return self.logger.hasHandlers() 1982 1983 def _log(self, level, msg, args, **kwargs): 1984 """ 1985 Low-level log implementation, proxied to allow nested logger adapters. 1986 """ 1987 return self.logger._log(level, msg, args, **kwargs) 1988 1989 @property 1990 def manager(self): 1991 return self.logger.manager 1992 1993 @manager.setter 1994 def manager(self, value): 1995 self.logger.manager = value 1996 1997 @property 1998 def name(self): 1999 return self.logger.name 2000 2001 def __repr__(self): 2002 logger = self.logger 2003 level = getLevelName(logger.getEffectiveLevel()) 2004 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 2005 2006 __class_getitem__ = classmethod(GenericAlias) 2007 2008root = RootLogger(WARNING) 2009Logger.root = root 2010Logger.manager = Manager(Logger.root) 2011 2012#--------------------------------------------------------------------------- 2013# Configuration classes and functions 2014#--------------------------------------------------------------------------- 2015 2016def basicConfig(**kwargs): 2017 """ 2018 Do basic configuration for the logging system. 2019 2020 This function does nothing if the root logger already has handlers 2021 configured, unless the keyword argument *force* is set to ``True``. 2022 It is a convenience method intended for use by simple scripts 2023 to do one-shot configuration of the logging package. 2024 2025 The default behaviour is to create a StreamHandler which writes to 2026 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 2027 add the handler to the root logger. 2028 2029 A number of optional keyword arguments may be specified, which can alter 2030 the default behaviour. 2031 2032 filename Specifies that a FileHandler be created, using the specified 2033 filename, rather than a StreamHandler. 
2034 filemode Specifies the mode to open the file, if filename is specified 2035 (if filemode is unspecified, it defaults to 'a'). 2036 format Use the specified format string for the handler. 2037 datefmt Use the specified date/time format. 2038 style If a format string is specified, use this to specify the 2039 type of format string (possible values '%', '{', '$', for 2040 %-formatting, :meth:`str.format` and :class:`string.Template` 2041 - defaults to '%'). 2042 level Set the root logger level to the specified level. 2043 stream Use the specified stream to initialize the StreamHandler. Note 2044 that this argument is incompatible with 'filename' - if both 2045 are present, 'stream' is ignored. 2046 handlers If specified, this should be an iterable of already created 2047 handlers, which will be added to the root logger. Any handler 2048 in the list which does not have a formatter assigned will be 2049 assigned the formatter created in this function. 2050 force If this keyword is specified as true, any existing handlers 2051 attached to the root logger are removed and closed, before 2052 carrying out the configuration as specified by the other 2053 arguments. 2054 encoding If specified together with a filename, this encoding is passed to 2055 the created FileHandler, causing it to be used when the file is 2056 opened. 2057 errors If specified together with a filename, this value is passed to the 2058 created FileHandler, causing it to be used when the file is 2059 opened in text mode. If not specified, the default value is 2060 `backslashreplace`. 2061 2062 Note that you could specify a stream created using open(filename, mode) 2063 rather than passing the filename and mode in. However, it should be 2064 remembered that StreamHandler does not close its stream (since it may be 2065 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 2066 when the handler is closed. 2067 2068 .. versionchanged:: 3.2 2069 Added the ``style`` parameter. 2070 2071 .. versionchanged:: 3.3 2072 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 2073 incompatible arguments (e.g. ``handlers`` specified together with 2074 ``filename``/``filemode``, or ``filename``/``filemode`` specified 2075 together with ``stream``, or ``handlers`` specified together with 2076 ``stream``. 2077 2078 .. versionchanged:: 3.8 2079 Added the ``force`` parameter. 2080 2081 .. versionchanged:: 3.9 2082 Added the ``encoding`` and ``errors`` parameters. 
2083 """ 2084 # Add thread safety in case someone mistakenly calls 2085 # basicConfig() from multiple threads 2086 with _lock: 2087 force = kwargs.pop('force', False) 2088 encoding = kwargs.pop('encoding', None) 2089 errors = kwargs.pop('errors', 'backslashreplace') 2090 if force: 2091 for h in root.handlers[:]: 2092 root.removeHandler(h) 2093 h.close() 2094 if len(root.handlers) == 0: 2095 handlers = kwargs.pop("handlers", None) 2096 if handlers is None: 2097 if "stream" in kwargs and "filename" in kwargs: 2098 raise ValueError("'stream' and 'filename' should not be " 2099 "specified together") 2100 else: 2101 if "stream" in kwargs or "filename" in kwargs: 2102 raise ValueError("'stream' or 'filename' should not be " 2103 "specified together with 'handlers'") 2104 if handlers is None: 2105 filename = kwargs.pop("filename", None) 2106 mode = kwargs.pop("filemode", 'a') 2107 if filename: 2108 if 'b' in mode: 2109 errors = None 2110 else: 2111 encoding = io.text_encoding(encoding) 2112 h = FileHandler(filename, mode, 2113 encoding=encoding, errors=errors) 2114 else: 2115 stream = kwargs.pop("stream", None) 2116 h = StreamHandler(stream) 2117 handlers = [h] 2118 dfs = kwargs.pop("datefmt", None) 2119 style = kwargs.pop("style", '%') 2120 if style not in _STYLES: 2121 raise ValueError('Style must be one of: %s' % ','.join( 2122 _STYLES.keys())) 2123 fs = kwargs.pop("format", _STYLES[style][1]) 2124 fmt = Formatter(fs, dfs, style) 2125 for h in handlers: 2126 if h.formatter is None: 2127 h.setFormatter(fmt) 2128 root.addHandler(h) 2129 level = kwargs.pop("level", None) 2130 if level is not None: 2131 root.setLevel(level) 2132 if kwargs: 2133 keys = ', '.join(kwargs.keys()) 2134 raise ValueError('Unrecognised argument(s): %s' % keys) 2135 2136#--------------------------------------------------------------------------- 2137# Utility functions at module level. 2138# Basically delegate everything to the root logger. 2139#--------------------------------------------------------------------------- 2140 2141def getLogger(name=None): 2142 """ 2143 Return a logger with the specified name, creating it if necessary. 2144 2145 If no name is specified, return the root logger. 2146 """ 2147 if not name or isinstance(name, str) and name == root.name: 2148 return root 2149 return Logger.manager.getLogger(name) 2150 2151def critical(msg, *args, **kwargs): 2152 """ 2153 Log a message with severity 'CRITICAL' on the root logger. If the logger 2154 has no handlers, call basicConfig() to add a console handler with a 2155 pre-defined format. 2156 """ 2157 if len(root.handlers) == 0: 2158 basicConfig() 2159 root.critical(msg, *args, **kwargs) 2160 2161def fatal(msg, *args, **kwargs): 2162 """ 2163 Don't use this function, use critical() instead. 2164 """ 2165 critical(msg, *args, **kwargs) 2166 2167def error(msg, *args, **kwargs): 2168 """ 2169 Log a message with severity 'ERROR' on the root logger. If the logger has 2170 no handlers, call basicConfig() to add a console handler with a pre-defined 2171 format. 2172 """ 2173 if len(root.handlers) == 0: 2174 basicConfig() 2175 root.error(msg, *args, **kwargs) 2176 2177def exception(msg, *args, exc_info=True, **kwargs): 2178 """ 2179 Log a message with severity 'ERROR' on the root logger, with exception 2180 information. If the logger has no handlers, basicConfig() is called to add 2181 a console handler with a pre-defined format. 
2182 """ 2183 error(msg, *args, exc_info=exc_info, **kwargs) 2184 2185def warning(msg, *args, **kwargs): 2186 """ 2187 Log a message with severity 'WARNING' on the root logger. If the logger has 2188 no handlers, call basicConfig() to add a console handler with a pre-defined 2189 format. 2190 """ 2191 if len(root.handlers) == 0: 2192 basicConfig() 2193 root.warning(msg, *args, **kwargs) 2194 2195def warn(msg, *args, **kwargs): 2196 warnings.warn("The 'warn' function is deprecated, " 2197 "use 'warning' instead", DeprecationWarning, 2) 2198 warning(msg, *args, **kwargs) 2199 2200def info(msg, *args, **kwargs): 2201 """ 2202 Log a message with severity 'INFO' on the root logger. If the logger has 2203 no handlers, call basicConfig() to add a console handler with a pre-defined 2204 format. 2205 """ 2206 if len(root.handlers) == 0: 2207 basicConfig() 2208 root.info(msg, *args, **kwargs) 2209 2210def debug(msg, *args, **kwargs): 2211 """ 2212 Log a message with severity 'DEBUG' on the root logger. If the logger has 2213 no handlers, call basicConfig() to add a console handler with a pre-defined 2214 format. 2215 """ 2216 if len(root.handlers) == 0: 2217 basicConfig() 2218 root.debug(msg, *args, **kwargs) 2219 2220def log(level, msg, *args, **kwargs): 2221 """ 2222 Log 'msg % args' with the integer severity 'level' on the root logger. If 2223 the logger has no handlers, call basicConfig() to add a console handler 2224 with a pre-defined format. 2225 """ 2226 if len(root.handlers) == 0: 2227 basicConfig() 2228 root.log(level, msg, *args, **kwargs) 2229 2230def disable(level=CRITICAL): 2231 """ 2232 Disable all logging calls of severity 'level' and below. 2233 """ 2234 root.manager.disable = level 2235 root.manager._clear_cache() 2236 2237def shutdown(handlerList=_handlerList): 2238 """ 2239 Perform any cleanup actions in the logging system (e.g. flushing 2240 buffers). 2241 2242 Should be called at application exit. 2243 """ 2244 for wr in reversed(handlerList[:]): 2245 #errors might occur, for example, if files are locked 2246 #we just ignore them if raiseExceptions is not set 2247 try: 2248 h = wr() 2249 if h: 2250 try: 2251 h.acquire() 2252 # MemoryHandlers might not want to be flushed on close, 2253 # but circular imports prevent us scoping this to just 2254 # those handlers. hence the default to True. 2255 if getattr(h, 'flushOnClose', True): 2256 h.flush() 2257 h.close() 2258 except (OSError, ValueError): 2259 # Ignore errors which might be caused 2260 # because handlers have been closed but 2261 # references to them are still around at 2262 # application exit. 2263 pass 2264 finally: 2265 h.release() 2266 except: # ignore everything, as we're shutting down 2267 if raiseExceptions: 2268 raise 2269 #else, swallow 2270 2271#Let's try and shutdown automatically on application exit... 2272import atexit 2273atexit.register(shutdown) 2274 2275# Null handler 2276 2277class NullHandler(Handler): 2278 """ 2279 This handler does nothing. It's intended to be used to avoid the 2280 "No handlers could be found for logger XXX" one-off warning. This is 2281 important for library code, which may contain code to log events. If a user 2282 of the library does not configure logging, the one-off warning might be 2283 produced; to avoid this, the library developer simply needs to instantiate 2284 a NullHandler and add it to the top-level logger of the library module or 2285 package. 
2286 """ 2287 def handle(self, record): 2288 """Stub.""" 2289 2290 def emit(self, record): 2291 """Stub.""" 2292 2293 def createLock(self): 2294 self.lock = None 2295 2296 def _at_fork_reinit(self): 2297 pass 2298 2299# Warnings integration 2300 2301_warnings_showwarning = None 2302 2303def _showwarning(message, category, filename, lineno, file=None, line=None): 2304 """ 2305 Implementation of showwarnings which redirects to logging, which will first 2306 check to see if the file parameter is None. If a file is specified, it will 2307 delegate to the original warnings implementation of showwarning. Otherwise, 2308 it will call warnings.formatwarning and will log the resulting string to a 2309 warnings logger named "py.warnings" with level logging.WARNING. 2310 """ 2311 if file is not None: 2312 if _warnings_showwarning is not None: 2313 _warnings_showwarning(message, category, filename, lineno, file, line) 2314 else: 2315 s = warnings.formatwarning(message, category, filename, lineno, line) 2316 logger = getLogger("py.warnings") 2317 if not logger.handlers: 2318 logger.addHandler(NullHandler()) 2319 # bpo-46557: Log str(s) as msg instead of logger.warning("%s", s) 2320 # since some log aggregation tools group logs by the msg arg 2321 logger.warning(str(s)) 2322 2323def captureWarnings(capture): 2324 """ 2325 If capture is true, redirect all warnings to the logging package. 2326 If capture is False, ensure that warnings are not redirected to logging 2327 but to their original destinations. 2328 """ 2329 global _warnings_showwarning 2330 if capture: 2331 if _warnings_showwarning is None: 2332 _warnings_showwarning = warnings.showwarning 2333 warnings.showwarning = _showwarning 2334 else: 2335 if _warnings_showwarning is not None: 2336 warnings.showwarning = _warnings_showwarning 2337 _warnings_showwarning = None
737class BufferingFormatter(object): 738 """ 739 A formatter suitable for formatting a number of records. 740 """ 741 def __init__(self, linefmt=None): 742 """ 743 Optionally specify a formatter which will be used to format each 744 individual record. 745 """ 746 if linefmt: 747 self.linefmt = linefmt 748 else: 749 self.linefmt = _defaultFormatter 750 751 def formatHeader(self, records): 752 """ 753 Return the header string for the specified records. 754 """ 755 return "" 756 757 def formatFooter(self, records): 758 """ 759 Return the footer string for the specified records. 760 """ 761 return "" 762 763 def format(self, records): 764 """ 765 Format the specified records and return the result as a string. 766 """ 767 rv = "" 768 if len(records) > 0: 769 rv = rv + self.formatHeader(records) 770 for record in records: 771 rv = rv + self.linefmt.format(record) 772 rv = rv + self.formatFooter(records) 773 return rv
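BufferingFormatter is normally subclassed rather than used directly: formatHeader() and formatFooter() are the intended extension points, while linefmt formats each record. A minimal sketch of such a subclass, assuming a plain Formatter for the individual lines; the class name and format strings are illustrative, not part of the module.

import logging

class BatchFormatter(logging.BufferingFormatter):
    # Illustrative subclass: wraps a batch of records with a header and footer.
    def formatHeader(self, records):
        return "--- %d buffered records ---\n" % len(records)

    def formatFooter(self, records):
        return "--- end of batch ---\n"

fmt = BatchFormatter(logging.Formatter("%(levelname)s %(message)s\n"))
records = [
    logging.LogRecord("demo", logging.INFO, "demo.py", 1, "first", None, None),
    logging.LogRecord("demo", logging.WARNING, "demo.py", 2, "second", None, None),
]
print(fmt.format(records))

The LogRecord instances are constructed by hand purely for demonstration; in normal use they are produced by a Logger.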
1191class FileHandler(StreamHandler): 1192 """ 1193 A handler class which writes formatted logging records to disk files. 1194 """ 1195 def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None): 1196 """ 1197 Open the specified file and use it as the stream for logging. 1198 """ 1199 # Issue #27493: add support for Path objects to be passed in 1200 filename = os.fspath(filename) 1201 #keep the absolute path, otherwise derived classes which use this 1202 #may come a cropper when the current directory changes 1203 self.baseFilename = os.path.abspath(filename) 1204 self.mode = mode 1205 self.encoding = encoding 1206 if "b" not in mode: 1207 self.encoding = io.text_encoding(encoding) 1208 self.errors = errors 1209 self.delay = delay 1210 # bpo-26789: FileHandler keeps a reference to the builtin open() 1211 # function to be able to open or reopen the file during Python 1212 # finalization. 1213 self._builtin_open = open 1214 if delay: 1215 #We don't open the stream, but we still need to call the 1216 #Handler constructor to set level, formatter, lock etc. 1217 Handler.__init__(self) 1218 self.stream = None 1219 else: 1220 StreamHandler.__init__(self, self._open()) 1221 1222 def close(self): 1223 """ 1224 Closes the stream. 1225 """ 1226 with self.lock: 1227 try: 1228 if self.stream: 1229 try: 1230 self.flush() 1231 finally: 1232 stream = self.stream 1233 self.stream = None 1234 if hasattr(stream, "close"): 1235 stream.close() 1236 finally: 1237 # Issue #19523: call unconditionally to 1238 # prevent a handler leak when delay is set 1239 # Also see Issue #42378: we also rely on 1240 # self._closed being set to True there 1241 StreamHandler.close(self) 1242 1243 def _open(self): 1244 """ 1245 Open the current base file with the (original) mode and encoding. 1246 Return the resulting stream. 1247 """ 1248 open_func = self._builtin_open 1249 return open_func(self.baseFilename, self.mode, 1250 encoding=self.encoding, errors=self.errors) 1251 1252 def emit(self, record): 1253 """ 1254 Emit a record. 1255 1256 If the stream was not opened because 'delay' was specified in the 1257 constructor, open it before calling the superclass's emit. 1258 1259 If stream is not open, current mode is 'w' and `_closed=True`, record 1260 will not be emitted (see Issue #42378). 1261 """ 1262 if self.stream is None: 1263 if self.mode != 'w' or not self._closed: 1264 self.stream = self._open() 1265 if self.stream: 1266 StreamHandler.emit(self, record) 1267 1268 def __repr__(self): 1269 level = getLevelName(self.level) 1270 return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
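A short usage sketch for FileHandler, exercising the mode, encoding and delay parameters accepted by the constructor above; the path and logger name are illustrative.

import logging

# delay=True defers opening the file until the first record is emitted.
handler = logging.FileHandler("example.log", mode="a", encoding="utf-8", delay=True)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

log = logging.getLogger("demo.file")
log.setLevel(logging.DEBUG)
log.addHandler(handler)

log.debug("the file is opened lazily on this first emit")
handler.close()   # flushes and closes the underlying stream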
779class Filter(object): 780 """ 781 Filter instances are used to perform arbitrary filtering of LogRecords. 782 783 Loggers and Handlers can optionally use Filter instances to filter 784 records as desired. The base filter class only allows events which are 785 below a certain point in the logger hierarchy. For example, a filter 786 initialized with "A.B" will allow events logged by loggers "A.B", 787 "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If 788 initialized with the empty string, all events are passed. 789 """ 790 def __init__(self, name=''): 791 """ 792 Initialize a filter. 793 794 Initialize with the name of the logger which, together with its 795 children, will have its events allowed through the filter. If no 796 name is specified, allow every event. 797 """ 798 self.name = name 799 self.nlen = len(name) 800 801 def filter(self, record): 802 """ 803 Determine if the specified record is to be logged. 804 805 Returns True if the record should be logged, or False otherwise. 806 If deemed appropriate, the record may be modified in-place. 807 """ 808 if self.nlen == 0: 809 return True 810 elif self.name == record.name: 811 return True 812 elif record.name.find(self.name, 0, self.nlen) != 0: 813 return False 814 return (record.name[self.nlen] == ".")
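The name-prefix semantics described in the Filter docstring can be checked with a small sketch; the logger names and the hand-built records below are purely illustrative.

import logging

flt = logging.Filter("A.B")

def allowed(logger_name):
    # Build a minimal record just to exercise Filter.filter(); the message and
    # location fields are placeholders.
    rec = logging.LogRecord(logger_name, logging.INFO, "demo.py", 0, "msg", None, None)
    return flt.filter(rec)

print(allowed("A.B"))     # True  - exact match
print(allowed("A.B.C"))   # True  - descendant of "A.B"
print(allowed("A.BB"))    # False - "A.BB" is not below "A.B" in the hierarchy
print(allowed("B.A.B"))   # False - the prefix must match from the start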
555class Formatter(object): 556 """ 557 Formatter instances are used to convert a LogRecord to text. 558 559 Formatters need to know how a LogRecord is constructed. They are 560 responsible for converting a LogRecord to (usually) a string which can 561 be interpreted by either a human or an external system. The base Formatter 562 allows a formatting string to be specified. If none is supplied, the 563 style-dependent default value, "%(message)s", "{message}", or 564 "${message}", is used. 565 566 The Formatter can be initialized with a format string which makes use of 567 knowledge of the LogRecord attributes - e.g. the default value mentioned 568 above makes use of the fact that the user's message and arguments are pre- 569 formatted into a LogRecord's message attribute. Currently, the useful 570 attributes in a LogRecord are described by: 571 572 %(name)s Name of the logger (logging channel) 573 %(levelno)s Numeric logging level for the message (DEBUG, INFO, 574 WARNING, ERROR, CRITICAL) 575 %(levelname)s Text logging level for the message ("DEBUG", "INFO", 576 "WARNING", "ERROR", "CRITICAL") 577 %(pathname)s Full pathname of the source file where the logging 578 call was issued (if available) 579 %(filename)s Filename portion of pathname 580 %(module)s Module (name portion of filename) 581 %(lineno)d Source line number where the logging call was issued 582 (if available) 583 %(funcName)s Function name 584 %(created)f Time when the LogRecord was created (time.time_ns() / 1e9 585 return value) 586 %(asctime)s Textual time when the LogRecord was created 587 %(msecs)d Millisecond portion of the creation time 588 %(relativeCreated)d Time in milliseconds when the LogRecord was created, 589 relative to the time the logging module was loaded 590 (typically at application startup time) 591 %(thread)d Thread ID (if available) 592 %(threadName)s Thread name (if available) 593 %(taskName)s Task name (if available) 594 %(process)d Process ID (if available) 595 %(processName)s Process name (if available) 596 %(message)s The result of record.getMessage(), computed just as 597 the record is emitted 598 """ 599 600 converter = time.localtime 601 602 def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *, 603 defaults=None): 604 """ 605 Initialize the formatter with specified format strings. 606 607 Initialize the formatter either with the specified format string, or a 608 default as described above. Allow for specialized date formatting with 609 the optional datefmt argument. If datefmt is omitted, you get an 610 ISO8601-like (or RFC 3339-like) format. 611 612 Use a style parameter of '%', '{' or '$' to specify that you want to 613 use one of %-formatting, :meth:`str.format` (``{}``) formatting or 614 :class:`string.Template` formatting in your format string. 615 616 .. versionchanged:: 3.2 617 Added the ``style`` parameter. 618 """ 619 if style not in _STYLES: 620 raise ValueError('Style must be one of: %s' % ','.join( 621 _STYLES.keys())) 622 self._style = _STYLES[style][0](fmt, defaults=defaults) 623 if validate: 624 self._style.validate() 625 626 self._fmt = self._style._fmt 627 self.datefmt = datefmt 628 629 default_time_format = '%Y-%m-%d %H:%M:%S' 630 default_msec_format = '%s,%03d' 631 632 def formatTime(self, record, datefmt=None): 633 """ 634 Return the creation time of the specified LogRecord as formatted text. 635 636 This method should be called from format() by a formatter which 637 wants to make use of a formatted time. 
This method can be overridden 638 in formatters to provide for any specific requirement, but the 639 basic behaviour is as follows: if datefmt (a string) is specified, 640 it is used with time.strftime() to format the creation time of the 641 record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. 642 The resulting string is returned. This function uses a user-configurable 643 function to convert the creation time to a tuple. By default, 644 time.localtime() is used; to change this for a particular formatter 645 instance, set the 'converter' attribute to a function with the same 646 signature as time.localtime() or time.gmtime(). To change it for all 647 formatters, for example if you want all logging times to be shown in GMT, 648 set the 'converter' attribute in the Formatter class. 649 """ 650 ct = self.converter(record.created) 651 if datefmt: 652 s = time.strftime(datefmt, ct) 653 else: 654 s = time.strftime(self.default_time_format, ct) 655 if self.default_msec_format: 656 s = self.default_msec_format % (s, record.msecs) 657 return s 658 659 def formatException(self, ei): 660 """ 661 Format and return the specified exception information as a string. 662 663 This default implementation just uses 664 traceback.print_exception() 665 """ 666 sio = io.StringIO() 667 tb = ei[2] 668 # See issues #9427, #1553375. Commented out for now. 669 #if getattr(self, 'fullstack', False): 670 # traceback.print_stack(tb.tb_frame.f_back, file=sio) 671 traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio) 672 s = sio.getvalue() 673 sio.close() 674 if s[-1:] == "\n": 675 s = s[:-1] 676 return s 677 678 def usesTime(self): 679 """ 680 Check if the format uses the creation time of the record. 681 """ 682 return self._style.usesTime() 683 684 def formatMessage(self, record): 685 return self._style.format(record) 686 687 def formatStack(self, stack_info): 688 """ 689 This method is provided as an extension point for specialized 690 formatting of stack information. 691 692 The input data is a string as returned from a call to 693 :func:`traceback.print_stack`, but with the last trailing newline 694 removed. 695 696 The base implementation just returns the value passed in. 697 """ 698 return stack_info 699 700 def format(self, record): 701 """ 702 Format the specified record as text. 703 704 The record's attribute dictionary is used as the operand to a 705 string formatting operation which yields the returned string. 706 Before formatting the dictionary, a couple of preparatory steps 707 are carried out. The message attribute of the record is computed 708 using LogRecord.getMessage(). If the formatting string uses the 709 time (as determined by a call to usesTime(), formatTime() is 710 called to format the event time. If there is exception information, 711 it is formatted using formatException() and appended to the message. 712 """ 713 record.message = record.getMessage() 714 if self.usesTime(): 715 record.asctime = self.formatTime(record, self.datefmt) 716 s = self.formatMessage(record) 717 if record.exc_info: 718 # Cache the traceback text to avoid converting it multiple times 719 # (it's constant anyway) 720 if not record.exc_text: 721 record.exc_text = self.formatException(record.exc_info) 722 if record.exc_text: 723 if s[-1:] != "\n": 724 s = s + "\n" 725 s = s + record.exc_text 726 if record.stack_info: 727 if s[-1:] != "\n": 728 s = s + "\n" 729 s = s + self.formatStack(record.stack_info) 730 return s
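A brief sketch tying the Formatter pieces above together: a %-style format string, a custom datefmt, and the per-instance converter attribute switched to time.gmtime so that asctime is rendered in UTC. The format strings and logger name are illustrative.

import logging
import time

formatter = logging.Formatter(
    fmt="%(asctime)s %(name)s %(levelname)s: %(message)s",
    datefmt="%Y-%m-%dT%H:%M:%S",
)
# Per formatTime() above: replace 'converter' on this instance to use UTC.
formatter.converter = time.gmtime

handler = logging.StreamHandler()
handler.setFormatter(formatter)

log = logging.getLogger("demo.formatter")
log.addHandler(handler)
log.warning("timestamps emitted via this handler are in UTC")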
923class Handler(Filterer): 924 """ 925 Handler instances dispatch logging events to specific destinations. 926 927 The base handler class. Acts as a placeholder which defines the Handler 928 interface. Handlers can optionally use Formatter instances to format 929 records as desired. By default, no formatter is specified; in this case, 930 the 'raw' message as determined by record.message is logged. 931 """ 932 def __init__(self, level=NOTSET): 933 """ 934 Initializes the instance - basically setting the formatter to None 935 and the filter list to empty. 936 """ 937 Filterer.__init__(self) 938 self._name = None 939 self.level = _checkLevel(level) 940 self.formatter = None 941 self._closed = False 942 # Add the handler to the global _handlerList (for cleanup on shutdown) 943 _addHandlerRef(self) 944 self.createLock() 945 946 def get_name(self): 947 return self._name 948 949 def set_name(self, name): 950 with _lock: 951 if self._name in _handlers: 952 del _handlers[self._name] 953 self._name = name 954 if name: 955 _handlers[name] = self 956 957 name = property(get_name, set_name) 958 959 def createLock(self): 960 """ 961 Acquire a thread lock for serializing access to the underlying I/O. 962 """ 963 self.lock = threading.RLock() 964 _register_at_fork_reinit_lock(self) 965 966 def _at_fork_reinit(self): 967 self.lock._at_fork_reinit() 968 969 def acquire(self): 970 """ 971 Acquire the I/O thread lock. 972 """ 973 if self.lock: 974 self.lock.acquire() 975 976 def release(self): 977 """ 978 Release the I/O thread lock. 979 """ 980 if self.lock: 981 self.lock.release() 982 983 def setLevel(self, level): 984 """ 985 Set the logging level of this handler. level must be an int or a str. 986 """ 987 self.level = _checkLevel(level) 988 989 def format(self, record): 990 """ 991 Format the specified record. 992 993 If a formatter is set, use it. Otherwise, use the default formatter 994 for the module. 995 """ 996 if self.formatter: 997 fmt = self.formatter 998 else: 999 fmt = _defaultFormatter 1000 return fmt.format(record) 1001 1002 def emit(self, record): 1003 """ 1004 Do whatever it takes to actually log the specified logging record. 1005 1006 This version is intended to be implemented by subclasses and so 1007 raises a NotImplementedError. 1008 """ 1009 raise NotImplementedError('emit must be implemented ' 1010 'by Handler subclasses') 1011 1012 def handle(self, record): 1013 """ 1014 Conditionally emit the specified logging record. 1015 1016 Emission depends on filters which may have been added to the handler. 1017 Wrap the actual emission of the record with acquisition/release of 1018 the I/O thread lock. 1019 1020 Returns an instance of the log record that was emitted 1021 if it passed all filters, otherwise a false value is returned. 1022 """ 1023 rv = self.filter(record) 1024 if isinstance(rv, LogRecord): 1025 record = rv 1026 if rv: 1027 with self.lock: 1028 self.emit(record) 1029 return rv 1030 1031 def setFormatter(self, fmt): 1032 """ 1033 Set the formatter for this handler. 1034 """ 1035 self.formatter = fmt 1036 1037 def flush(self): 1038 """ 1039 Ensure all logging output has been flushed. 1040 1041 This version does nothing and is intended to be implemented by 1042 subclasses. 1043 """ 1044 pass 1045 1046 def close(self): 1047 """ 1048 Tidy up any resources used by the handler. 1049 1050 This version removes the handler from an internal map of handlers, 1051 _handlers, which is used for handler lookup by name. 
Subclasses 1052 should ensure that this gets called from overridden close() 1053 methods. 1054 """ 1055 #get the module data lock, as we're updating a shared structure. 1056 with _lock: 1057 self._closed = True 1058 if self._name and self._name in _handlers: 1059 del _handlers[self._name] 1060 1061 def handleError(self, record): 1062 """ 1063 Handle errors which occur during an emit() call. 1064 1065 This method should be called from handlers when an exception is 1066 encountered during an emit() call. If raiseExceptions is false, 1067 exceptions get silently ignored. This is what is mostly wanted 1068 for a logging system - most users will not care about errors in 1069 the logging system, they are more interested in application errors. 1070 You could, however, replace this with a custom handler if you wish. 1071 The record which was being processed is passed in to this method. 1072 """ 1073 if raiseExceptions and sys.stderr: # see issue 13807 1074 exc = sys.exception() 1075 try: 1076 sys.stderr.write('--- Logging error ---\n') 1077 traceback.print_exception(exc, limit=None, file=sys.stderr) 1078 sys.stderr.write('Call stack:\n') 1079 # Walk the stack frame up until we're out of logging, 1080 # so as to print the calling context. 1081 frame = exc.__traceback__.tb_frame 1082 while (frame and os.path.dirname(frame.f_code.co_filename) == 1083 __path__[0]): 1084 frame = frame.f_back 1085 if frame: 1086 traceback.print_stack(frame, file=sys.stderr) 1087 else: 1088 # couldn't find the right stack frame, for some reason 1089 sys.stderr.write('Logged from file %s, line %s\n' % ( 1090 record.filename, record.lineno)) 1091 # Issue 18671: output logging message and arguments 1092 try: 1093 sys.stderr.write('Message: %r\n' 1094 'Arguments: %s\n' % (record.msg, 1095 record.args)) 1096 except RecursionError: # See issue 36272 1097 raise 1098 except Exception: 1099 sys.stderr.write('Unable to print the message and arguments' 1100 ' - possible formatting error.\nUse the' 1101 ' traceback above to help find the error.\n' 1102 ) 1103 except OSError: #pragma: no cover 1104 pass # see issue 5971 1105 finally: 1106 del exc 1107 1108 def __repr__(self): 1109 level = getLevelName(self.level) 1110 return '<%s (%s)>' % (self.__class__.__name__, level)
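Handler is designed for subclassing: emit() is the method a subclass normally has to supply, while format(), handleError() and the I/O lock are provided by the machinery above. A minimal sketch of a custom handler that collects formatted records in memory; the class and logger names are illustrative.

import logging

class ListHandler(logging.Handler):
    # Illustrative Handler subclass: stores formatted records in a list.
    def __init__(self, level=logging.NOTSET):
        super().__init__(level)
        self.messages = []

    def emit(self, record):
        try:
            # format() applies this handler's formatter, or the module default.
            self.messages.append(self.format(record))
        except Exception:
            # Keep logging failures non-fatal, as the base class recommends.
            self.handleError(record)

log = logging.getLogger("demo.handler")
collector = ListHandler()
collector.setFormatter(logging.Formatter("%(levelname)s:%(name)s:%(message)s"))
log.addHandler(collector)

log.error("something went %s", "wrong")
print(collector.messages)   # ['ERROR:demo.handler:something went wrong']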
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler interface. Handlers can optionally use Formatter instances to format records as desired. By default, no formatter is specified; in this case, the 'raw' message as determined by record.message is logged.
932 def __init__(self, level=NOTSET): 933 """ 934 Initializes the instance - basically setting the formatter to None 935 and the filter list to empty. 936 """ 937 Filterer.__init__(self) 938 self._name = None 939 self.level = _checkLevel(level) 940 self.formatter = None 941 self._closed = False 942 # Add the handler to the global _handlerList (for cleanup on shutdown) 943 _addHandlerRef(self) 944 self.createLock()
Initializes the instance - basically setting the formatter to None and the filter list to empty.
959 def createLock(self): 960 """ 961 Acquire a thread lock for serializing access to the underlying I/O. 962 """ 963 self.lock = threading.RLock() 964 _register_at_fork_reinit_lock(self)
Acquire a thread lock for serializing access to the underlying I/O.
969 def acquire(self): 970 """ 971 Acquire the I/O thread lock. 972 """ 973 if self.lock: 974 self.lock.acquire()
Acquire the I/O thread lock.
976 def release(self): 977 """ 978 Release the I/O thread lock. 979 """ 980 if self.lock: 981 self.lock.release()
Release the I/O thread lock.
983 def setLevel(self, level): 984 """ 985 Set the logging level of this handler. level must be an int or a str. 986 """ 987 self.level = _checkLevel(level)
Set the logging level of this handler. level must be an int or a str.
989 def format(self, record): 990 """ 991 Format the specified record. 992 993 If a formatter is set, use it. Otherwise, use the default formatter 994 for the module. 995 """ 996 if self.formatter: 997 fmt = self.formatter 998 else: 999 fmt = _defaultFormatter 1000 return fmt.format(record)
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter for the module.
1002 def emit(self, record): 1003 """ 1004 Do whatever it takes to actually log the specified logging record. 1005 1006 This version is intended to be implemented by subclasses and so 1007 raises a NotImplementedError. 1008 """ 1009 raise NotImplementedError('emit must be implemented ' 1010 'by Handler subclasses')
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so raises a NotImplementedError.
1012 def handle(self, record): 1013 """ 1014 Conditionally emit the specified logging record. 1015 1016 Emission depends on filters which may have been added to the handler. 1017 Wrap the actual emission of the record with acquisition/release of 1018 the I/O thread lock. 1019 1020 Returns an instance of the log record that was emitted 1021 if it passed all filters, otherwise a false value is returned. 1022 """ 1023 rv = self.filter(record) 1024 if isinstance(rv, LogRecord): 1025 record = rv 1026 if rv: 1027 with self.lock: 1028 self.emit(record) 1029 return rv
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler. Wrap the actual emission of the record with acquisition/release of the I/O thread lock.
Returns an instance of the log record that was emitted if it passed all filters, otherwise a false value is returned.
1031 def setFormatter(self, fmt): 1032 """ 1033 Set the formatter for this handler. 1034 """ 1035 self.formatter = fmt
Set the formatter for this handler.
1037 def flush(self): 1038 """ 1039 Ensure all logging output has been flushed. 1040 1041 This version does nothing and is intended to be implemented by 1042 subclasses. 1043 """ 1044 pass
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by subclasses.
1046 def close(self): 1047 """ 1048 Tidy up any resources used by the handler. 1049 1050 This version removes the handler from an internal map of handlers, 1051 _handlers, which is used for handler lookup by name. Subclasses 1052 should ensure that this gets called from overridden close() 1053 methods. 1054 """ 1055 #get the module data lock, as we're updating a shared structure. 1056 with _lock: 1057 self._closed = True 1058 if self._name and self._name in _handlers: 1059 del _handlers[self._name]
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers, _handlers, which is used for handler lookup by name. Subclasses should ensure that this gets called from overridden close() methods.
1061 def handleError(self, record): 1062 """ 1063 Handle errors which occur during an emit() call. 1064 1065 This method should be called from handlers when an exception is 1066 encountered during an emit() call. If raiseExceptions is false, 1067 exceptions get silently ignored. This is what is mostly wanted 1068 for a logging system - most users will not care about errors in 1069 the logging system, they are more interested in application errors. 1070 You could, however, replace this with a custom handler if you wish. 1071 The record which was being processed is passed in to this method. 1072 """ 1073 if raiseExceptions and sys.stderr: # see issue 13807 1074 exc = sys.exception() 1075 try: 1076 sys.stderr.write('--- Logging error ---\n') 1077 traceback.print_exception(exc, limit=None, file=sys.stderr) 1078 sys.stderr.write('Call stack:\n') 1079 # Walk the stack frame up until we're out of logging, 1080 # so as to print the calling context. 1081 frame = exc.__traceback__.tb_frame 1082 while (frame and os.path.dirname(frame.f_code.co_filename) == 1083 __path__[0]): 1084 frame = frame.f_back 1085 if frame: 1086 traceback.print_stack(frame, file=sys.stderr) 1087 else: 1088 # couldn't find the right stack frame, for some reason 1089 sys.stderr.write('Logged from file %s, line %s\n' % ( 1090 record.filename, record.lineno)) 1091 # Issue 18671: output logging message and arguments 1092 try: 1093 sys.stderr.write('Message: %r\n' 1094 'Arguments: %s\n' % (record.msg, 1095 record.args)) 1096 except RecursionError: # See issue 36272 1097 raise 1098 except Exception: 1099 sys.stderr.write('Unable to print the message and arguments' 1100 ' - possible formatting error.\nUse the' 1101 ' traceback above to help find the error.\n' 1102 ) 1103 except OSError: #pragma: no cover 1104 pass # see issue 5971 1105 finally: 1106 del exc
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is encountered during an emit() call. If raiseExceptions is false, exceptions get silently ignored. This is what is mostly wanted for a logging system - most users will not care about errors in the logging system, they are more interested in application errors. You could, however, replace this with a custom handler if you wish. The record which was being processed is passed in to this method.
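A subclass can override handleError() to surface such errors instead of reporting them to sys.stderr. The StrictHandler below is a hypothetical sketch, not something this module provides.

import logging

class StrictHandler(logging.StreamHandler):
    # Hypothetical override: re-raise the exception caught in emit() rather
    # than printing the default "--- Logging error ---" report.
    def handleError(self, record):
        raise

logger = logging.getLogger("demo.errors")
logger.addHandler(StrictHandler())
try:
    logger.warning("expected an int: %d", "oops")   # bad %-formatting
except TypeError:
    print("formatting error surfaced to the caller")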
287class LogRecord(object): 288 """ 289 A LogRecord instance represents an event being logged. 290 291 LogRecord instances are created every time something is logged. They 292 contain all the information pertinent to the event being logged. The 293 main information passed in is in msg and args, which are combined 294 using str(msg) % args to create the message field of the record. The 295 record also includes information such as when the record was created, 296 the source line where the logging call was made, and any exception 297 information to be logged. 298 """ 299 def __init__(self, name, level, pathname, lineno, 300 msg, args, exc_info, func=None, sinfo=None, **kwargs): 301 """ 302 Initialize a logging record with interesting information. 303 """ 304 ct = time.time_ns() 305 self.name = name 306 self.msg = msg 307 # 308 # The following statement allows passing of a dictionary as a sole 309 # argument, so that you can do something like 310 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 311 # Suggested by Stefan Behnel. 312 # Note that without the test for args[0], we get a problem because 313 # during formatting, we test to see if the arg is present using 314 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 315 # and if the passed arg fails 'if self.args:' then no formatting 316 # is done. For example, logger.warning('Value is %d', 0) would log 317 # 'Value is %d' instead of 'Value is 0'. 318 # For the use case of passing a dictionary, this should not be a 319 # problem. 320 # Issue #21172: a request was made to relax the isinstance check 321 # to hasattr(args[0], '__getitem__'). However, the docs on string 322 # formatting still seem to suggest a mapping object is required. 323 # Thus, while not removing the isinstance check, it does now look 324 # for collections.abc.Mapping rather than, as before, dict. 325 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 326 and args[0]): 327 args = args[0] 328 self.args = args 329 self.levelname = getLevelName(level) 330 self.levelno = level 331 self.pathname = pathname 332 try: 333 self.filename = os.path.basename(pathname) 334 self.module = os.path.splitext(self.filename)[0] 335 except (TypeError, ValueError, AttributeError): 336 self.filename = pathname 337 self.module = "Unknown module" 338 self.exc_info = exc_info 339 self.exc_text = None # used to cache the traceback text 340 self.stack_info = sinfo 341 self.lineno = lineno 342 self.funcName = func 343 self.created = ct / 1e9 # ns to float seconds 344 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 345 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 346 # Convert to float by adding 0.0 for historical reasons. See gh-89047 347 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 348 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 349 # ns -> sec conversion can round up, e.g: 350 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 351 self.msecs = 0.0 352 353 self.relativeCreated = (ct - _startTime) / 1e6 354 if logThreads: 355 self.thread = threading.get_ident() 356 self.threadName = threading.current_thread().name 357 else: # pragma: no cover 358 self.thread = None 359 self.threadName = None 360 if not logMultiprocessing: # pragma: no cover 361 self.processName = None 362 else: 363 self.processName = 'MainProcess' 364 mp = sys.modules.get('multiprocessing') 365 if mp is not None: 366 # Errors may occur if multiprocessing has not finished loading 367 # yet - e.g. 
if a custom import hook causes third-party code 368 # to run when multiprocessing calls import. See issue 8200 369 # for an example 370 try: 371 self.processName = mp.current_process().name 372 except Exception: #pragma: no cover 373 pass 374 if logProcesses and hasattr(os, 'getpid'): 375 self.process = os.getpid() 376 else: 377 self.process = None 378 379 self.taskName = None 380 if logAsyncioTasks: 381 asyncio = sys.modules.get('asyncio') 382 if asyncio: 383 try: 384 self.taskName = asyncio.current_task().get_name() 385 except Exception: 386 pass 387 388 def __repr__(self): 389 return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, 390 self.pathname, self.lineno, self.msg) 391 392 def getMessage(self): 393 """ 394 Return the message for this LogRecord. 395 396 Return the message for this LogRecord after merging any user-supplied 397 arguments with the message. 398 """ 399 msg = str(self.msg) 400 if self.args: 401 msg = msg % self.args 402 return msg
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged.
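Records can also be constructed directly, which makes the msg/args merging easy to see. The names, path and values below are made up for illustration.

import logging

record = logging.LogRecord(
    name="demo.records", level=logging.INFO,
    pathname="/tmp/demo.py", lineno=42,
    msg="processed %d items in %s", args=(7, "3.2s"), exc_info=None,
)
print(record.getMessage())        # 'processed 7 items in 3.2s'
print(record.levelname, record.filename, record.module)   # INFO demo.py demo

# A single mapping argument is unpacked, so %(name)s-style formatting works too:
record2 = logging.LogRecord("demo.records", logging.INFO, "/tmp/demo.py", 1,
                            "a=%(a)d b=%(b)s", ({"a": 1, "b": "two"},), None)
print(record2.getMessage())       # 'a=1 b=two'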
299 def __init__(self, name, level, pathname, lineno, 300 msg, args, exc_info, func=None, sinfo=None, **kwargs): 301 """ 302 Initialize a logging record with interesting information. 303 """ 304 ct = time.time_ns() 305 self.name = name 306 self.msg = msg 307 # 308 # The following statement allows passing of a dictionary as a sole 309 # argument, so that you can do something like 310 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 311 # Suggested by Stefan Behnel. 312 # Note that without the test for args[0], we get a problem because 313 # during formatting, we test to see if the arg is present using 314 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 315 # and if the passed arg fails 'if self.args:' then no formatting 316 # is done. For example, logger.warning('Value is %d', 0) would log 317 # 'Value is %d' instead of 'Value is 0'. 318 # For the use case of passing a dictionary, this should not be a 319 # problem. 320 # Issue #21172: a request was made to relax the isinstance check 321 # to hasattr(args[0], '__getitem__'). However, the docs on string 322 # formatting still seem to suggest a mapping object is required. 323 # Thus, while not removing the isinstance check, it does now look 324 # for collections.abc.Mapping rather than, as before, dict. 325 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 326 and args[0]): 327 args = args[0] 328 self.args = args 329 self.levelname = getLevelName(level) 330 self.levelno = level 331 self.pathname = pathname 332 try: 333 self.filename = os.path.basename(pathname) 334 self.module = os.path.splitext(self.filename)[0] 335 except (TypeError, ValueError, AttributeError): 336 self.filename = pathname 337 self.module = "Unknown module" 338 self.exc_info = exc_info 339 self.exc_text = None # used to cache the traceback text 340 self.stack_info = sinfo 341 self.lineno = lineno 342 self.funcName = func 343 self.created = ct / 1e9 # ns to float seconds 344 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 345 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 346 # Convert to float by adding 0.0 for historical reasons. See gh-89047 347 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 348 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 349 # ns -> sec conversion can round up, e.g: 350 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 351 self.msecs = 0.0 352 353 self.relativeCreated = (ct - _startTime) / 1e6 354 if logThreads: 355 self.thread = threading.get_ident() 356 self.threadName = threading.current_thread().name 357 else: # pragma: no cover 358 self.thread = None 359 self.threadName = None 360 if not logMultiprocessing: # pragma: no cover 361 self.processName = None 362 else: 363 self.processName = 'MainProcess' 364 mp = sys.modules.get('multiprocessing') 365 if mp is not None: 366 # Errors may occur if multiprocessing has not finished loading 367 # yet - e.g. if a custom import hook causes third-party code 368 # to run when multiprocessing calls import. See issue 8200 369 # for an example 370 try: 371 self.processName = mp.current_process().name 372 except Exception: #pragma: no cover 373 pass 374 if logProcesses and hasattr(os, 'getpid'): 375 self.process = os.getpid() 376 else: 377 self.process = None 378 379 self.taskName = None 380 if logAsyncioTasks: 381 asyncio = sys.modules.get('asyncio') 382 if asyncio: 383 try: 384 self.taskName = asyncio.current_task().get_name() 385 except Exception: 386 pass
Initialize a logging record with interesting information.
392 def getMessage(self): 393 """ 394 Return the message for this LogRecord. 395 396 Return the message for this LogRecord after merging any user-supplied 397 arguments with the message. 398 """ 399 msg = str(self.msg) 400 if self.args: 401 msg = msg % self.args 402 return msg
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied arguments with the message.
1464class Logger(Filterer): 1465 """ 1466 Instances of the Logger class represent a single logging channel. A 1467 "logging channel" indicates an area of an application. Exactly how an 1468 "area" is defined is up to the application developer. Since an 1469 application can have any number of areas, logging channels are identified 1470 by a unique string. Application areas can be nested (e.g. an area 1471 of "input processing" might include sub-areas "read CSV files", "read 1472 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1473 channel names are organized into a namespace hierarchy where levels are 1474 separated by periods, much like the Java or Python package namespace. So 1475 in the instance given above, channel names might be "input" for the upper 1476 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1477 There is no arbitrary limit to the depth of nesting. 1478 """ 1479 _tls = threading.local() 1480 1481 def __init__(self, name, level=NOTSET): 1482 """ 1483 Initialize the logger with a name and an optional level. 1484 """ 1485 Filterer.__init__(self) 1486 self.name = name 1487 self.level = _checkLevel(level) 1488 self.parent = None 1489 self.propagate = True 1490 self.handlers = [] 1491 self.disabled = False 1492 self._cache = {} 1493 1494 def setLevel(self, level): 1495 """ 1496 Set the logging level of this logger. level must be an int or a str. 1497 """ 1498 self.level = _checkLevel(level) 1499 self.manager._clear_cache() 1500 1501 def debug(self, msg, *args, **kwargs): 1502 """ 1503 Log 'msg % args' with severity 'DEBUG'. 1504 1505 To pass exception information, use the keyword argument exc_info with 1506 a true value, e.g. 1507 1508 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1509 """ 1510 if self.isEnabledFor(DEBUG): 1511 self._log(DEBUG, msg, args, **kwargs) 1512 1513 def info(self, msg, *args, **kwargs): 1514 """ 1515 Log 'msg % args' with severity 'INFO'. 1516 1517 To pass exception information, use the keyword argument exc_info with 1518 a true value, e.g. 1519 1520 logger.info("Houston, we have a %s", "notable problem", exc_info=True) 1521 """ 1522 if self.isEnabledFor(INFO): 1523 self._log(INFO, msg, args, **kwargs) 1524 1525 def warning(self, msg, *args, **kwargs): 1526 """ 1527 Log 'msg % args' with severity 'WARNING'. 1528 1529 To pass exception information, use the keyword argument exc_info with 1530 a true value, e.g. 1531 1532 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True) 1533 """ 1534 if self.isEnabledFor(WARNING): 1535 self._log(WARNING, msg, args, **kwargs) 1536 1537 def warn(self, msg, *args, **kwargs): 1538 warnings.warn("The 'warn' method is deprecated, " 1539 "use 'warning' instead", DeprecationWarning, 2) 1540 self.warning(msg, *args, **kwargs) 1541 1542 def error(self, msg, *args, **kwargs): 1543 """ 1544 Log 'msg % args' with severity 'ERROR'. 1545 1546 To pass exception information, use the keyword argument exc_info with 1547 a true value, e.g. 1548 1549 logger.error("Houston, we have a %s", "major problem", exc_info=True) 1550 """ 1551 if self.isEnabledFor(ERROR): 1552 self._log(ERROR, msg, args, **kwargs) 1553 1554 def exception(self, msg, *args, exc_info=True, **kwargs): 1555 """ 1556 Convenience method for logging an ERROR with exception information. 1557 """ 1558 self.error(msg, *args, exc_info=exc_info, **kwargs) 1559 1560 def critical(self, msg, *args, **kwargs): 1561 """ 1562 Log 'msg % args' with severity 'CRITICAL'. 
1563 1564 To pass exception information, use the keyword argument exc_info with 1565 a true value, e.g. 1566 1567 logger.critical("Houston, we have a %s", "major disaster", exc_info=True) 1568 """ 1569 if self.isEnabledFor(CRITICAL): 1570 self._log(CRITICAL, msg, args, **kwargs) 1571 1572 def fatal(self, msg, *args, **kwargs): 1573 """ 1574 Don't use this method, use critical() instead. 1575 """ 1576 self.critical(msg, *args, **kwargs) 1577 1578 def log(self, level, msg, *args, **kwargs): 1579 """ 1580 Log 'msg % args' with the integer severity 'level'. 1581 1582 To pass exception information, use the keyword argument exc_info with 1583 a true value, e.g. 1584 1585 logger.log(level, "We have a %s", "mysterious problem", exc_info=True) 1586 """ 1587 if not isinstance(level, int): 1588 if raiseExceptions: 1589 raise TypeError("level must be an integer") 1590 else: 1591 return 1592 if self.isEnabledFor(level): 1593 self._log(level, msg, args, **kwargs) 1594 1595 def findCaller(self, stack_info=False, stacklevel=1): 1596 """ 1597 Find the stack frame of the caller so that we can note the source 1598 file name, line number and function name. 1599 """ 1600 f = currentframe() 1601 #On some versions of IronPython, currentframe() returns None if 1602 #IronPython isn't run with -X:Frames. 1603 if f is None: 1604 return "(unknown file)", 0, "(unknown function)", None 1605 while stacklevel > 0: 1606 next_f = f.f_back 1607 if next_f is None: 1608 ## We've got options here. 1609 ## If we want to use the last (deepest) frame: 1610 break 1611 ## If we want to mimic the warnings module: 1612 #return ("sys", 1, "(unknown function)", None) 1613 ## If we want to be pedantic: 1614 #raise ValueError("call stack is not deep enough") 1615 f = next_f 1616 if not _is_internal_frame(f): 1617 stacklevel -= 1 1618 co = f.f_code 1619 sinfo = None 1620 if stack_info: 1621 with io.StringIO() as sio: 1622 sio.write("Stack (most recent call last):\n") 1623 traceback.print_stack(f, file=sio) 1624 sinfo = sio.getvalue() 1625 if sinfo[-1] == '\n': 1626 sinfo = sinfo[:-1] 1627 return co.co_filename, f.f_lineno, co.co_name, sinfo 1628 1629 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1630 func=None, extra=None, sinfo=None): 1631 """ 1632 A factory method which can be overridden in subclasses to create 1633 specialized LogRecords. 1634 """ 1635 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1636 sinfo) 1637 if extra is not None: 1638 for key in extra: 1639 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1640 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1641 rv.__dict__[key] = extra[key] 1642 return rv 1643 1644 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, 1645 stacklevel=1): 1646 """ 1647 Low-level logging routine which creates a LogRecord and then calls 1648 all the handlers of this logger to handle the record. 1649 """ 1650 sinfo = None 1651 if _srcfile: 1652 #IronPython doesn't track Python frames, so findCaller raises an 1653 #exception on some versions of IronPython. We trap it here so that 1654 #IronPython can use logging. 
1655 try: 1656 fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel) 1657 except ValueError: # pragma: no cover 1658 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1659 else: # pragma: no cover 1660 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1661 if exc_info: 1662 if isinstance(exc_info, BaseException): 1663 exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 1664 elif not isinstance(exc_info, tuple): 1665 exc_info = sys.exc_info() 1666 record = self.makeRecord(self.name, level, fn, lno, msg, args, 1667 exc_info, func, extra, sinfo) 1668 self.handle(record) 1669 1670 def handle(self, record): 1671 """ 1672 Call the handlers for the specified record. 1673 1674 This method is used for unpickled records received from a socket, as 1675 well as those created locally. Logger-level filtering is applied. 1676 """ 1677 if self._is_disabled(): 1678 return 1679 1680 self._tls.in_progress = True 1681 try: 1682 maybe_record = self.filter(record) 1683 if not maybe_record: 1684 return 1685 if isinstance(maybe_record, LogRecord): 1686 record = maybe_record 1687 self.callHandlers(record) 1688 finally: 1689 self._tls.in_progress = False 1690 1691 def addHandler(self, hdlr): 1692 """ 1693 Add the specified handler to this logger. 1694 """ 1695 with _lock: 1696 if not (hdlr in self.handlers): 1697 self.handlers.append(hdlr) 1698 1699 def removeHandler(self, hdlr): 1700 """ 1701 Remove the specified handler from this logger. 1702 """ 1703 with _lock: 1704 if hdlr in self.handlers: 1705 self.handlers.remove(hdlr) 1706 1707 def hasHandlers(self): 1708 """ 1709 See if this logger has any handlers configured. 1710 1711 Loop through all handlers for this logger and its parents in the 1712 logger hierarchy. Return True if a handler was found, else False. 1713 Stop searching up the hierarchy whenever a logger with the "propagate" 1714 attribute set to zero is found - that will be the last logger which 1715 is checked for the existence of handlers. 1716 """ 1717 c = self 1718 rv = False 1719 while c: 1720 if c.handlers: 1721 rv = True 1722 break 1723 if not c.propagate: 1724 break 1725 else: 1726 c = c.parent 1727 return rv 1728 1729 def callHandlers(self, record): 1730 """ 1731 Pass a record to all relevant handlers. 1732 1733 Loop through all handlers for this logger and its parents in the 1734 logger hierarchy. If no handler was found, output a one-off error 1735 message to sys.stderr. Stop searching up the hierarchy whenever a 1736 logger with the "propagate" attribute set to zero is found - that 1737 will be the last logger whose handlers are called. 1738 """ 1739 c = self 1740 found = 0 1741 while c: 1742 for hdlr in c.handlers: 1743 found = found + 1 1744 if record.levelno >= hdlr.level: 1745 hdlr.handle(record) 1746 if not c.propagate: 1747 c = None #break out 1748 else: 1749 c = c.parent 1750 if (found == 0): 1751 if lastResort: 1752 if record.levelno >= lastResort.level: 1753 lastResort.handle(record) 1754 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1755 sys.stderr.write("No handlers could be found for logger" 1756 " \"%s\"\n" % self.name) 1757 self.manager.emittedNoHandlerWarning = True 1758 1759 def getEffectiveLevel(self): 1760 """ 1761 Get the effective level for this logger. 1762 1763 Loop through this logger and its parents in the logger hierarchy, 1764 looking for a non-zero logging level. Return the first one found. 
1765 """ 1766 logger = self 1767 while logger: 1768 if logger.level: 1769 return logger.level 1770 logger = logger.parent 1771 return NOTSET 1772 1773 def isEnabledFor(self, level): 1774 """ 1775 Is this logger enabled for level 'level'? 1776 """ 1777 if self._is_disabled(): 1778 return False 1779 1780 try: 1781 return self._cache[level] 1782 except KeyError: 1783 with _lock: 1784 if self.manager.disable >= level: 1785 is_enabled = self._cache[level] = False 1786 else: 1787 is_enabled = self._cache[level] = ( 1788 level >= self.getEffectiveLevel() 1789 ) 1790 return is_enabled 1791 1792 def getChild(self, suffix): 1793 """ 1794 Get a logger which is a descendant to this one. 1795 1796 This is a convenience method, such that 1797 1798 logging.getLogger('abc').getChild('def.ghi') 1799 1800 is the same as 1801 1802 logging.getLogger('abc.def.ghi') 1803 1804 It's useful, for example, when the parent logger is named using 1805 __name__ rather than a literal string. 1806 """ 1807 if self.root is not self: 1808 suffix = '.'.join((self.name, suffix)) 1809 return self.manager.getLogger(suffix) 1810 1811 def getChildren(self): 1812 1813 def _hierlevel(logger): 1814 if logger is logger.manager.root: 1815 return 0 1816 return 1 + logger.name.count('.') 1817 1818 d = self.manager.loggerDict 1819 with _lock: 1820 # exclude PlaceHolders - the last check is to ensure that lower-level 1821 # descendants aren't returned - if there are placeholders, a logger's 1822 # parent field might point to a grandparent or ancestor thereof. 1823 return set(item for item in d.values() 1824 if isinstance(item, Logger) and item.parent is self and 1825 _hierlevel(item) == 1 + _hierlevel(item.parent)) 1826 1827 def _is_disabled(self): 1828 # We need to use getattr as it will only be set the first time a log 1829 # message is recorded on any given thread 1830 return self.disabled or getattr(self._tls, 'in_progress', False) 1831 1832 def __repr__(self): 1833 level = getLevelName(self.getEffectiveLevel()) 1834 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1835 1836 def __reduce__(self): 1837 if getLogger(self.name) is not self: 1838 import pickle 1839 raise pickle.PicklingError('logger cannot be pickled') 1840 return getLogger, (self.name,)
Instances of the Logger class represent a single logging channel. A "logging channel" indicates an area of an application. Exactly how an "area" is defined is up to the application developer. Since an application can have any number of areas, logging channels are identified by a unique string. Application areas can be nested (e.g. an area of "input processing" might include sub-areas "read CSV files", "read XLS files" and "read Gnumeric files"). To cater for this natural nesting, channel names are organized into a namespace hierarchy where levels are separated by periods, much like the Java or Python package namespace. So in the instance given above, channel names might be "input" for the upper level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. There is no arbitrary limit to the depth of nesting.
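The dotted-name hierarchy can be observed directly with getLogger(); the channel names below are only examples.

import logging

parent = logging.getLogger("input")
child = logging.getLogger("input.csv")

print(child.parent is parent)                    # True: dots define the hierarchy
print(logging.getLogger("input.csv") is child)   # True: same name, same instance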
1481 def __init__(self, name, level=NOTSET): 1482 """ 1483 Initialize the logger with a name and an optional level. 1484 """ 1485 Filterer.__init__(self) 1486 self.name = name 1487 self.level = _checkLevel(level) 1488 self.parent = None 1489 self.propagate = True 1490 self.handlers = [] 1491 self.disabled = False 1492 self._cache = {}
Initialize the logger with a name and an optional level.
1494 def setLevel(self, level): 1495 """ 1496 Set the logging level of this logger. level must be an int or a str. 1497 """ 1498 self.level = _checkLevel(level) 1499 self.manager._clear_cache()
Set the logging level of this logger. level must be an int or a str.
1501 def debug(self, msg, *args, **kwargs): 1502 """ 1503 Log 'msg % args' with severity 'DEBUG'. 1504 1505 To pass exception information, use the keyword argument exc_info with 1506 a true value, e.g. 1507 1508 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1509 """ 1510 if self.isEnabledFor(DEBUG): 1511 self._log(DEBUG, msg, args, **kwargs)
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
1513 def info(self, msg, *args, **kwargs): 1514 """ 1515 Log 'msg % args' with severity 'INFO'. 1516 1517 To pass exception information, use the keyword argument exc_info with 1518 a true value, e.g. 1519 1520 logger.info("Houston, we have a %s", "notable problem", exc_info=True) 1521 """ 1522 if self.isEnabledFor(INFO): 1523 self._log(INFO, msg, args, **kwargs)
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.info("Houston, we have a %s", "notable problem", exc_info=True)
1525 def warning(self, msg, *args, **kwargs): 1526 """ 1527 Log 'msg % args' with severity 'WARNING'. 1528 1529 To pass exception information, use the keyword argument exc_info with 1530 a true value, e.g. 1531 1532 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True) 1533 """ 1534 if self.isEnabledFor(WARNING): 1535 self._log(WARNING, msg, args, **kwargs)
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
1542 def error(self, msg, *args, **kwargs): 1543 """ 1544 Log 'msg % args' with severity 'ERROR'. 1545 1546 To pass exception information, use the keyword argument exc_info with 1547 a true value, e.g. 1548 1549 logger.error("Houston, we have a %s", "major problem", exc_info=True) 1550 """ 1551 if self.isEnabledFor(ERROR): 1552 self._log(ERROR, msg, args, **kwargs)
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=True)
1554 def exception(self, msg, *args, exc_info=True, **kwargs): 1555 """ 1556 Convenience method for logging an ERROR with exception information. 1557 """ 1558 self.error(msg, *args, exc_info=exc_info, **kwargs)
Convenience method for logging an ERROR with exception information.
1560 def critical(self, msg, *args, **kwargs): 1561 """ 1562 Log 'msg % args' with severity 'CRITICAL'. 1563 1564 To pass exception information, use the keyword argument exc_info with 1565 a true value, e.g. 1566 1567 logger.critical("Houston, we have a %s", "major disaster", exc_info=True) 1568 """ 1569 if self.isEnabledFor(CRITICAL): 1570 self._log(CRITICAL, msg, args, **kwargs)
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
1572 def fatal(self, msg, *args, **kwargs): 1573 """ 1574 Don't use this method, use critical() instead. 1575 """ 1576 self.critical(msg, *args, **kwargs)
Don't use this method, use critical() instead.
1578 def log(self, level, msg, *args, **kwargs): 1579 """ 1580 Log 'msg % args' with the integer severity 'level'. 1581 1582 To pass exception information, use the keyword argument exc_info with 1583 a true value, e.g. 1584 1585 logger.log(level, "We have a %s", "mysterious problem", exc_info=True) 1586 """ 1587 if not isinstance(level, int): 1588 if raiseExceptions: 1589 raise TypeError("level must be an integer") 1590 else: 1591 return 1592 if self.isEnabledFor(level): 1593 self._log(level, msg, args, **kwargs)
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
1595 def findCaller(self, stack_info=False, stacklevel=1): 1596 """ 1597 Find the stack frame of the caller so that we can note the source 1598 file name, line number and function name. 1599 """ 1600 f = currentframe() 1601 #On some versions of IronPython, currentframe() returns None if 1602 #IronPython isn't run with -X:Frames. 1603 if f is None: 1604 return "(unknown file)", 0, "(unknown function)", None 1605 while stacklevel > 0: 1606 next_f = f.f_back 1607 if next_f is None: 1608 ## We've got options here. 1609 ## If we want to use the last (deepest) frame: 1610 break 1611 ## If we want to mimic the warnings module: 1612 #return ("sys", 1, "(unknown function)", None) 1613 ## If we want to be pedantic: 1614 #raise ValueError("call stack is not deep enough") 1615 f = next_f 1616 if not _is_internal_frame(f): 1617 stacklevel -= 1 1618 co = f.f_code 1619 sinfo = None 1620 if stack_info: 1621 with io.StringIO() as sio: 1622 sio.write("Stack (most recent call last):\n") 1623 traceback.print_stack(f, file=sio) 1624 sinfo = sio.getvalue() 1625 if sinfo[-1] == '\n': 1626 sinfo = sinfo[:-1] 1627 return co.co_filename, f.f_lineno, co.co_name, sinfo
Find the stack frame of the caller so that we can note the source file name, line number and function name.
1629 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1630 func=None, extra=None, sinfo=None): 1631 """ 1632 A factory method which can be overridden in subclasses to create 1633 specialized LogRecords. 1634 """ 1635 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1636 sinfo) 1637 if extra is not None: 1638 for key in extra: 1639 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1640 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1641 rv.__dict__[key] = extra[key] 1642 return rv
A factory method which can be overridden in subclasses to create specialized LogRecords.
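One common way to exercise this path without subclassing is the 'extra' mapping, whose keys are copied onto the record built here. The request_id field and the format string below are assumptions made for this sketch.

import logging

logging.basicConfig(format="%(levelname)s %(request_id)s %(message)s")
logger = logging.getLogger("demo.extra")

# Keys from 'extra' become attributes on the LogRecord via makeRecord();
# reusing an existing attribute name such as 'message' raises KeyError.
logger.warning("payment declined", extra={"request_id": "req-42"})
# WARNING req-42 payment declined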
1670 def handle(self, record): 1671 """ 1672 Call the handlers for the specified record. 1673 1674 This method is used for unpickled records received from a socket, as 1675 well as those created locally. Logger-level filtering is applied. 1676 """ 1677 if self._is_disabled(): 1678 return 1679 1680 self._tls.in_progress = True 1681 try: 1682 maybe_record = self.filter(record) 1683 if not maybe_record: 1684 return 1685 if isinstance(maybe_record, LogRecord): 1686 record = maybe_record 1687 self.callHandlers(record) 1688 finally: 1689 self._tls.in_progress = False
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied.
1691 def addHandler(self, hdlr): 1692 """ 1693 Add the specified handler to this logger. 1694 """ 1695 with _lock: 1696 if not (hdlr in self.handlers): 1697 self.handlers.append(hdlr)
Add the specified handler to this logger.
1699 def removeHandler(self, hdlr): 1700 """ 1701 Remove the specified handler from this logger. 1702 """ 1703 with _lock: 1704 if hdlr in self.handlers: 1705 self.handlers.remove(hdlr)
Remove the specified handler from this logger.
1707 def hasHandlers(self): 1708 """ 1709 See if this logger has any handlers configured. 1710 1711 Loop through all handlers for this logger and its parents in the 1712 logger hierarchy. Return True if a handler was found, else False. 1713 Stop searching up the hierarchy whenever a logger with the "propagate" 1714 attribute set to zero is found - that will be the last logger which 1715 is checked for the existence of handlers. 1716 """ 1717 c = self 1718 rv = False 1719 while c: 1720 if c.handlers: 1721 rv = True 1722 break 1723 if not c.propagate: 1724 break 1725 else: 1726 c = c.parent 1727 return rv
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the logger hierarchy. Return True if a handler was found, else False. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger which is checked for the existence of handlers.
1729 def callHandlers(self, record): 1730 """ 1731 Pass a record to all relevant handlers. 1732 1733 Loop through all handlers for this logger and its parents in the 1734 logger hierarchy. If no handler was found, output a one-off error 1735 message to sys.stderr. Stop searching up the hierarchy whenever a 1736 logger with the "propagate" attribute set to zero is found - that 1737 will be the last logger whose handlers are called. 1738 """ 1739 c = self 1740 found = 0 1741 while c: 1742 for hdlr in c.handlers: 1743 found = found + 1 1744 if record.levelno >= hdlr.level: 1745 hdlr.handle(record) 1746 if not c.propagate: 1747 c = None #break out 1748 else: 1749 c = c.parent 1750 if (found == 0): 1751 if lastResort: 1752 if record.levelno >= lastResort.level: 1753 lastResort.handle(record) 1754 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1755 sys.stderr.write("No handlers could be found for logger" 1756 " \"%s\"\n" % self.name) 1757 self.manager.emittedNoHandlerWarning = True
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called.
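A minimal sketch of that search, assuming nothing else has configured logging: a record reaches the handlers of every ancestor until a logger with propagate set to false is met.

import logging, sys

logging.getLogger().addHandler(logging.StreamHandler(sys.stderr))

app = logging.getLogger("app")
app.addHandler(logging.StreamHandler(sys.stdout))

app.warning("seen twice: app's handler and the root handler")

app.propagate = False
app.warning("seen once: the search stops at 'app'")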
1759 def getEffectiveLevel(self): 1760 """ 1761 Get the effective level for this logger. 1762 1763 Loop through this logger and its parents in the logger hierarchy, 1764 looking for a non-zero logging level. Return the first one found. 1765 """ 1766 logger = self 1767 while logger: 1768 if logger.level: 1769 return logger.level 1770 logger = logger.parent 1771 return NOTSET
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found.
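For example, a child logger left at NOTSET reports the first non-zero level found on the way up; the logger names below are arbitrary.

import logging

parent = logging.getLogger("svc")
child = logging.getLogger("svc.worker")
parent.setLevel(logging.ERROR)

print(child.level)                           # 0 (NOTSET): no level of its own
print(child.getEffectiveLevel())             # 40 (ERROR), inherited from 'svc'
print(child.isEnabledFor(logging.WARNING))   # False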
1773 def isEnabledFor(self, level): 1774 """ 1775 Is this logger enabled for level 'level'? 1776 """ 1777 if self._is_disabled(): 1778 return False 1779 1780 try: 1781 return self._cache[level] 1782 except KeyError: 1783 with _lock: 1784 if self.manager.disable >= level: 1785 is_enabled = self._cache[level] = False 1786 else: 1787 is_enabled = self._cache[level] = ( 1788 level >= self.getEffectiveLevel() 1789 ) 1790 return is_enabled
Is this logger enabled for level 'level'?
1792 def getChild(self, suffix): 1793 """ 1794 Get a logger which is a descendant to this one. 1795 1796 This is a convenience method, such that 1797 1798 logging.getLogger('abc').getChild('def.ghi') 1799 1800 is the same as 1801 1802 logging.getLogger('abc.def.ghi') 1803 1804 It's useful, for example, when the parent logger is named using 1805 __name__ rather than a literal string. 1806 """ 1807 if self.root is not self: 1808 suffix = '.'.join((self.name, suffix)) 1809 return self.manager.getLogger(suffix)
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using __name__ rather than a literal string.
1811 def getChildren(self): 1812 1813 def _hierlevel(logger): 1814 if logger is logger.manager.root: 1815 return 0 1816 return 1 + logger.name.count('.') 1817 1818 d = self.manager.loggerDict 1819 with _lock: 1820 # exclude PlaceHolders - the last check is to ensure that lower-level 1821 # descendants aren't returned - if there are placeholders, a logger's 1822 # parent field might point to a grandparent or ancestor thereof. 1823 return set(item for item in d.values() 1824 if isinstance(item, Logger) and item.parent is self and 1825 _hierlevel(item) == 1 + _hierlevel(item.parent))
1860class LoggerAdapter(object): 1861 """ 1862 An adapter for loggers which makes it easier to specify contextual 1863 information in logging output. 1864 """ 1865 1866 def __init__(self, logger, extra=None, merge_extra=False): 1867 """ 1868 Initialize the adapter with a logger and a dict-like object which 1869 provides contextual information. This constructor signature allows 1870 easy stacking of LoggerAdapters, if so desired. 1871 1872 You can effectively pass keyword arguments as shown in the 1873 following example: 1874 1875 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1876 1877 By default, LoggerAdapter objects will drop the "extra" argument 1878 passed on the individual log calls to use its own instead. 1879 1880 Initializing it with merge_extra=True will instead merge both 1881 maps when logging, the individual call extra taking precedence 1882 over the LoggerAdapter instance extra 1883 1884 .. versionchanged:: 3.13 1885 The *merge_extra* argument was added. 1886 """ 1887 self.logger = logger 1888 self.extra = extra 1889 self.merge_extra = merge_extra 1890 1891 def process(self, msg, kwargs): 1892 """ 1893 Process the logging message and keyword arguments passed in to 1894 a logging call to insert contextual information. You can either 1895 manipulate the message itself, the keyword args or both. Return 1896 the message and kwargs modified (or not) to suit your needs. 1897 1898 Normally, you'll only need to override this one method in a 1899 LoggerAdapter subclass for your specific needs. 1900 """ 1901 if self.merge_extra and "extra" in kwargs: 1902 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1903 else: 1904 kwargs["extra"] = self.extra 1905 return msg, kwargs 1906 1907 # 1908 # Boilerplate convenience methods 1909 # 1910 def debug(self, msg, *args, **kwargs): 1911 """ 1912 Delegate a debug call to the underlying logger. 1913 """ 1914 self.log(DEBUG, msg, *args, **kwargs) 1915 1916 def info(self, msg, *args, **kwargs): 1917 """ 1918 Delegate an info call to the underlying logger. 1919 """ 1920 self.log(INFO, msg, *args, **kwargs) 1921 1922 def warning(self, msg, *args, **kwargs): 1923 """ 1924 Delegate a warning call to the underlying logger. 1925 """ 1926 self.log(WARNING, msg, *args, **kwargs) 1927 1928 def warn(self, msg, *args, **kwargs): 1929 warnings.warn("The 'warn' method is deprecated, " 1930 "use 'warning' instead", DeprecationWarning, 2) 1931 self.warning(msg, *args, **kwargs) 1932 1933 def error(self, msg, *args, **kwargs): 1934 """ 1935 Delegate an error call to the underlying logger. 1936 """ 1937 self.log(ERROR, msg, *args, **kwargs) 1938 1939 def exception(self, msg, *args, exc_info=True, **kwargs): 1940 """ 1941 Delegate an exception call to the underlying logger. 1942 """ 1943 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1944 1945 def critical(self, msg, *args, **kwargs): 1946 """ 1947 Delegate a critical call to the underlying logger. 1948 """ 1949 self.log(CRITICAL, msg, *args, **kwargs) 1950 1951 def log(self, level, msg, *args, **kwargs): 1952 """ 1953 Delegate a log call to the underlying logger, after adding 1954 contextual information from this adapter instance. 1955 """ 1956 if self.isEnabledFor(level): 1957 msg, kwargs = self.process(msg, kwargs) 1958 self.logger.log(level, msg, *args, **kwargs) 1959 1960 def isEnabledFor(self, level): 1961 """ 1962 Is this logger enabled for level 'level'? 
1963 """ 1964 return self.logger.isEnabledFor(level) 1965 1966 def setLevel(self, level): 1967 """ 1968 Set the specified level on the underlying logger. 1969 """ 1970 self.logger.setLevel(level) 1971 1972 def getEffectiveLevel(self): 1973 """ 1974 Get the effective level for the underlying logger. 1975 """ 1976 return self.logger.getEffectiveLevel() 1977 1978 def hasHandlers(self): 1979 """ 1980 See if the underlying logger has any handlers. 1981 """ 1982 return self.logger.hasHandlers() 1983 1984 def _log(self, level, msg, args, **kwargs): 1985 """ 1986 Low-level log implementation, proxied to allow nested logger adapters. 1987 """ 1988 return self.logger._log(level, msg, args, **kwargs) 1989 1990 @property 1991 def manager(self): 1992 return self.logger.manager 1993 1994 @manager.setter 1995 def manager(self, value): 1996 self.logger.manager = value 1997 1998 @property 1999 def name(self): 2000 return self.logger.name 2001 2002 def __repr__(self): 2003 logger = self.logger 2004 level = getLevelName(logger.getEffectiveLevel()) 2005 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 2006 2007 __class_getitem__ = classmethod(GenericAlias)
An adapter for loggers which makes it easier to specify contextual information in logging output.
1866 def __init__(self, logger, extra=None, merge_extra=False): 1867 """ 1868 Initialize the adapter with a logger and a dict-like object which 1869 provides contextual information. This constructor signature allows 1870 easy stacking of LoggerAdapters, if so desired. 1871 1872 You can effectively pass keyword arguments as shown in the 1873 following example: 1874 1875 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1876 1877 By default, LoggerAdapter objects will drop the "extra" argument 1878 passed on the individual log calls to use its own instead. 1879 1880 Initializing it with merge_extra=True will instead merge both 1881 maps when logging, the individual call extra taking precedence 1882 over the LoggerAdapter instance extra 1883 1884 .. versionchanged:: 3.13 1885 The *merge_extra* argument was added. 1886 """ 1887 self.logger = logger 1888 self.extra = extra 1889 self.merge_extra = merge_extra
Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
By default, LoggerAdapter objects will drop the "extra" argument passed on the individual log calls to use its own instead.
Initializing it with merge_extra=True will instead merge both maps when logging, with the individual call's extra taking precedence over the LoggerAdapter instance's extra.
Changed in version 3.13: The merge_extra argument was added.
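A small sketch of both behaviours, assuming a %(user)s field in the format string; the merge_extra form requires Python 3.13 or later.

import logging

logging.basicConfig(format="%(levelname)s %(user)s %(message)s")
base = logging.getLogger("demo.adapter")

adapter = logging.LoggerAdapter(base, {"user": "alice"})
adapter.warning("login failed")                        # WARNING alice login failed

merged = logging.LoggerAdapter(base, {"user": "alice"}, merge_extra=True)
merged.warning("login failed", extra={"user": "bob"})  # WARNING bob login failed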
1891 def process(self, msg, kwargs): 1892 """ 1893 Process the logging message and keyword arguments passed in to 1894 a logging call to insert contextual information. You can either 1895 manipulate the message itself, the keyword args or both. Return 1896 the message and kwargs modified (or not) to suit your needs. 1897 1898 Normally, you'll only need to override this one method in a 1899 LoggerAdapter subclass for your specific needs. 1900 """ 1901 if self.merge_extra and "extra" in kwargs: 1902 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1903 else: 1904 kwargs["extra"] = self.extra 1905 return msg, kwargs
Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs.
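As a hypothetical example, the subclass below prefixes each message with a request id taken from self.extra and otherwise leaves the keyword arguments alone.

import logging

class RequestAdapter(logging.LoggerAdapter):
    # Illustrative override: tag the message and keep passing 'extra' through.
    def process(self, msg, kwargs):
        kwargs.setdefault("extra", self.extra)
        return "[%s] %s" % (self.extra["request_id"], msg), kwargs

logging.basicConfig()
log = RequestAdapter(logging.getLogger("demo.process"), {"request_id": "req-42"})
log.warning("upstream timeout")   # WARNING:demo.process:[req-42] upstream timeout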
1910 def debug(self, msg, *args, **kwargs): 1911 """ 1912 Delegate a debug call to the underlying logger. 1913 """ 1914 self.log(DEBUG, msg, *args, **kwargs)
Delegate a debug call to the underlying logger.
1916 def info(self, msg, *args, **kwargs): 1917 """ 1918 Delegate an info call to the underlying logger. 1919 """ 1920 self.log(INFO, msg, *args, **kwargs)
Delegate an info call to the underlying logger.
1922 def warning(self, msg, *args, **kwargs): 1923 """ 1924 Delegate a warning call to the underlying logger. 1925 """ 1926 self.log(WARNING, msg, *args, **kwargs)
Delegate a warning call to the underlying logger.
1933 def error(self, msg, *args, **kwargs): 1934 """ 1935 Delegate an error call to the underlying logger. 1936 """ 1937 self.log(ERROR, msg, *args, **kwargs)
Delegate an error call to the underlying logger.
1939 def exception(self, msg, *args, exc_info=True, **kwargs): 1940 """ 1941 Delegate an exception call to the underlying logger. 1942 """ 1943 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
Delegate an exception call to the underlying logger.
1945 def critical(self, msg, *args, **kwargs): 1946 """ 1947 Delegate a critical call to the underlying logger. 1948 """ 1949 self.log(CRITICAL, msg, *args, **kwargs)
Delegate a critical call to the underlying logger.
1951 def log(self, level, msg, *args, **kwargs): 1952 """ 1953 Delegate a log call to the underlying logger, after adding 1954 contextual information from this adapter instance. 1955 """ 1956 if self.isEnabledFor(level): 1957 msg, kwargs = self.process(msg, kwargs) 1958 self.logger.log(level, msg, *args, **kwargs)
Delegate a log call to the underlying logger, after adding contextual information from this adapter instance.
1960 def isEnabledFor(self, level): 1961 """ 1962 Is this logger enabled for level 'level'? 1963 """ 1964 return self.logger.isEnabledFor(level)
Is this logger enabled for level 'level'?
1966 def setLevel(self, level): 1967 """ 1968 Set the specified level on the underlying logger. 1969 """ 1970 self.logger.setLevel(level)
Set the specified level on the underlying logger.
1972 def getEffectiveLevel(self): 1973 """ 1974 Get the effective level for the underlying logger. 1975 """ 1976 return self.logger.getEffectiveLevel()
Get the effective level for the underlying logger.
2278class NullHandler(Handler): 2279 """ 2280 This handler does nothing. It's intended to be used to avoid the 2281 "No handlers could be found for logger XXX" one-off warning. This is 2282 important for library code, which may contain code to log events. If a user 2283 of the library does not configure logging, the one-off warning might be 2284 produced; to avoid this, the library developer simply needs to instantiate 2285 a NullHandler and add it to the top-level logger of the library module or 2286 package. 2287 """ 2288 def handle(self, record): 2289 """Stub.""" 2290 2291 def emit(self, record): 2292 """Stub.""" 2293 2294 def createLock(self): 2295 self.lock = None 2296 2297 def _at_fork_reinit(self): 2298 pass
This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package.
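In practice that is a single line in the library's top-level package; 'mylib' below is a placeholder name.

import logging

# In mylib/__init__.py: avoid the "no handlers" warning for library users.
logging.getLogger("mylib").addHandler(logging.NullHandler())

# Library modules then log normally; output appears only if the application
# configures logging itself, e.g. with logging.basicConfig(level=logging.DEBUG).
logging.getLogger("mylib.net").debug("connection opened")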
1112class StreamHandler(Handler): 1113 """ 1114 A handler class which writes logging records, appropriately formatted, 1115 to a stream. Note that this class does not close the stream, as 1116 sys.stdout or sys.stderr may be used. 1117 """ 1118 1119 terminator = '\n' 1120 1121 def __init__(self, stream=None): 1122 """ 1123 Initialize the handler. 1124 1125 If stream is not specified, sys.stderr is used. 1126 """ 1127 Handler.__init__(self) 1128 if stream is None: 1129 stream = sys.stderr 1130 self.stream = stream 1131 1132 def flush(self): 1133 """ 1134 Flushes the stream. 1135 """ 1136 with self.lock: 1137 if self.stream and hasattr(self.stream, "flush"): 1138 self.stream.flush() 1139 1140 def emit(self, record): 1141 """ 1142 Emit a record. 1143 1144 If a formatter is specified, it is used to format the record. 1145 The record is then written to the stream with a trailing newline. If 1146 exception information is present, it is formatted using 1147 traceback.print_exception and appended to the stream. If the stream 1148 has an 'encoding' attribute, it is used to determine how to do the 1149 output to the stream. 1150 """ 1151 try: 1152 msg = self.format(record) 1153 stream = self.stream 1154 # issue 35046: merged two stream.writes into one. 1155 stream.write(msg + self.terminator) 1156 self.flush() 1157 except RecursionError: # See issue 36272 1158 raise 1159 except Exception: 1160 self.handleError(record) 1161 1162 def setStream(self, stream): 1163 """ 1164 Sets the StreamHandler's stream to the specified value, 1165 if it is different. 1166 1167 Returns the old stream, if the stream was changed, or None 1168 if it wasn't. 1169 """ 1170 if stream is self.stream: 1171 result = None 1172 else: 1173 result = self.stream 1174 with self.lock: 1175 self.flush() 1176 self.stream = stream 1177 return result 1178 1179 def __repr__(self): 1180 level = getLevelName(self.level) 1181 name = getattr(self.stream, 'name', '') 1182 # bpo-36015: name can be an int 1183 name = str(name) 1184 if name: 1185 name += ' ' 1186 return '<%s %s(%s)>' % (self.__class__.__name__, name, level) 1187 1188 __class_getitem__ = classmethod(GenericAlias)
A handler class which writes logging records, appropriately formatted, to a stream. Note that this class does not close the stream, as sys.stdout or sys.stderr may be used.
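Typical use, as a sketch: attach the handler (here writing to sys.stdout rather than the default sys.stderr) together with a formatter of your choice.

import logging, sys

handler = logging.StreamHandler(sys.stdout)    # defaults to sys.stderr if omitted
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

logger = logging.getLogger("demo.stream")
logger.addHandler(handler)
logger.error("cannot reach database")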
1121 def __init__(self, stream=None): 1122 """ 1123 Initialize the handler. 1124 1125 If stream is not specified, sys.stderr is used. 1126 """ 1127 Handler.__init__(self) 1128 if stream is None: 1129 stream = sys.stderr 1130 self.stream = stream
Initialize the handler.
If stream is not specified, sys.stderr is used.
1132 def flush(self): 1133 """ 1134 Flushes the stream. 1135 """ 1136 with self.lock: 1137 if self.stream and hasattr(self.stream, "flush"): 1138 self.stream.flush()
Flushes the stream.
1140 def emit(self, record): 1141 """ 1142 Emit a record. 1143 1144 If a formatter is specified, it is used to format the record. 1145 The record is then written to the stream with a trailing newline. If 1146 exception information is present, it is formatted using 1147 traceback.print_exception and appended to the stream. If the stream 1148 has an 'encoding' attribute, it is used to determine how to do the 1149 output to the stream. 1150 """ 1151 try: 1152 msg = self.format(record) 1153 stream = self.stream 1154 # issue 35046: merged two stream.writes into one. 1155 stream.write(msg + self.terminator) 1156 self.flush() 1157 except RecursionError: # See issue 36272 1158 raise 1159 except Exception: 1160 self.handleError(record)
Emit a record.
If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an 'encoding' attribute, it is used to determine how to do the output to the stream.
1162 def setStream(self, stream): 1163 """ 1164 Sets the StreamHandler's stream to the specified value, 1165 if it is different. 1166 1167 Returns the old stream, if the stream was changed, or None 1168 if it wasn't. 1169 """ 1170 if stream is self.stream: 1171 result = None 1172 else: 1173 result = self.stream 1174 with self.lock: 1175 self.flush() 1176 self.stream = stream 1177 return result
Sets the StreamHandler's stream to the specified value, if it is different.
Returns the old stream, if the stream was changed, or None if it wasn't.
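For example, output can be redirected to an in-memory buffer while keeping the old stream for later restoration; io.StringIO is just one possible target.

import logging, io, sys

handler = logging.StreamHandler(sys.stderr)
logger = logging.getLogger("demo.setstream")
logger.addHandler(handler)

buffer = io.StringIO()
old = handler.setStream(buffer)               # returns sys.stderr, the old stream
logger.warning("captured in memory")
print(old is sys.stderr, buffer.getvalue())   # True 'captured in memory\n'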
157def addLevelName(level, levelName): 158 """ 159 Associate 'levelName' with 'level'. 160 161 This is used when converting levels to text during message formatting. 162 """ 163 with _lock: 164 _levelToName[level] = levelName 165 _nameToLevel[levelName] = level
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
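For instance, a custom level can be registered so that %(levelname)s renders a readable name. The TRACE value of 5 is a convention chosen for this sketch, not something the module defines.

import logging

TRACE = 5                              # custom level below DEBUG (chosen here)
logging.addLevelName(TRACE, "TRACE")

logging.basicConfig(level=TRACE)
logging.getLogger("demo.levels").log(TRACE, "entering handler")
# TRACE:demo.levels:entering handler
print(logging.getLevelName(TRACE))     # 'TRACE'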
2017def basicConfig(**kwargs): 2018 """ 2019 Do basic configuration for the logging system. 2020 2021 This function does nothing if the root logger already has handlers 2022 configured, unless the keyword argument *force* is set to ``True``. 2023 It is a convenience method intended for use by simple scripts 2024 to do one-shot configuration of the logging package. 2025 2026 The default behaviour is to create a StreamHandler which writes to 2027 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 2028 add the handler to the root logger. 2029 2030 A number of optional keyword arguments may be specified, which can alter 2031 the default behaviour. 2032 2033 filename Specifies that a FileHandler be created, using the specified 2034 filename, rather than a StreamHandler. 2035 filemode Specifies the mode to open the file, if filename is specified 2036 (if filemode is unspecified, it defaults to 'a'). 2037 format Use the specified format string for the handler. 2038 datefmt Use the specified date/time format. 2039 style If a format string is specified, use this to specify the 2040 type of format string (possible values '%', '{', '$', for 2041 %-formatting, :meth:`str.format` and :class:`string.Template` 2042 - defaults to '%'). 2043 level Set the root logger level to the specified level. 2044 stream Use the specified stream to initialize the StreamHandler. Note 2045 that this argument is incompatible with 'filename' - if both 2046 are present, 'stream' is ignored. 2047 handlers If specified, this should be an iterable of already created 2048 handlers, which will be added to the root logger. Any handler 2049 in the list which does not have a formatter assigned will be 2050 assigned the formatter created in this function. 2051 force If this keyword is specified as true, any existing handlers 2052 attached to the root logger are removed and closed, before 2053 carrying out the configuration as specified by the other 2054 arguments. 2055 encoding If specified together with a filename, this encoding is passed to 2056 the created FileHandler, causing it to be used when the file is 2057 opened. 2058 errors If specified together with a filename, this value is passed to the 2059 created FileHandler, causing it to be used when the file is 2060 opened in text mode. If not specified, the default value is 2061 `backslashreplace`. 2062 2063 Note that you could specify a stream created using open(filename, mode) 2064 rather than passing the filename and mode in. However, it should be 2065 remembered that StreamHandler does not close its stream (since it may be 2066 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 2067 when the handler is closed. 2068 2069 .. versionchanged:: 3.2 2070 Added the ``style`` parameter. 2071 2072 .. versionchanged:: 3.3 2073 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 2074 incompatible arguments (e.g. ``handlers`` specified together with 2075 ``filename``/``filemode``, or ``filename``/``filemode`` specified 2076 together with ``stream``, or ``handlers`` specified together with 2077 ``stream``. 2078 2079 .. versionchanged:: 3.8 2080 Added the ``force`` parameter. 2081 2082 .. versionchanged:: 3.9 2083 Added the ``encoding`` and ``errors`` parameters. 
2084 """ 2085 # Add thread safety in case someone mistakenly calls 2086 # basicConfig() from multiple threads 2087 with _lock: 2088 force = kwargs.pop('force', False) 2089 encoding = kwargs.pop('encoding', None) 2090 errors = kwargs.pop('errors', 'backslashreplace') 2091 if force: 2092 for h in root.handlers[:]: 2093 root.removeHandler(h) 2094 h.close() 2095 if len(root.handlers) == 0: 2096 handlers = kwargs.pop("handlers", None) 2097 if handlers is None: 2098 if "stream" in kwargs and "filename" in kwargs: 2099 raise ValueError("'stream' and 'filename' should not be " 2100 "specified together") 2101 else: 2102 if "stream" in kwargs or "filename" in kwargs: 2103 raise ValueError("'stream' or 'filename' should not be " 2104 "specified together with 'handlers'") 2105 if handlers is None: 2106 filename = kwargs.pop("filename", None) 2107 mode = kwargs.pop("filemode", 'a') 2108 if filename: 2109 if 'b' in mode: 2110 errors = None 2111 else: 2112 encoding = io.text_encoding(encoding) 2113 h = FileHandler(filename, mode, 2114 encoding=encoding, errors=errors) 2115 else: 2116 stream = kwargs.pop("stream", None) 2117 h = StreamHandler(stream) 2118 handlers = [h] 2119 dfs = kwargs.pop("datefmt", None) 2120 style = kwargs.pop("style", '%') 2121 if style not in _STYLES: 2122 raise ValueError('Style must be one of: %s' % ','.join( 2123 _STYLES.keys())) 2124 fs = kwargs.pop("format", _STYLES[style][1]) 2125 fmt = Formatter(fs, dfs, style) 2126 for h in handlers: 2127 if h.formatter is None: 2128 h.setFormatter(fmt) 2129 root.addHandler(h) 2130 level = kwargs.pop("level", None) 2131 if level is not None: 2132 root.setLevel(level) 2133 if kwargs: 2134 keys = ', '.join(kwargs.keys()) 2135 raise ValueError('Unrecognised argument(s): %s' % keys)
Do basic configuration for the logging system.

This function does nothing if the root logger already has handlers
configured, unless the keyword argument force is set to True.
It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.

The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.

A number of optional keyword arguments may be specified, which can alter
the default behaviour.

filename  Specifies that a FileHandler be created, using the specified
          filename, rather than a StreamHandler.
filemode  Specifies the mode to open the file, if filename is specified
          (if filemode is unspecified, it defaults to 'a').
format    Use the specified format string for the handler.
datefmt   Use the specified date/time format.
style     If a format string is specified, use this to specify the
          type of format string (possible values '%', '{', '$', for
          %-formatting, str.format() and string.Template - defaults
          to '%').
level     Set the root logger level to the specified level.
stream    Use the specified stream to initialize the StreamHandler. Note
          that this argument is incompatible with 'filename' - if both
          are present, 'stream' is ignored.
handlers  If specified, this should be an iterable of already created
          handlers, which will be added to the root logger. Any handler
          in the list which does not have a formatter assigned will be
          assigned the formatter created in this function.
force     If this keyword is specified as true, any existing handlers
          attached to the root logger are removed and closed, before
          carrying out the configuration as specified by the other
          arguments.
encoding  If specified together with a filename, this encoding is passed to
          the created FileHandler, causing it to be used when the file is
          opened.
errors    If specified together with a filename, this value is passed to the
          created FileHandler, causing it to be used when the file is
          opened in text mode. If not specified, the default value is
          'backslashreplace'.

Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.

Changed in version 3.2: Added the style parameter.

Changed in version 3.3: Added the handlers parameter. A ValueError is now
thrown for incompatible arguments (e.g. handlers specified together with
filename/filemode, or filename/filemode specified together with stream,
or handlers specified together with stream).

Changed in version 3.8: Added the force parameter.

Changed in version 3.9: Added the encoding and errors parameters.
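A minimal usage sketch of one-shot configuration with basicConfig(); the file
name 'app.log' and the format/date strings are illustrative choices, not
library defaults:

import logging

# Configure the root logger once, near program start-up.
# 'app.log' is a hypothetical file name used only for illustration.
logging.basicConfig(
    filename='app.log',
    filemode='a',
    encoding='utf-8',
    level=logging.DEBUG,
    format='%(asctime)s %(levelname)s %(name)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)

logging.info('application started')   # written to app.log, not stderr

Because a handler is now attached to the root logger, any later call to
basicConfig() without force=True is a no-op.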
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.
    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.
    """
    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = _showwarning
    else:
        if _warnings_showwarning is not None:
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None
If capture is true, redirect all warnings to the logging package. If capture is False, ensure that warnings are not redirected to logging but to their original destinations.
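A small sketch of toggling warning capture; the warning text is illustrative.
Captured warnings are logged at WARNING level via the 'py.warnings' logger:

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logging.captureWarnings(True)     # warnings are now routed to 'py.warnings'

warnings.warn("config option 'foo' is deprecated")   # emitted as a log record

logging.captureWarnings(False)    # restore the original warnings.showwarning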
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger. If the logger
    has no handlers, call basicConfig() to add a console handler with a
    pre-defined format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.critical(msg, *args, **kwargs)
Log a message with severity 'CRITICAL' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.debug(msg, *args, **kwargs)
Log a message with severity 'DEBUG' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
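A short sketch of these module-level convenience functions; the messages are
illustrative, and the behaviour noted in the comments follows from the
docstrings above:

import logging

# No handlers are configured yet, so the first call below triggers an
# implicit basicConfig(): a StreamHandler writing BASIC_FORMAT to stderr.
logging.debug('dropped: the default root logger level is WARNING')
logging.warning('printed to stderr in BASIC_FORMAT, e.g. WARNING:root:...')
logging.error('also printed; info() and critical() follow the same pattern')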
def disable(level=CRITICAL):
    """
    Disable all logging calls of severity 'level' and below.
    """
    root.manager.disable = level
    root.manager._clear_cache()
Disable all logging calls of severity 'level' and below.
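A brief sketch of disabling and re-enabling by level; passing NOTSET removes
the override:

import logging

logging.basicConfig(level=logging.DEBUG)

logging.disable(logging.INFO)       # suppress INFO, DEBUG and below
logging.info('suppressed')
logging.warning('still emitted')    # WARNING is above the disabled level

logging.disable(logging.NOTSET)     # remove the override again
logging.info('emitted once more')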
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.error(msg, *args, **kwargs)
Log a message with severity 'ERROR' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
def exception(msg, *args, exc_info=True, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, with exception
    information. If the logger has no handlers, basicConfig() is called to add
    a console handler with a pre-defined format.
    """
    error(msg, *args, exc_info=exc_info, **kwargs)
Log a message with severity 'ERROR' on the root logger, with exception information. If the logger has no handlers, basicConfig() is called to add a console handler with a pre-defined format.
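A minimal sketch of logging a handled exception together with its traceback:

import logging

logging.basicConfig(level=logging.ERROR)

try:
    1 / 0
except ZeroDivisionError:
    # exception() logs at ERROR severity and appends the current traceback,
    # because exc_info defaults to True.
    logging.exception('division failed')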
def fatal(msg, *args, **kwargs):
    """
    Don't use this function, use critical() instead.
    """
    critical(msg, *args, **kwargs)
Don't use this function, use critical() instead.
def getLevelName(level):
    """
    Return the textual or numeric representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    If a string representation of the level is passed in, the corresponding
    numeric value is returned.

    If no matching numeric or string value is passed in, the string
    'Level %s' % level is returned.
    """
    # See Issues #22386, #27937 and #29220 for why it's this way
    result = _levelToName.get(level)
    if result is not None:
        return result
    result = _nameToLevel.get(level)
    if result is not None:
        return result
    return "Level %s" % level
Return the textual or numeric representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING, INFO, DEBUG) then you get the corresponding string. If you have associated levels with names using addLevelName then the name you have associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed in, the corresponding string representation is returned.
If a string representation of the level is passed in, the corresponding numeric value is returned.
If no matching numeric or string value is passed in, the string 'Level %s' % level is returned.
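A short sketch of the lookups described above; 'NOTICE' at level 35 is a
hypothetical custom level, not one the library defines:

import logging

logging.getLevelName(logging.WARNING)   # -> 'WARNING'
logging.getLevelName('WARNING')         # -> 30 (reverse lookup by name)
logging.getLevelName(35)                # -> 'Level 35' (no registered name)

logging.addLevelName(35, 'NOTICE')      # register a custom level name
logging.getLevelName(35)                # -> 'NOTICE'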
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name or isinstance(name, str) and name == root.name:
        return root
    return Logger.manager.getLogger(name)
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
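A small sketch of named loggers and the dotted-name hierarchy; 'myapp' and
'myapp.db' are hypothetical names used only for illustration:

import logging

root_logger = logging.getLogger()        # no name -> the root logger
pkg = logging.getLogger('myapp')
log = logging.getLogger('myapp.db')      # created on first use, then cached

assert logging.getLogger('myapp.db') is log   # same name -> same instance
assert log.parent is pkg                      # dotted names form a hierarchy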
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass
Return the class to be used when instantiating a logger.
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.info(msg, *args, **kwargs)
Log a message with severity 'INFO' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger. If
    the logger has no handlers, call basicConfig() to add a console handler
    with a pre-defined format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
Log 'msg % args' with the integer severity 'level' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
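A brief sketch of log() with an explicit numeric level; 'VERBOSE' at value 15
is a hypothetical custom level chosen for illustration:

import logging

logging.basicConfig(level=logging.DEBUG)

# log() takes the numeric severity explicitly, which is useful when the level
# is chosen at run time or is a custom level registered with addLevelName().
VERBOSE = 15
logging.addLevelName(VERBOSE, 'VERBOSE')
logging.log(VERBOSE, 'using a custom level: %s', 'details')
logging.log(logging.INFO, 'a standard level works too')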
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
Make a LogRecord whose attributes are defined by the specified dictionary. This function is useful for converting a logging event received over a socket connection (which is sent as a dictionary) into a LogRecord instance.
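A minimal sketch, assuming a hand-written attribute dictionary standing in for
one received over a socket:

import logging

logging.basicConfig(level=logging.INFO)

# A dictionary such as one unpickled from SocketHandler's payload; the
# values here are written by hand purely for illustration.
attrs = {'name': 'remote.app', 'levelno': logging.INFO, 'levelname': 'INFO',
         'msg': 'event received over the wire', 'args': None,
         'pathname': '', 'lineno': 0, 'exc_info': None}

record = logging.makeLogRecord(attrs)
logging.getLogger(record.name).handle(record)   # re-dispatch locally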
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__().
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass
Set the class to be used when instantiating a logger. The class should define __init__() such that only a name argument is required, and the __init__() should call Logger.__init__().
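A small sketch of installing a hypothetical Logger subclass; 'AuditLogger' and
its audit() method are illustrative, not part of the library:

import logging

class AuditLogger(logging.Logger):
    """Hypothetical Logger subclass adding a convenience method."""
    def audit(self, msg, *args, **kwargs):
        # Reuse the standard machinery at a fixed severity.
        self.info('AUDIT: ' + str(msg), *args, **kwargs)

logging.setLoggerClass(AuditLogger)          # affects loggers created afterwards
assert logging.getLoggerClass() is AuditLogger

logging.basicConfig(level=logging.INFO)
logging.getLogger('billing').audit('invoice %s issued', 42)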
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.
    """
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h = wr()
            if h:
                try:
                    h.acquire()
                    # MemoryHandlers might not want to be flushed on close,
                    # but circular imports prevent us scoping this to just
                    # those handlers. hence the default to True.
                    if getattr(h, 'flushOnClose', True):
                        h.flush()
                    h.close()
                except (OSError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except: # ignore everything, as we're shutting down
            if raiseExceptions:
                raise
            #else, swallow
Perform any cleanup actions in the logging system (e.g. flushing buffers).
Should be called at application exit.
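A brief sketch; 'run.log' is an illustrative file name. The package registers
shutdown() with atexit, so an explicit call is mainly useful on unusual exit
paths:

import logging

logging.basicConfig(filename='run.log')      # 'run.log' is illustrative only
logging.getLogger(__name__).warning('work done')

# shutdown() flushes and closes every handler that is still alive.  Calling
# it explicitly matters when the interpreter exits without running atexit
# handlers (e.g. via os._exit()).
logging.shutdown()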
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.warning(msg, *args, **kwargs)
Log a message with severity 'WARNING' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format.
def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """
    return _logRecordFactory
Return the factory to be used when instantiating a log record.
def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
        a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory
Set the factory to be used when instantiating a log record.
Parameters
- factory: A callable which will be called to instantiate a log record.
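A small sketch of wrapping the current factory to attach an extra attribute;
'request_id' is a hypothetical field used only for illustration:

import logging

old_factory = logging.getLogRecordFactory()

def record_factory(*args, **kwargs):
    # Delegate to the previous factory, then attach an extra attribute that
    # formatters can reference.
    record = old_factory(*args, **kwargs)
    record.request_id = 'n/a'
    return record

logging.setLogRecordFactory(record_factory)
logging.basicConfig(format='%(request_id)s %(levelname)s %(message)s',
                    level=logging.INFO)
logging.info('factory-injected field is available to formatters')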
def getHandlerByName(name):
    """
    Get a handler with the specified *name*, or None if there isn't one with
    that name.
    """
    return _handlers.get(name)
Get a handler with the specified name, or None if there isn't one with that name.
def getHandlerNames():
    """
    Return all known handler names as an immutable set.
    """
    return frozenset(_handlers)
Return all known handler names as an immutable set.
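A short sketch covering both helpers, assuming Python 3.12 or later where they
exist. Handler names are normally assigned by logging.config.dictConfig; here
one is set directly on the handler for illustration:

import logging

handler = logging.StreamHandler()
handler.name = 'console'    # setting a name registers the handler by name

logging.getLogger().addHandler(handler)

print(logging.getHandlerNames())                         # frozenset({'console'})
print(logging.getHandlerByName('console') is handler)    # True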