# Copyright 2001-2022 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.

Copyright (C) 2001-2022 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging' and log away!
"""

import sys, os, time, io, re, traceback, warnings, weakref, collections.abc

from types import GenericAlias
from string import Template
from string import Formatter as StrFormatter


__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
           'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
           'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
           'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
           'captureWarnings', 'critical', 'debug', 'disable', 'error',
           'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
           'info', 'log', 'makeLogRecord', 'setLoggerClass', 'shutdown',
           'warn', 'warning', 'getLogRecordFactory', 'setLogRecordFactory',
           'lastResort', 'raiseExceptions', 'getLevelNamesMapping',
           'getHandlerByName', 'getHandlerNames']

import threading

__author__ = "Vinay Sajip <vinay_sajip@red-dove.com>"
__status__ = "production"
# The following module attributes are no longer updated.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"

#---------------------------------------------------------------------------
#   Miscellaneous module data
#---------------------------------------------------------------------------

#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time_ns()

#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = True

#
# If you don't want threading information in the log, set this to False
#
logThreads = True

#
# If you don't want multiprocessing information in the log, set this to False
#
logMultiprocessing = True

#
# If you don't want process information in the log, set this to False
#
logProcesses = True

#
# If you don't want asyncio task information in the log, set this to False
#
logAsyncioTasks = True

#---------------------------------------------------------------------------
#   Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#

CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0

_levelToName = {
    CRITICAL: 'CRITICAL',
    ERROR: 'ERROR',
    WARNING: 'WARNING',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'NOTSET',
}
_nameToLevel = {
    'CRITICAL': CRITICAL,
    'FATAL': FATAL,
    'ERROR': ERROR,
    'WARN': WARNING,
    'WARNING': WARNING,
    'INFO': INFO,
    'DEBUG': DEBUG,
    'NOTSET': NOTSET,
}

def getLevelNamesMapping():
    return _nameToLevel.copy()

def getLevelName(level):
    """
    Return the textual or numeric representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    If a string representation of the level is passed in, the corresponding
    numeric value is returned.

    If no matching numeric or string value is passed in, the string
    'Level %s' % level is returned.
    """
    # See Issues #22386, #27937 and #29220 for why it's this way
    result = _levelToName.get(level)
    if result is not None:
        return result
    result = _nameToLevel.get(level)
    if result is not None:
        return result
    return "Level %s" % level

def addLevelName(level, levelName):
    """
    Associate 'levelName' with 'level'.

    This is used when converting levels to text during message formatting.
    """
    with _lock:
        _levelToName[level] = levelName
        _nameToLevel[levelName] = level

if hasattr(sys, "_getframe"):
    currentframe = lambda: sys._getframe(1)
else: #pragma: no cover
    def currentframe():
        """Return the frame object for the caller's stack frame."""
        try:
            raise Exception
        except Exception as exc:
            return exc.__traceback__.tb_frame.f_back

#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame, by skipping frames whose filename is that of this
# module's source. It therefore should contain the filename of this module's
# source file.
#
# Ordinarily we would use __file__ for this, but frozen modules don't always
# have __file__ set, for some reason (see Issue #21736). Thus, we get the
# filename from a handy code object from a function defined in this module.
# (There's no particular reason for picking addLevelName.)
#

_srcfile = os.path.normcase(addLevelName.__code__.co_filename)

# _srcfile is only used in conjunction with sys._getframe().
# Setting _srcfile to None will prevent findCaller() from being called. This
# way, you can avoid the overhead of fetching caller information.
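
# A minimal illustration, not part of the module, of the switches described
# above: an application whose formats never use caller, thread or process
# details could trade that information away for lower overhead, e.g.
#
#     import logging
#     logging._srcfile = None             # skip findCaller() stack walking
#     logging.logThreads = False          # omit thread information
#     logging.logProcesses = False        # omit process information
#     logging.logMultiprocessing = False  # omit multiprocessing information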

# The following is based on warnings._is_internal_frame. It makes sure that
# frames of the import mechanism are skipped when logging at module level and
# using a stacklevel value greater than one.
def _is_internal_frame(frame):
    """Signal whether the frame is a CPython or logging module internal."""
    filename = os.path.normcase(frame.f_code.co_filename)
    return filename == _srcfile or (
        "importlib" in filename and "_bootstrap" in filename
    )


def _checkLevel(level):
    if isinstance(level, int):
        rv = level
    elif str(level) == level:
        if level not in _nameToLevel:
            raise ValueError("Unknown level: %r" % level)
        rv = _nameToLevel[level]
    else:
        raise TypeError("Level not an integer or a valid string: %r"
                        % (level,))
    return rv

#---------------------------------------------------------------------------
#   Thread-related stuff
#---------------------------------------------------------------------------

#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
_lock = threading.RLock()

def _prepareFork():
    """
    Prepare to fork a new child process by acquiring the module-level lock.

    This should be used in conjunction with _afterFork().
    """
    # Wrap the lock acquisition in a try-except to prevent the lock from being
    # abandoned in the event of an asynchronous exception. See gh-106238.
    try:
        _lock.acquire()
    except BaseException:
        _lock.release()
        raise

def _afterFork():
    """
    After a new child process has been forked, release the module-level lock.

    This should be used in conjunction with _prepareFork().
    """
    _lock.release()


# Prevent a held logging lock from blocking a child from logging.

if not hasattr(os, 'register_at_fork'):  # Windows and friends.
    def _register_at_fork_reinit_lock(instance):
        pass  # no-op when os.register_at_fork does not exist.
else:
    # A collection of instances with a _at_fork_reinit method (logging.Handler)
    # to be called in the child after forking. The weakref avoids us keeping
    # discarded Handler instances alive.
    _at_fork_reinit_lock_weakset = weakref.WeakSet()

    def _register_at_fork_reinit_lock(instance):
        with _lock:
            _at_fork_reinit_lock_weakset.add(instance)

    def _after_at_fork_child_reinit_locks():
        for handler in _at_fork_reinit_lock_weakset:
            handler._at_fork_reinit()

        # _prepareFork() was called in the parent before forking.
        # The lock is reinitialized to unlocked state.
        _lock._at_fork_reinit()

    os.register_at_fork(before=_prepareFork,
                        after_in_child=_after_at_fork_child_reinit_locks,
                        after_in_parent=_afterFork)


#---------------------------------------------------------------------------
#   The logging record
#---------------------------------------------------------------------------

class LogRecord(object):
    """
    A LogRecord instance represents an event being logged.

    LogRecord instances are created every time something is logged. They
    contain all the information pertinent to the event being logged. The
    main information passed in is in msg and args, which are combined
    using str(msg) % args to create the message field of the record.
    The record also includes information such as when the record was created,
    the source line where the logging call was made, and any exception
    information to be logged.
    """
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Initialize a logging record with interesting information.
        """
        ct = time.time_ns()
        self.name = name
        self.msg = msg
        #
        # The following statement allows passing of a dictionary as a sole
        # argument, so that you can do something like
        #       logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
        # Suggested by Stefan Behnel.
        # Note that without the test for args[0], we get a problem because
        # during formatting, we test to see if the arg is present using
        # 'if self.args:'. If the event being logged is e.g. 'Value is %d'
        # and if the passed arg fails 'if self.args:' then no formatting
        # is done. For example, logger.warning('Value is %d', 0) would log
        # 'Value is %d' instead of 'Value is 0'.
        # For the use case of passing a dictionary, this should not be a
        # problem.
        # Issue #21172: a request was made to relax the isinstance check
        # to hasattr(args[0], '__getitem__'). However, the docs on string
        # formatting still seem to suggest a mapping object is required.
        # Thus, while not removing the isinstance check, it does now look
        # for collections.abc.Mapping rather than, as before, dict.
        if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping)
            and args[0]):
            args = args[0]
        self.args = args
        self.levelname = getLevelName(level)
        self.levelno = level
        self.pathname = pathname
        try:
            self.filename = os.path.basename(pathname)
            self.module = os.path.splitext(self.filename)[0]
        except (TypeError, ValueError, AttributeError):
            self.filename = pathname
            self.module = "Unknown module"
        self.exc_info = exc_info
        self.exc_text = None      # used to cache the traceback text
        self.stack_info = sinfo
        self.lineno = lineno
        self.funcName = func
        self.created = ct / 1e9  # ns to float seconds
        # Get the number of whole milliseconds (0-999) in the fractional part of seconds.
        # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns --> 999 ms
        # Convert to float by adding 0.0 for historical reasons. See gh-89047
        self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0
        if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000:
            # ns -> sec conversion can round up, e.g:
            # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec
            self.msecs = 0.0

        self.relativeCreated = (ct - _startTime) / 1e6
        if logThreads:
            self.thread = threading.get_ident()
            self.threadName = threading.current_thread().name
        else: # pragma: no cover
            self.thread = None
            self.threadName = None
        if not logMultiprocessing: # pragma: no cover
            self.processName = None
        else:
            self.processName = 'MainProcess'
            mp = sys.modules.get('multiprocessing')
            if mp is not None:
                # Errors may occur if multiprocessing has not finished loading
                # yet - e.g. if a custom import hook causes third-party code
                # to run when multiprocessing calls import. See issue 8200
                # for an example
                try:
                    self.processName = mp.current_process().name
                except Exception: #pragma: no cover
                    pass
        if logProcesses and hasattr(os, 'getpid'):
            self.process = os.getpid()
        else:
            self.process = None

        self.taskName = None
        if logAsyncioTasks:
            asyncio = sys.modules.get('asyncio')
            if asyncio:
                try:
                    self.taskName = asyncio.current_task().get_name()
                except Exception:
                    pass

    def __repr__(self):
        return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
            self.pathname, self.lineno, self.msg)

    def getMessage(self):
        """
        Return the message for this LogRecord.

        Return the message for this LogRecord after merging any user-supplied
        arguments with the message.
        """
        msg = str(self.msg)
        if self.args:
            msg = msg % self.args
        return msg

#
# Determine which class to use when instantiating log records.
#
_logRecordFactory = LogRecord

def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory

def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """

    return _logRecordFactory

def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
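
# An illustrative sketch, not part of the module, of two LogRecord
# conveniences noted above: passing a single mapping as the format arguments,
# and rebuilding a record from a plain dictionary with makeLogRecord(), e.g.
# for an event received over a socket:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)
#     logging.debug("a %(a)d b %(b)s", {'a': 1, 'b': 2})  # mapping as sole arg
#
#     received = {'name': 'demo', 'levelno': logging.INFO, 'levelname': 'INFO',
#                 'msg': 'rebuilt %s', 'args': ('event',)}
#     record = logging.makeLogRecord(received)
#     print(record.getMessage())  # -> 'rebuilt event'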

#---------------------------------------------------------------------------
#   Formatter classes and functions
#---------------------------------------------------------------------------
_str_formatter = StrFormatter()
del StrFormatter


class PercentStyle(object):

    default_format = '%(message)s'
    asctime_format = '%(asctime)s'
    asctime_search = '%(asctime)'
    validation_pattern = re.compile(r'%\(\w+\)[#0+ -]*(\*|\d+)?(\.(\*|\d+))?[diouxefgcrsa%]', re.I)

    def __init__(self, fmt, *, defaults=None):
        self._fmt = fmt or self.default_format
        self._defaults = defaults

    def usesTime(self):
        return self._fmt.find(self.asctime_search) >= 0

    def validate(self):
        """Validate the input format, ensure it matches the correct style"""
        if not self.validation_pattern.search(self._fmt):
            raise ValueError("Invalid format '%s' for '%s' style" % (self._fmt, self.default_format[0]))

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt % values

    def format(self, record):
        try:
            return self._format(record)
        except KeyError as e:
            raise ValueError('Formatting field not found in record: %s' % e)


class StrFormatStyle(PercentStyle):
    default_format = '{message}'
    asctime_format = '{asctime}'
    asctime_search = '{asctime'

    fmt_spec = re.compile(r'^(.?[<>=^])?[+ -]?#?0?(\d+|{\w+})?[,_]?(\.(\d+|{\w+}))?[bcdefgnosx%]?$', re.I)
    field_spec = re.compile(r'^(\d+|\w+)(\.\w+|\[[^]]+\])*$')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._fmt.format(**values)

    def validate(self):
        """Validate the input format, ensure it is the correct string formatting style"""
        fields = set()
        try:
            for _, fieldname, spec, conversion in _str_formatter.parse(self._fmt):
                if fieldname:
                    if not self.field_spec.match(fieldname):
                        raise ValueError('invalid field name/expression: %r' % fieldname)
                    fields.add(fieldname)
                if conversion and conversion not in 'rsa':
                    raise ValueError('invalid conversion: %r' % conversion)
                if spec and not self.fmt_spec.match(spec):
                    raise ValueError('bad specifier: %r' % spec)
        except ValueError as e:
            raise ValueError('invalid format: %s' % e)
        if not fields:
            raise ValueError('invalid format: no fields')


class StringTemplateStyle(PercentStyle):
    default_format = '${message}'
    asctime_format = '${asctime}'
    asctime_search = '${asctime}'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._tpl = Template(self._fmt)

    def usesTime(self):
        fmt = self._fmt
        return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_search) >= 0

    def validate(self):
        pattern = Template.pattern
        fields = set()
        for m in pattern.finditer(self._fmt):
            d = m.groupdict()
            if d['named']:
                fields.add(d['named'])
            elif d['braced']:
                fields.add(d['braced'])
            elif m.group(0) == '$':
                raise ValueError('invalid format: bare \'$\' not allowed')
        if not fields:
            raise ValueError('invalid format: no fields')

    def _format(self, record):
        if defaults := self._defaults:
            values = defaults | record.__dict__
        else:
            values = record.__dict__
        return self._tpl.substitute(**values)


BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"

_STYLES = {
    '%': (PercentStyle, BASIC_FORMAT),
    '{': (StrFormatStyle, '{levelname}:{name}:{message}'),
    '$': (StringTemplateStyle, '${levelname}:${name}:${message}'),
}
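
# For orientation only (not part of the module): the same basic line format
# expressed in each of the three styles registered in _STYLES above.
#
#     '%' style: '%(levelname)s:%(name)s:%(message)s'   (BASIC_FORMAT)
#     '{' style: '{levelname}:{name}:{message}'
#     '$' style: '${levelname}:${name}:${message}'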

class Formatter(object):
    """
    Formatter instances are used to convert a LogRecord to text.

    Formatters need to know how a LogRecord is constructed. They are
    responsible for converting a LogRecord to (usually) a string which can
    be interpreted by either a human or an external system. The base Formatter
    allows a formatting string to be specified. If none is supplied, the
    style-dependent default value, "%(message)s", "{message}", or
    "${message}", is used.

    The Formatter can be initialized with a format string which makes use of
    knowledge of the LogRecord attributes - e.g. the default value mentioned
    above makes use of the fact that the user's message and arguments are pre-
    formatted into a LogRecord's message attribute. Currently, the useful
    attributes in a LogRecord are described by:

    %(name)s            Name of the logger (logging channel)
    %(levelno)s         Numeric logging level for the message (DEBUG, INFO,
                        WARNING, ERROR, CRITICAL)
    %(levelname)s       Text logging level for the message ("DEBUG", "INFO",
                        "WARNING", "ERROR", "CRITICAL")
    %(pathname)s        Full pathname of the source file where the logging
                        call was issued (if available)
    %(filename)s        Filename portion of pathname
    %(module)s          Module (name portion of filename)
    %(lineno)d          Source line number where the logging call was issued
                        (if available)
    %(funcName)s        Function name
    %(created)f         Time when the LogRecord was created (time.time_ns() / 1e9
                        return value)
    %(asctime)s         Textual time when the LogRecord was created
    %(msecs)d           Millisecond portion of the creation time
    %(relativeCreated)d Time in milliseconds when the LogRecord was created,
                        relative to the time the logging module was loaded
                        (typically at application startup time)
    %(thread)d          Thread ID (if available)
    %(threadName)s      Thread name (if available)
    %(taskName)s        Task name (if available)
    %(process)d         Process ID (if available)
    %(processName)s     Process name (if available)
    %(message)s         The result of record.getMessage(), computed just as
                        the record is emitted
    """

    converter = time.localtime

    def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *,
                 defaults=None):
        """
        Initialize the formatter with specified format strings.

        Initialize the formatter either with the specified format string, or a
        default as described above. Allow for specialized date formatting with
        the optional datefmt argument. If datefmt is omitted, you get an
        ISO8601-like (or RFC 3339-like) format.

        Use a style parameter of '%', '{' or '$' to specify that you want to
        use one of %-formatting, :meth:`str.format` (``{}``) formatting or
        :class:`string.Template` formatting in your format string.

        .. versionchanged:: 3.2
           Added the ``style`` parameter.
        """
        if style not in _STYLES:
            raise ValueError('Style must be one of: %s' % ','.join(
                _STYLES.keys()))
        self._style = _STYLES[style][0](fmt, defaults=defaults)
        if validate:
            self._style.validate()

        self._fmt = self._style._fmt
        self.datefmt = datefmt

    default_time_format = '%Y-%m-%d %H:%M:%S'
    default_msec_format = '%s,%03d'

    def formatTime(self, record, datefmt=None):
        """
        Return the creation time of the specified LogRecord as formatted text.

        This method should be called from format() by a formatter which
        wants to make use of a formatted time. This method can be overridden
        in formatters to provide for any specific requirement, but the
        basic behaviour is as follows: if datefmt (a string) is specified,
        it is used with time.strftime() to format the creation time of the
        record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used.
        The resulting string is returned. This function uses a user-configurable
        function to convert the creation time to a tuple. By default,
        time.localtime() is used; to change this for a particular formatter
        instance, set the 'converter' attribute to a function with the same
        signature as time.localtime() or time.gmtime(). To change it for all
        formatters, for example if you want all logging times to be shown in GMT,
        set the 'converter' attribute in the Formatter class.
        """
        ct = self.converter(record.created)
        if datefmt:
            s = time.strftime(datefmt, ct)
        else:
            s = time.strftime(self.default_time_format, ct)
            if self.default_msec_format:
                s = self.default_msec_format % (s, record.msecs)
        return s

    def formatException(self, ei):
        """
        Format and return the specified exception information as a string.

        This default implementation just uses
        traceback.print_exception()
        """
        sio = io.StringIO()
        tb = ei[2]
        # See issues #9427, #1553375. Commented out for now.
        #if getattr(self, 'fullstack', False):
        #    traceback.print_stack(tb.tb_frame.f_back, file=sio)
        traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == "\n":
            s = s[:-1]
        return s

    def usesTime(self):
        """
        Check if the format uses the creation time of the record.
        """
        return self._style.usesTime()

    def formatMessage(self, record):
        return self._style.format(record)

    def formatStack(self, stack_info):
        """
        This method is provided as an extension point for specialized
        formatting of stack information.

        The input data is a string as returned from a call to
        :func:`traceback.print_stack`, but with the last trailing newline
        removed.

        The base implementation just returns the value passed in.
        """
        return stack_info

    def format(self, record):
        """
        Format the specified record as text.

        The record's attribute dictionary is used as the operand to a
        string formatting operation which yields the returned string.
        Before formatting the dictionary, a couple of preparatory steps
        are carried out. The message attribute of the record is computed
        using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
        called to format the event time. If there is exception information,
        it is formatted using formatException() and appended to the message.
        """
        record.message = record.getMessage()
        if self.usesTime():
            record.asctime = self.formatTime(record, self.datefmt)
        s = self.formatMessage(record)
        if record.exc_info:
            # Cache the traceback text to avoid converting it multiple times
            # (it's constant anyway)
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + record.exc_text
        if record.stack_info:
            if s[-1:] != "\n":
                s = s + "\n"
            s = s + self.formatStack(record.stack_info)
        return s

#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
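
# A usage sketch, illustrative only and not part of the module: building a
# Formatter with a '{'-style format string and a date format, then attaching
# it to a handler. The logger name 'app' is an arbitrary example.
#
#     import logging
#     handler = logging.StreamHandler()
#     handler.setFormatter(logging.Formatter(
#         fmt='{asctime} {levelname} {name}: {message}',
#         datefmt='%Y-%m-%d %H:%M:%S',
#         style='{'))
#     logger = logging.getLogger('app')
#     logger.addHandler(handler)
#     logger.warning('disk usage at %d%%', 91)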

class BufferingFormatter(object):
    """
    A formatter suitable for formatting a number of records.
    """
    def __init__(self, linefmt=None):
        """
        Optionally specify a formatter which will be used to format each
        individual record.
        """
        if linefmt:
            self.linefmt = linefmt
        else:
            self.linefmt = _defaultFormatter

    def formatHeader(self, records):
        """
        Return the header string for the specified records.
        """
        return ""

    def formatFooter(self, records):
        """
        Return the footer string for the specified records.
        """
        return ""

    def format(self, records):
        """
        Format the specified records and return the result as a string.
        """
        rv = ""
        if len(records) > 0:
            rv = rv + self.formatHeader(records)
            for record in records:
                rv = rv + self.linefmt.format(record)
            rv = rv + self.formatFooter(records)
        return rv

#---------------------------------------------------------------------------
#   Filter classes and functions
#---------------------------------------------------------------------------

class Filter(object):
    """
    Filter instances are used to perform arbitrary filtering of LogRecords.

    Loggers and Handlers can optionally use Filter instances to filter
    records as desired. The base filter class only allows events which are
    below a certain point in the logger hierarchy. For example, a filter
    initialized with "A.B" will allow events logged by loggers "A.B",
    "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
    initialized with the empty string, all events are passed.
    """
    def __init__(self, name=''):
        """
        Initialize a filter.

        Initialize with the name of the logger which, together with its
        children, will have its events allowed through the filter. If no
        name is specified, allow every event.
        """
        self.name = name
        self.nlen = len(name)

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns True if the record should be logged, or False otherwise.
        If deemed appropriate, the record may be modified in-place.
        """
        if self.nlen == 0:
            return True
        elif self.name == record.name:
            return True
        elif record.name.find(self.name, 0, self.nlen) != 0:
            return False
        return (record.name[self.nlen] == ".")
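
# A small sketch, not part of the module, of the name-based rule documented
# above: a Filter named "A.B" passes records from "A.B" and its descendants,
# but not from similarly prefixed siblings such as "A.BB".
#
#     import logging
#     f = logging.Filter('A.B')
#     f.filter(logging.makeLogRecord({'name': 'A.B.C'}))  # -> True
#     f.filter(logging.makeLogRecord({'name': 'A.BB'}))   # -> False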

class Filterer(object):
    """
    A base class for loggers and handlers which allows them to share
    common code.
    """
    def __init__(self):
        """
        Initialize the list of filters to be an empty list.
        """
        self.filters = []

    def addFilter(self, filter):
        """
        Add the specified filter to this handler.
        """
        if not (filter in self.filters):
            self.filters.append(filter)

    def removeFilter(self, filter):
        """
        Remove the specified filter from this handler.
        """
        if filter in self.filters:
            self.filters.remove(filter)

    def filter(self, record):
        """
        Determine if a record is loggable by consulting all the filters.

        The default is to allow the record to be logged; any filter can veto
        this by returning a false value.
        If a filter attached to a handler returns a log record instance,
        then that instance is used in place of the original log record in
        any further processing of the event by that handler.
        If a filter returns any other true value, the original log record
        is used in any further processing of the event by that handler.

        If none of the filters return false values, this method returns
        a log record.
        If any of the filters return a false value, this method returns
        a false value.

        .. versionchanged:: 3.2

           Allow filters to be just callables.

        .. versionchanged:: 3.12
           Allow filters to return a LogRecord instead of
           modifying it in place.
        """
        for f in self.filters:
            if hasattr(f, 'filter'):
                result = f.filter(record)
            else:
                result = f(record)  # assume callable - will raise if not
            if not result:
                return False
            if isinstance(result, LogRecord):
                record = result
        return record

#---------------------------------------------------------------------------
#   Handler classes and functions
#---------------------------------------------------------------------------

_handlers = weakref.WeakValueDictionary()  #map of handler names to handlers
_handlerList = []  # added to allow handlers to be removed in reverse of order initialized

def _removeHandlerRef(wr):
    """
    Remove a handler reference from the internal cleanup list.
    """
    # This function can be called during module teardown, when globals are
    # set to None. It can also be called from another thread. So we need to
    # pre-emptively grab the necessary globals and check if they're None,
    # to prevent race conditions and failures during interpreter shutdown.
    handlers, lock = _handlerList, _lock
    if lock and handlers:
        with lock:
            try:
                handlers.remove(wr)
            except ValueError:
                pass

def _addHandlerRef(handler):
    """
    Add a handler to the internal cleanup list using a weak reference.
    """
    with _lock:
        _handlerList.append(weakref.ref(handler, _removeHandlerRef))


def getHandlerByName(name):
    """
    Get a handler with the specified *name*, or None if there isn't one with
    that name.
    """
    return _handlers.get(name)


def getHandlerNames():
    """
    Return all known handler names as an immutable set.
    """
    return frozenset(_handlers)
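
# A brief sketch, not part of the module, of handler naming and lookup via
# the registry above: assigning to a handler's 'name' property registers it
# in _handlers, after which it can be retrieved with getHandlerByName().
#
#     import logging
#     console = logging.StreamHandler()
#     console.name = 'console'          # registers the handler by name
#     assert logging.getHandlerByName('console') is console
#     print(logging.getHandlerNames())  # e.g. frozenset({'console'})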

class Handler(Filterer):
    """
    Handler instances dispatch logging events to specific destinations.

    The base handler class. Acts as a placeholder which defines the Handler
    interface. Handlers can optionally use Formatter instances to format
    records as desired. By default, no formatter is specified; in this case,
    the 'raw' message as determined by record.message is logged.
    """
    def __init__(self, level=NOTSET):
        """
        Initializes the instance - basically setting the formatter to None
        and the filter list to empty.
        """
        Filterer.__init__(self)
        self._name = None
        self.level = _checkLevel(level)
        self.formatter = None
        self._closed = False
        # Add the handler to the global _handlerList (for cleanup on shutdown)
        _addHandlerRef(self)
        self.createLock()

    def get_name(self):
        return self._name

    def set_name(self, name):
        with _lock:
            if self._name in _handlers:
                del _handlers[self._name]
            self._name = name
            if name:
                _handlers[name] = self

    name = property(get_name, set_name)

    def createLock(self):
        """
        Acquire a thread lock for serializing access to the underlying I/O.
        """
        self.lock = threading.RLock()
        _register_at_fork_reinit_lock(self)

    def _at_fork_reinit(self):
        self.lock._at_fork_reinit()

    def acquire(self):
        """
        Acquire the I/O thread lock.
        """
        if self.lock:
            self.lock.acquire()

    def release(self):
        """
        Release the I/O thread lock.
        """
        if self.lock:
            self.lock.release()

    def setLevel(self, level):
        """
        Set the logging level of this handler. level must be an int or a str.
        """
        self.level = _checkLevel(level)

    def format(self, record):
        """
        Format the specified record.

        If a formatter is set, use it. Otherwise, use the default formatter
        for the module.
        """
        if self.formatter:
            fmt = self.formatter
        else:
            fmt = _defaultFormatter
        return fmt.format(record)

    def emit(self, record):
        """
        Do whatever it takes to actually log the specified logging record.

        This version is intended to be implemented by subclasses and so
        raises a NotImplementedError.
        """
        raise NotImplementedError('emit must be implemented '
                                  'by Handler subclasses')

    def handle(self, record):
        """
        Conditionally emit the specified logging record.

        Emission depends on filters which may have been added to the handler.
        Wrap the actual emission of the record with acquisition/release of
        the I/O thread lock.

        Returns an instance of the log record that was emitted
        if it passed all filters, otherwise a false value is returned.
        """
        rv = self.filter(record)
        if isinstance(rv, LogRecord):
            record = rv
        if rv:
            with self.lock:
                self.emit(record)
        return rv

    def setFormatter(self, fmt):
        """
        Set the formatter for this handler.
        """
        self.formatter = fmt

    def flush(self):
        """
        Ensure all logging output has been flushed.

        This version does nothing and is intended to be implemented by
        subclasses.
        """
        pass

    def close(self):
        """
        Tidy up any resources used by the handler.

        This version removes the handler from an internal map of handlers,
        _handlers, which is used for handler lookup by name. Subclasses
        should ensure that this gets called from overridden close()
        methods.
        """
        #get the module data lock, as we're updating a shared structure.
        with _lock:
            self._closed = True
            if self._name and self._name in _handlers:
                del _handlers[self._name]

    def handleError(self, record):
        """
        Handle errors which occur during an emit() call.

        This method should be called from handlers when an exception is
        encountered during an emit() call. If raiseExceptions is false,
        exceptions get silently ignored. This is what is mostly wanted
        for a logging system - most users will not care about errors in
        the logging system, they are more interested in application errors.
        You could, however, replace this with a custom handler if you wish.
        The record which was being processed is passed in to this method.
        """
        if raiseExceptions and sys.stderr:  # see issue 13807
            exc = sys.exception()
            try:
                sys.stderr.write('--- Logging error ---\n')
                traceback.print_exception(exc, limit=None, file=sys.stderr)
                sys.stderr.write('Call stack:\n')
                # Walk the stack frame up until we're out of logging,
                # so as to print the calling context.
                frame = exc.__traceback__.tb_frame
                while (frame and os.path.dirname(frame.f_code.co_filename) ==
                       __path__[0]):
                    frame = frame.f_back
                if frame:
                    traceback.print_stack(frame, file=sys.stderr)
                else:
                    # couldn't find the right stack frame, for some reason
                    sys.stderr.write('Logged from file %s, line %s\n' % (
                                     record.filename, record.lineno))
                # Issue 18671: output logging message and arguments
                try:
                    sys.stderr.write('Message: %r\n'
                                     'Arguments: %s\n' % (record.msg,
                                                          record.args))
                except RecursionError:  # See issue 36272
                    raise
                except Exception:
                    sys.stderr.write('Unable to print the message and arguments'
                                     ' - possible formatting error.\nUse the'
                                     ' traceback above to help find the error.\n'
                                    )
            except OSError: #pragma: no cover
                pass    # see issue 5971
            finally:
                del exc

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s (%s)>' % (self.__class__.__name__, level)

class StreamHandler(Handler):
    """
    A handler class which writes logging records, appropriately formatted,
    to a stream. Note that this class does not close the stream, as
    sys.stdout or sys.stderr may be used.
    """

    terminator = '\n'

    def __init__(self, stream=None):
        """
        Initialize the handler.

        If stream is not specified, sys.stderr is used.
        """
        Handler.__init__(self)
        if stream is None:
            stream = sys.stderr
        self.stream = stream

    def flush(self):
        """
        Flushes the stream.
        """
        with self.lock:
            if self.stream and hasattr(self.stream, "flush"):
                self.stream.flush()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline. If
        exception information is present, it is formatted using
        traceback.print_exception and appended to the stream. If the stream
        has an 'encoding' attribute, it is used to determine how to do the
        output to the stream.
        """
        try:
            msg = self.format(record)
            stream = self.stream
            # issue 35046: merged two stream.writes into one.
            stream.write(msg + self.terminator)
            self.flush()
        except RecursionError:  # See issue 36272
            raise
        except Exception:
            self.handleError(record)

    def setStream(self, stream):
        """
        Sets the StreamHandler's stream to the specified value,
        if it is different.

        Returns the old stream, if the stream was changed, or None
        if it wasn't.
        """
        if stream is self.stream:
            result = None
        else:
            result = self.stream
            with self.lock:
                self.flush()
                self.stream = stream
        return result

    def __repr__(self):
        level = getLevelName(self.level)
        name = getattr(self.stream, 'name', '')
        # bpo-36015: name can be an int
        name = str(name)
        if name:
            name += ' '
        return '<%s %s(%s)>' % (self.__class__.__name__, name, level)

    __class_getitem__ = classmethod(GenericAlias)
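
# An illustrative sketch, not part of the module: a StreamHandler writes to
# sys.stderr unless another stream is supplied, and setStream() can switch
# the target later, returning the previous stream if it was changed.
#
#     import io, logging, sys
#     buf = io.StringIO()
#     handler = logging.StreamHandler(buf)
#     logging.getLogger('demo').addHandler(handler)
#     logging.getLogger('demo').warning('goes to the buffer')
#     previous = handler.setStream(sys.stderr)  # 'previous' is the StringIO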

class FileHandler(StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None):
        """
        Open the specified file and use it as the stream for logging.
        """
        # Issue #27493: add support for Path objects to be passed in
        filename = os.fspath(filename)
        #keep the absolute path, otherwise derived classes which use this
        #may come a cropper when the current directory changes
        self.baseFilename = os.path.abspath(filename)
        self.mode = mode
        self.encoding = encoding
        if "b" not in mode:
            self.encoding = io.text_encoding(encoding)
        self.errors = errors
        self.delay = delay
        # bpo-26789: FileHandler keeps a reference to the builtin open()
        # function to be able to open or reopen the file during Python
        # finalization.
        self._builtin_open = open
        if delay:
            #We don't open the stream, but we still need to call the
            #Handler constructor to set level, formatter, lock etc.
            Handler.__init__(self)
            self.stream = None
        else:
            StreamHandler.__init__(self, self._open())

    def close(self):
        """
        Closes the stream.
        """
        with self.lock:
            try:
                if self.stream:
                    try:
                        self.flush()
                    finally:
                        stream = self.stream
                        self.stream = None
                        if hasattr(stream, "close"):
                            stream.close()
            finally:
                # Issue #19523: call unconditionally to
                # prevent a handler leak when delay is set
                # Also see Issue #42378: we also rely on
                # self._closed being set to True there
                StreamHandler.close(self)

    def _open(self):
        """
        Open the current base file with the (original) mode and encoding.
        Return the resulting stream.
        """
        open_func = self._builtin_open
        return open_func(self.baseFilename, self.mode,
                         encoding=self.encoding, errors=self.errors)

    def emit(self, record):
        """
        Emit a record.

        If the stream was not opened because 'delay' was specified in the
        constructor, open it before calling the superclass's emit.

        If stream is not open, current mode is 'w' and `_closed=True`, record
        will not be emitted (see Issue #42378).
        """
        if self.stream is None:
            if self.mode != 'w' or not self._closed:
                self.stream = self._open()
        if self.stream:
            StreamHandler.emit(self, record)

    def __repr__(self):
        level = getLevelName(self.level)
        return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)


class _StderrHandler(StreamHandler):
    """
    This class is like a StreamHandler using sys.stderr, but always uses
    whatever sys.stderr is currently set to rather than the value of
    sys.stderr at handler construction time.
    """
    def __init__(self, level=NOTSET):
        """
        Initialize the handler.
        """
        Handler.__init__(self, level)

    @property
    def stream(self):
        return sys.stderr


_defaultLastResort = _StderrHandler(WARNING)
lastResort = _defaultLastResort

#---------------------------------------------------------------------------
#   Manager classes and functions
#---------------------------------------------------------------------------

class PlaceHolder(object):
    """
    PlaceHolder instances are used in the Manager logger hierarchy to take
    the place of nodes for which no loggers have been defined. This class is
    intended for internal use only and not as part of the public API.
    """
    def __init__(self, alogger):
        """
        Initialize with the specified logger being a child of this placeholder.
        """
        self.loggerMap = { alogger : None }

    def append(self, alogger):
        """
        Add the specified logger as a child of this placeholder.
        """
        if alogger not in self.loggerMap:
            self.loggerMap[alogger] = None

#
# Determine which class to use when instantiating loggers.
#

def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__()
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass

def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass

class Manager(object):
    """
    There is [under normal circumstances] just one Manager instance, which
    holds the hierarchy of loggers.
    """
    def __init__(self, rootnode):
        """
        Initialize the manager with the root node of the logger hierarchy.
        """
        self.root = rootnode
        self.disable = 0
        self.emittedNoHandlerWarning = False
        self.loggerDict = {}
        self.loggerClass = None
        self.logRecordFactory = None

    @property
    def disable(self):
        return self._disable

    @disable.setter
    def disable(self, value):
        self._disable = _checkLevel(value)

    def getLogger(self, name):
        """
        Get a logger with the specified name (channel name), creating it
        if it doesn't yet exist. This name is a dot-separated hierarchical
        name, such as "a", "a.b", "a.b.c" or similar.

        If a PlaceHolder existed for the specified name [i.e. the logger
        didn't exist but a child of it did], replace it with the created
        logger and fix up the parent/child references which pointed to the
        placeholder to now point to the logger.
        """
        rv = None
        if not isinstance(name, str):
            raise TypeError('A logger name must be a string')
        with _lock:
            if name in self.loggerDict:
                rv = self.loggerDict[name]
                if isinstance(rv, PlaceHolder):
                    ph = rv
                    rv = (self.loggerClass or _loggerClass)(name)
                    rv.manager = self
                    self.loggerDict[name] = rv
                    self._fixupChildren(ph, rv)
                    self._fixupParents(rv)
            else:
                rv = (self.loggerClass or _loggerClass)(name)
                rv.manager = self
                self.loggerDict[name] = rv
                self._fixupParents(rv)
        return rv

    def setLoggerClass(self, klass):
        """
        Set the class to be used when instantiating a logger with this Manager.
        """
        if klass != Logger:
            if not issubclass(klass, Logger):
                raise TypeError("logger not derived from logging.Logger: "
                                + klass.__name__)
        self.loggerClass = klass

    def setLogRecordFactory(self, factory):
        """
        Set the factory to be used when instantiating a log record with this
        Manager.
        """
        self.logRecordFactory = factory

    def _fixupParents(self, alogger):
        """
        Ensure that there are either loggers or placeholders all the way
        from the specified logger to the root of the logger hierarchy.
        """
        name = alogger.name
        i = name.rfind(".")
        rv = None
        while (i > 0) and not rv:
            substr = name[:i]
            if substr not in self.loggerDict:
                self.loggerDict[substr] = PlaceHolder(alogger)
            else:
                obj = self.loggerDict[substr]
                if isinstance(obj, Logger):
                    rv = obj
                else:
                    assert isinstance(obj, PlaceHolder)
                    obj.append(alogger)
            i = name.rfind(".", 0, i - 1)
        if not rv:
            rv = self.root
        alogger.parent = rv

    def _fixupChildren(self, ph, alogger):
        """
        Ensure that children of the placeholder ph are connected to the
        specified logger.
        """
        name = alogger.name
        namelen = len(name)
        for c in ph.loggerMap.keys():
            #The if means ... if not c.parent.name.startswith(nm)
            if c.parent.name[:namelen] != name:
                alogger.parent = c.parent
                c.parent = alogger

    def _clear_cache(self):
        """
        Clear the cache for all loggers in loggerDict
        Called when level changes are made
        """

        with _lock:
            for logger in self.loggerDict.values():
                if isinstance(logger, Logger):
                    logger._cache.clear()
            self.root._cache.clear()

#---------------------------------------------------------------------------
#   Logger classes and functions
#---------------------------------------------------------------------------

class Logger(Filterer):
    """
    Instances of the Logger class represent a single logging channel. A
    "logging channel" indicates an area of an application. Exactly how an
    "area" is defined is up to the application developer. Since an
    application can have any number of areas, logging channels are identified
    by a unique string. Application areas can be nested (e.g. an area
    of "input processing" might include sub-areas "read CSV files", "read
    XLS files" and "read Gnumeric files"). To cater for this natural nesting,
    channel names are organized into a namespace hierarchy where levels are
    separated by periods, much like the Java or Python package namespace. So
    in the instance given above, channel names might be "input" for the upper
    level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
    There is no arbitrary limit to the depth of nesting.
    """
    def __init__(self, name, level=NOTSET):
        """
        Initialize the logger with a name and an optional level.
        """
        Filterer.__init__(self)
        self.name = name
        self.level = _checkLevel(level)
        self.parent = None
        self.propagate = True
        self.handlers = []
        self.disabled = False
        self._cache = {}

    def setLevel(self, level):
        """
        Set the logging level of this logger. level must be an int or a str.
        """
        self.level = _checkLevel(level)
        self.manager._clear_cache()

    def debug(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'DEBUG'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
        """
        if self.isEnabledFor(DEBUG):
            self._log(DEBUG, msg, args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'INFO'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.info("Houston, we have a %s", "notable problem", exc_info=True)
        """
        if self.isEnabledFor(INFO):
            self._log(INFO, msg, args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'WARNING'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
        """
        if self.isEnabledFor(WARNING):
            self._log(WARNING, msg, args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        warnings.warn("The 'warn' method is deprecated, "
                      "use 'warning' instead", DeprecationWarning, 2)
        self.warning(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'ERROR'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.error("Houston, we have a %s", "major problem", exc_info=True)
        """
        if self.isEnabledFor(ERROR):
            self._log(ERROR, msg, args, **kwargs)

    def exception(self, msg, *args, exc_info=True, **kwargs):
        """
        Convenience method for logging an ERROR with exception information.
        """
        self.error(msg, *args, exc_info=exc_info, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """
        Log 'msg % args' with severity 'CRITICAL'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
        """
        if self.isEnabledFor(CRITICAL):
            self._log(CRITICAL, msg, args, **kwargs)

    def fatal(self, msg, *args, **kwargs):
        """
        Don't use this method, use critical() instead.
        """
        self.critical(msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """
        Log 'msg % args' with the integer severity 'level'.

        To pass exception information, use the keyword argument exc_info with
        a true value, e.g.

        logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
        """
        if not isinstance(level, int):
            if raiseExceptions:
                raise TypeError("level must be an integer")
            else:
                return
        if self.isEnabledFor(level):
            self._log(level, msg, args, **kwargs)

    def findCaller(self, stack_info=False, stacklevel=1):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        f = currentframe()
        #On some versions of IronPython, currentframe() returns None if
        #IronPython isn't run with -X:Frames.
        if f is None:
            return "(unknown file)", 0, "(unknown function)", None
        while stacklevel > 0:
            next_f = f.f_back
            if next_f is None:
                ## We've got options here.
                ## If we want to use the last (deepest) frame:
                break
                ## If we want to mimic the warnings module:
                #return ("sys", 1, "(unknown function)", None)
                ## If we want to be pedantic:
                #raise ValueError("call stack is not deep enough")
            f = next_f
            if not _is_internal_frame(f):
                stacklevel -= 1
        co = f.f_code
        sinfo = None
        if stack_info:
            with io.StringIO() as sio:
                sio.write("Stack (most recent call last):\n")
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
        return co.co_filename, f.f_lineno, co.co_name, sinfo

    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        """
        A factory method which can be overridden in subclasses to create
        specialized LogRecords.
        """
        rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func,
                               sinfo)
        if extra is not None:
            for key in extra:
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv

    def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False,
             stacklevel=1):
        """
        Low-level logging routine which creates a LogRecord and then calls
        all the handlers of this logger to handle the record.
        """
        sinfo = None
        if _srcfile:
            #IronPython doesn't track Python frames, so findCaller raises an
            #exception on some versions of IronPython. We trap it here so that
            #IronPython can use logging.
            try:
                fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel)
            except ValueError: # pragma: no cover
                fn, lno, func = "(unknown file)", 0, "(unknown function)"
        else: # pragma: no cover
            fn, lno, func = "(unknown file)", 0, "(unknown function)"
        if exc_info:
            if isinstance(exc_info, BaseException):
                exc_info = (type(exc_info), exc_info, exc_info.__traceback__)
            elif not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.name, level, fn, lno, msg, args,
                                 exc_info, func, extra, sinfo)
        self.handle(record)

    def handle(self, record):
        """
        Call the handlers for the specified record.

        This method is used for unpickled records received from a socket, as
        well as those created locally. Logger-level filtering is applied.
        """
        if self.disabled:
            return
        maybe_record = self.filter(record)
        if not maybe_record:
            return
        if isinstance(maybe_record, LogRecord):
            record = maybe_record
        self.callHandlers(record)

    def addHandler(self, hdlr):
        """
        Add the specified handler to this logger.
        """
        with _lock:
            if not (hdlr in self.handlers):
                self.handlers.append(hdlr)

    def removeHandler(self, hdlr):
        """
        Remove the specified handler from this logger.
        """
        with _lock:
            if hdlr in self.handlers:
                self.handlers.remove(hdlr)

    def hasHandlers(self):
        """
        See if this logger has any handlers configured.

        Loop through all handlers for this logger and its parents in the
        logger hierarchy. Return True if a handler was found, else False.
1705 Stop searching up the hierarchy whenever a logger with the "propagate" 1706 attribute set to zero is found - that will be the last logger which 1707 is checked for the existence of handlers. 1708 """ 1709 c = self 1710 rv = False 1711 while c: 1712 if c.handlers: 1713 rv = True 1714 break 1715 if not c.propagate: 1716 break 1717 else: 1718 c = c.parent 1719 return rv 1720 1721 def callHandlers(self, record): 1722 """ 1723 Pass a record to all relevant handlers. 1724 1725 Loop through all handlers for this logger and its parents in the 1726 logger hierarchy. If no handler was found, output a one-off error 1727 message to sys.stderr. Stop searching up the hierarchy whenever a 1728 logger with the "propagate" attribute set to zero is found - that 1729 will be the last logger whose handlers are called. 1730 """ 1731 c = self 1732 found = 0 1733 while c: 1734 for hdlr in c.handlers: 1735 found = found + 1 1736 if record.levelno >= hdlr.level: 1737 hdlr.handle(record) 1738 if not c.propagate: 1739 c = None #break out 1740 else: 1741 c = c.parent 1742 if (found == 0): 1743 if lastResort: 1744 if record.levelno >= lastResort.level: 1745 lastResort.handle(record) 1746 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1747 sys.stderr.write("No handlers could be found for logger" 1748 " \"%s\"\n" % self.name) 1749 self.manager.emittedNoHandlerWarning = True 1750 1751 def getEffectiveLevel(self): 1752 """ 1753 Get the effective level for this logger. 1754 1755 Loop through this logger and its parents in the logger hierarchy, 1756 looking for a non-zero logging level. Return the first one found. 1757 """ 1758 logger = self 1759 while logger: 1760 if logger.level: 1761 return logger.level 1762 logger = logger.parent 1763 return NOTSET 1764 1765 def isEnabledFor(self, level): 1766 """ 1767 Is this logger enabled for level 'level'? 1768 """ 1769 if self.disabled: 1770 return False 1771 1772 try: 1773 return self._cache[level] 1774 except KeyError: 1775 with _lock: 1776 if self.manager.disable >= level: 1777 is_enabled = self._cache[level] = False 1778 else: 1779 is_enabled = self._cache[level] = ( 1780 level >= self.getEffectiveLevel() 1781 ) 1782 return is_enabled 1783 1784 def getChild(self, suffix): 1785 """ 1786 Get a logger which is a descendant to this one. 1787 1788 This is a convenience method, such that 1789 1790 logging.getLogger('abc').getChild('def.ghi') 1791 1792 is the same as 1793 1794 logging.getLogger('abc.def.ghi') 1795 1796 It's useful, for example, when the parent logger is named using 1797 __name__ rather than a literal string. 1798 """ 1799 if self.root is not self: 1800 suffix = '.'.join((self.name, suffix)) 1801 return self.manager.getLogger(suffix) 1802 1803 def getChildren(self): 1804 1805 def _hierlevel(logger): 1806 if logger is logger.manager.root: 1807 return 0 1808 return 1 + logger.name.count('.') 1809 1810 d = self.manager.loggerDict 1811 with _lock: 1812 # exclude PlaceHolders - the last check is to ensure that lower-level 1813 # descendants aren't returned - if there are placeholders, a logger's 1814 # parent field might point to a grandparent or ancestor thereof. 
1815 return set(item for item in d.values() 1816 if isinstance(item, Logger) and item.parent is self and 1817 _hierlevel(item) == 1 + _hierlevel(item.parent)) 1818 1819 def __repr__(self): 1820 level = getLevelName(self.getEffectiveLevel()) 1821 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1822 1823 def __reduce__(self): 1824 if getLogger(self.name) is not self: 1825 import pickle 1826 raise pickle.PicklingError('logger cannot be pickled') 1827 return getLogger, (self.name,) 1828 1829 1830class RootLogger(Logger): 1831 """ 1832 A root logger is not that different to any other logger, except that 1833 it must have a logging level and there is only one instance of it in 1834 the hierarchy. 1835 """ 1836 def __init__(self, level): 1837 """ 1838 Initialize the logger with the name "root". 1839 """ 1840 Logger.__init__(self, "root", level) 1841 1842 def __reduce__(self): 1843 return getLogger, () 1844 1845_loggerClass = Logger 1846 1847class LoggerAdapter(object): 1848 """ 1849 An adapter for loggers which makes it easier to specify contextual 1850 information in logging output. 1851 """ 1852 1853 def __init__(self, logger, extra=None, merge_extra=False): 1854 """ 1855 Initialize the adapter with a logger and a dict-like object which 1856 provides contextual information. This constructor signature allows 1857 easy stacking of LoggerAdapters, if so desired. 1858 1859 You can effectively pass keyword arguments as shown in the 1860 following example: 1861 1862 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1863 1864 By default, LoggerAdapter objects will drop the "extra" argument 1865 passed on the individual log calls to use its own instead. 1866 1867 Initializing it with merge_extra=True will instead merge both 1868 maps when logging, the individual call extra taking precedence 1869 over the LoggerAdapter instance extra 1870 1871 .. versionchanged:: 3.13 1872 The *merge_extra* argument was added. 1873 """ 1874 self.logger = logger 1875 self.extra = extra 1876 self.merge_extra = merge_extra 1877 1878 def process(self, msg, kwargs): 1879 """ 1880 Process the logging message and keyword arguments passed in to 1881 a logging call to insert contextual information. You can either 1882 manipulate the message itself, the keyword args or both. Return 1883 the message and kwargs modified (or not) to suit your needs. 1884 1885 Normally, you'll only need to override this one method in a 1886 LoggerAdapter subclass for your specific needs. 1887 """ 1888 if self.merge_extra and "extra" in kwargs: 1889 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1890 else: 1891 kwargs["extra"] = self.extra 1892 return msg, kwargs 1893 1894 # 1895 # Boilerplate convenience methods 1896 # 1897 def debug(self, msg, *args, **kwargs): 1898 """ 1899 Delegate a debug call to the underlying logger. 1900 """ 1901 self.log(DEBUG, msg, *args, **kwargs) 1902 1903 def info(self, msg, *args, **kwargs): 1904 """ 1905 Delegate an info call to the underlying logger. 1906 """ 1907 self.log(INFO, msg, *args, **kwargs) 1908 1909 def warning(self, msg, *args, **kwargs): 1910 """ 1911 Delegate a warning call to the underlying logger. 1912 """ 1913 self.log(WARNING, msg, *args, **kwargs) 1914 1915 def warn(self, msg, *args, **kwargs): 1916 warnings.warn("The 'warn' method is deprecated, " 1917 "use 'warning' instead", DeprecationWarning, 2) 1918 self.warning(msg, *args, **kwargs) 1919 1920 def error(self, msg, *args, **kwargs): 1921 """ 1922 Delegate an error call to the underlying logger. 
1923 """ 1924 self.log(ERROR, msg, *args, **kwargs) 1925 1926 def exception(self, msg, *args, exc_info=True, **kwargs): 1927 """ 1928 Delegate an exception call to the underlying logger. 1929 """ 1930 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1931 1932 def critical(self, msg, *args, **kwargs): 1933 """ 1934 Delegate a critical call to the underlying logger. 1935 """ 1936 self.log(CRITICAL, msg, *args, **kwargs) 1937 1938 def log(self, level, msg, *args, **kwargs): 1939 """ 1940 Delegate a log call to the underlying logger, after adding 1941 contextual information from this adapter instance. 1942 """ 1943 if self.isEnabledFor(level): 1944 msg, kwargs = self.process(msg, kwargs) 1945 self.logger.log(level, msg, *args, **kwargs) 1946 1947 def isEnabledFor(self, level): 1948 """ 1949 Is this logger enabled for level 'level'? 1950 """ 1951 return self.logger.isEnabledFor(level) 1952 1953 def setLevel(self, level): 1954 """ 1955 Set the specified level on the underlying logger. 1956 """ 1957 self.logger.setLevel(level) 1958 1959 def getEffectiveLevel(self): 1960 """ 1961 Get the effective level for the underlying logger. 1962 """ 1963 return self.logger.getEffectiveLevel() 1964 1965 def hasHandlers(self): 1966 """ 1967 See if the underlying logger has any handlers. 1968 """ 1969 return self.logger.hasHandlers() 1970 1971 def _log(self, level, msg, args, **kwargs): 1972 """ 1973 Low-level log implementation, proxied to allow nested logger adapters. 1974 """ 1975 return self.logger._log(level, msg, args, **kwargs) 1976 1977 @property 1978 def manager(self): 1979 return self.logger.manager 1980 1981 @manager.setter 1982 def manager(self, value): 1983 self.logger.manager = value 1984 1985 @property 1986 def name(self): 1987 return self.logger.name 1988 1989 def __repr__(self): 1990 logger = self.logger 1991 level = getLevelName(logger.getEffectiveLevel()) 1992 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 1993 1994 __class_getitem__ = classmethod(GenericAlias) 1995 1996root = RootLogger(WARNING) 1997Logger.root = root 1998Logger.manager = Manager(Logger.root) 1999 2000#--------------------------------------------------------------------------- 2001# Configuration classes and functions 2002#--------------------------------------------------------------------------- 2003 2004def basicConfig(**kwargs): 2005 """ 2006 Do basic configuration for the logging system. 2007 2008 This function does nothing if the root logger already has handlers 2009 configured, unless the keyword argument *force* is set to ``True``. 2010 It is a convenience method intended for use by simple scripts 2011 to do one-shot configuration of the logging package. 2012 2013 The default behaviour is to create a StreamHandler which writes to 2014 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 2015 add the handler to the root logger. 2016 2017 A number of optional keyword arguments may be specified, which can alter 2018 the default behaviour. 2019 2020 filename Specifies that a FileHandler be created, using the specified 2021 filename, rather than a StreamHandler. 2022 filemode Specifies the mode to open the file, if filename is specified 2023 (if filemode is unspecified, it defaults to 'a'). 2024 format Use the specified format string for the handler. 2025 datefmt Use the specified date/time format. 
2026 style If a format string is specified, use this to specify the 2027 type of format string (possible values '%', '{', '$', for 2028 %-formatting, :meth:`str.format` and :class:`string.Template` 2029 - defaults to '%'). 2030 level Set the root logger level to the specified level. 2031 stream Use the specified stream to initialize the StreamHandler. Note 2032 that this argument is incompatible with 'filename' - if both 2033 are present, 'stream' is ignored. 2034 handlers If specified, this should be an iterable of already created 2035 handlers, which will be added to the root logger. Any handler 2036 in the list which does not have a formatter assigned will be 2037 assigned the formatter created in this function. 2038 force If this keyword is specified as true, any existing handlers 2039 attached to the root logger are removed and closed, before 2040 carrying out the configuration as specified by the other 2041 arguments. 2042 encoding If specified together with a filename, this encoding is passed to 2043 the created FileHandler, causing it to be used when the file is 2044 opened. 2045 errors If specified together with a filename, this value is passed to the 2046 created FileHandler, causing it to be used when the file is 2047 opened in text mode. If not specified, the default value is 2048 `backslashreplace`. 2049 2050 Note that you could specify a stream created using open(filename, mode) 2051 rather than passing the filename and mode in. However, it should be 2052 remembered that StreamHandler does not close its stream (since it may be 2053 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 2054 when the handler is closed. 2055 2056 .. versionchanged:: 3.2 2057 Added the ``style`` parameter. 2058 2059 .. versionchanged:: 3.3 2060 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 2061 incompatible arguments (e.g. ``handlers`` specified together with 2062 ``filename``/``filemode``, or ``filename``/``filemode`` specified 2063 together with ``stream``, or ``handlers`` specified together with 2064 ``stream``. 2065 2066 .. versionchanged:: 3.8 2067 Added the ``force`` parameter. 2068 2069 .. versionchanged:: 3.9 2070 Added the ``encoding`` and ``errors`` parameters. 
2071 """ 2072 # Add thread safety in case someone mistakenly calls 2073 # basicConfig() from multiple threads 2074 with _lock: 2075 force = kwargs.pop('force', False) 2076 encoding = kwargs.pop('encoding', None) 2077 errors = kwargs.pop('errors', 'backslashreplace') 2078 if force: 2079 for h in root.handlers[:]: 2080 root.removeHandler(h) 2081 h.close() 2082 if len(root.handlers) == 0: 2083 handlers = kwargs.pop("handlers", None) 2084 if handlers is None: 2085 if "stream" in kwargs and "filename" in kwargs: 2086 raise ValueError("'stream' and 'filename' should not be " 2087 "specified together") 2088 else: 2089 if "stream" in kwargs or "filename" in kwargs: 2090 raise ValueError("'stream' or 'filename' should not be " 2091 "specified together with 'handlers'") 2092 if handlers is None: 2093 filename = kwargs.pop("filename", None) 2094 mode = kwargs.pop("filemode", 'a') 2095 if filename: 2096 if 'b' in mode: 2097 errors = None 2098 else: 2099 encoding = io.text_encoding(encoding) 2100 h = FileHandler(filename, mode, 2101 encoding=encoding, errors=errors) 2102 else: 2103 stream = kwargs.pop("stream", None) 2104 h = StreamHandler(stream) 2105 handlers = [h] 2106 dfs = kwargs.pop("datefmt", None) 2107 style = kwargs.pop("style", '%') 2108 if style not in _STYLES: 2109 raise ValueError('Style must be one of: %s' % ','.join( 2110 _STYLES.keys())) 2111 fs = kwargs.pop("format", _STYLES[style][1]) 2112 fmt = Formatter(fs, dfs, style) 2113 for h in handlers: 2114 if h.formatter is None: 2115 h.setFormatter(fmt) 2116 root.addHandler(h) 2117 level = kwargs.pop("level", None) 2118 if level is not None: 2119 root.setLevel(level) 2120 if kwargs: 2121 keys = ', '.join(kwargs.keys()) 2122 raise ValueError('Unrecognised argument(s): %s' % keys) 2123 2124#--------------------------------------------------------------------------- 2125# Utility functions at module level. 2126# Basically delegate everything to the root logger. 2127#--------------------------------------------------------------------------- 2128 2129def getLogger(name=None): 2130 """ 2131 Return a logger with the specified name, creating it if necessary. 2132 2133 If no name is specified, return the root logger. 2134 """ 2135 if not name or isinstance(name, str) and name == root.name: 2136 return root 2137 return Logger.manager.getLogger(name) 2138 2139def critical(msg, *args, **kwargs): 2140 """ 2141 Log a message with severity 'CRITICAL' on the root logger. If the logger 2142 has no handlers, call basicConfig() to add a console handler with a 2143 pre-defined format. 2144 """ 2145 if len(root.handlers) == 0: 2146 basicConfig() 2147 root.critical(msg, *args, **kwargs) 2148 2149def fatal(msg, *args, **kwargs): 2150 """ 2151 Don't use this function, use critical() instead. 2152 """ 2153 critical(msg, *args, **kwargs) 2154 2155def error(msg, *args, **kwargs): 2156 """ 2157 Log a message with severity 'ERROR' on the root logger. If the logger has 2158 no handlers, call basicConfig() to add a console handler with a pre-defined 2159 format. 2160 """ 2161 if len(root.handlers) == 0: 2162 basicConfig() 2163 root.error(msg, *args, **kwargs) 2164 2165def exception(msg, *args, exc_info=True, **kwargs): 2166 """ 2167 Log a message with severity 'ERROR' on the root logger, with exception 2168 information. If the logger has no handlers, basicConfig() is called to add 2169 a console handler with a pre-defined format. 
2170 """ 2171 error(msg, *args, exc_info=exc_info, **kwargs) 2172 2173def warning(msg, *args, **kwargs): 2174 """ 2175 Log a message with severity 'WARNING' on the root logger. If the logger has 2176 no handlers, call basicConfig() to add a console handler with a pre-defined 2177 format. 2178 """ 2179 if len(root.handlers) == 0: 2180 basicConfig() 2181 root.warning(msg, *args, **kwargs) 2182 2183def warn(msg, *args, **kwargs): 2184 warnings.warn("The 'warn' function is deprecated, " 2185 "use 'warning' instead", DeprecationWarning, 2) 2186 warning(msg, *args, **kwargs) 2187 2188def info(msg, *args, **kwargs): 2189 """ 2190 Log a message with severity 'INFO' on the root logger. If the logger has 2191 no handlers, call basicConfig() to add a console handler with a pre-defined 2192 format. 2193 """ 2194 if len(root.handlers) == 0: 2195 basicConfig() 2196 root.info(msg, *args, **kwargs) 2197 2198def debug(msg, *args, **kwargs): 2199 """ 2200 Log a message with severity 'DEBUG' on the root logger. If the logger has 2201 no handlers, call basicConfig() to add a console handler with a pre-defined 2202 format. 2203 """ 2204 if len(root.handlers) == 0: 2205 basicConfig() 2206 root.debug(msg, *args, **kwargs) 2207 2208def log(level, msg, *args, **kwargs): 2209 """ 2210 Log 'msg % args' with the integer severity 'level' on the root logger. If 2211 the logger has no handlers, call basicConfig() to add a console handler 2212 with a pre-defined format. 2213 """ 2214 if len(root.handlers) == 0: 2215 basicConfig() 2216 root.log(level, msg, *args, **kwargs) 2217 2218def disable(level=CRITICAL): 2219 """ 2220 Disable all logging calls of severity 'level' and below. 2221 """ 2222 root.manager.disable = level 2223 root.manager._clear_cache() 2224 2225def shutdown(handlerList=_handlerList): 2226 """ 2227 Perform any cleanup actions in the logging system (e.g. flushing 2228 buffers). 2229 2230 Should be called at application exit. 2231 """ 2232 for wr in reversed(handlerList[:]): 2233 #errors might occur, for example, if files are locked 2234 #we just ignore them if raiseExceptions is not set 2235 try: 2236 h = wr() 2237 if h: 2238 try: 2239 h.acquire() 2240 # MemoryHandlers might not want to be flushed on close, 2241 # but circular imports prevent us scoping this to just 2242 # those handlers. hence the default to True. 2243 if getattr(h, 'flushOnClose', True): 2244 h.flush() 2245 h.close() 2246 except (OSError, ValueError): 2247 # Ignore errors which might be caused 2248 # because handlers have been closed but 2249 # references to them are still around at 2250 # application exit. 2251 pass 2252 finally: 2253 h.release() 2254 except: # ignore everything, as we're shutting down 2255 if raiseExceptions: 2256 raise 2257 #else, swallow 2258 2259#Let's try and shutdown automatically on application exit... 2260import atexit 2261atexit.register(shutdown) 2262 2263# Null handler 2264 2265class NullHandler(Handler): 2266 """ 2267 This handler does nothing. It's intended to be used to avoid the 2268 "No handlers could be found for logger XXX" one-off warning. This is 2269 important for library code, which may contain code to log events. If a user 2270 of the library does not configure logging, the one-off warning might be 2271 produced; to avoid this, the library developer simply needs to instantiate 2272 a NullHandler and add it to the top-level logger of the library module or 2273 package. 
2274 """ 2275 def handle(self, record): 2276 """Stub.""" 2277 2278 def emit(self, record): 2279 """Stub.""" 2280 2281 def createLock(self): 2282 self.lock = None 2283 2284 def _at_fork_reinit(self): 2285 pass 2286 2287# Warnings integration 2288 2289_warnings_showwarning = None 2290 2291def _showwarning(message, category, filename, lineno, file=None, line=None): 2292 """ 2293 Implementation of showwarnings which redirects to logging, which will first 2294 check to see if the file parameter is None. If a file is specified, it will 2295 delegate to the original warnings implementation of showwarning. Otherwise, 2296 it will call warnings.formatwarning and will log the resulting string to a 2297 warnings logger named "py.warnings" with level logging.WARNING. 2298 """ 2299 if file is not None: 2300 if _warnings_showwarning is not None: 2301 _warnings_showwarning(message, category, filename, lineno, file, line) 2302 else: 2303 s = warnings.formatwarning(message, category, filename, lineno, line) 2304 logger = getLogger("py.warnings") 2305 if not logger.handlers: 2306 logger.addHandler(NullHandler()) 2307 # bpo-46557: Log str(s) as msg instead of logger.warning("%s", s) 2308 # since some log aggregation tools group logs by the msg arg 2309 logger.warning(str(s)) 2310 2311def captureWarnings(capture): 2312 """ 2313 If capture is true, redirect all warnings to the logging package. 2314 If capture is False, ensure that warnings are not redirected to logging 2315 but to their original destinations. 2316 """ 2317 global _warnings_showwarning 2318 if capture: 2319 if _warnings_showwarning is None: 2320 _warnings_showwarning = warnings.showwarning 2321 warnings.showwarning = _showwarning 2322 else: 2323 if _warnings_showwarning is not None: 2324 warnings.showwarning = _warnings_showwarning 2325 _warnings_showwarning = None
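A minimal usage sketch (editorial addition, not part of the module source): one-shot configuration with basicConfig() and routing warnings into the "py.warnings" logger with captureWarnings(). The format string and logger name are illustrative.

import logging
import warnings

logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)
logging.captureWarnings(True)      # warnings.warn() messages are now logged
warnings.warn("this ends up in the 'py.warnings' logger")
logging.getLogger("demo").info("root logger configured")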
737class BufferingFormatter(object): 738 """ 739 A formatter suitable for formatting a number of records. 740 """ 741 def __init__(self, linefmt=None): 742 """ 743 Optionally specify a formatter which will be used to format each 744 individual record. 745 """ 746 if linefmt: 747 self.linefmt = linefmt 748 else: 749 self.linefmt = _defaultFormatter 750 751 def formatHeader(self, records): 752 """ 753 Return the header string for the specified records. 754 """ 755 return "" 756 757 def formatFooter(self, records): 758 """ 759 Return the footer string for the specified records. 760 """ 761 return "" 762 763 def format(self, records): 764 """ 765 Format the specified records and return the result as a string. 766 """ 767 rv = "" 768 if len(records) > 0: 769 rv = rv + self.formatHeader(records) 770 for record in records: 771 rv = rv + self.linefmt.format(record) 772 rv = rv + self.formatFooter(records) 773 return rv
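A short sketch (assumed usage, not part of the module) of formatting a batch of records with BufferingFormatter; the per-line format string and record fields are illustrative.

import logging

# A per-record formatter whose output ends in a newline, so batched lines stay separate.
line_fmt = logging.Formatter("%(levelname)s %(name)s %(message)s\n")
batch_fmt = logging.BufferingFormatter(linefmt=line_fmt)

records = [
    logging.LogRecord("demo", logging.INFO, "example.py", 1, "first event", None, None),
    logging.LogRecord("demo", logging.WARNING, "example.py", 2, "second event", None, None),
]
print(batch_fmt.format(records), end="")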
1191class FileHandler(StreamHandler): 1192 """ 1193 A handler class which writes formatted logging records to disk files. 1194 """ 1195 def __init__(self, filename, mode='a', encoding=None, delay=False, errors=None): 1196 """ 1197 Open the specified file and use it as the stream for logging. 1198 """ 1199 # Issue #27493: add support for Path objects to be passed in 1200 filename = os.fspath(filename) 1201 #keep the absolute path, otherwise derived classes which use this 1202 #may come a cropper when the current directory changes 1203 self.baseFilename = os.path.abspath(filename) 1204 self.mode = mode 1205 self.encoding = encoding 1206 if "b" not in mode: 1207 self.encoding = io.text_encoding(encoding) 1208 self.errors = errors 1209 self.delay = delay 1210 # bpo-26789: FileHandler keeps a reference to the builtin open() 1211 # function to be able to open or reopen the file during Python 1212 # finalization. 1213 self._builtin_open = open 1214 if delay: 1215 #We don't open the stream, but we still need to call the 1216 #Handler constructor to set level, formatter, lock etc. 1217 Handler.__init__(self) 1218 self.stream = None 1219 else: 1220 StreamHandler.__init__(self, self._open()) 1221 1222 def close(self): 1223 """ 1224 Closes the stream. 1225 """ 1226 with self.lock: 1227 try: 1228 if self.stream: 1229 try: 1230 self.flush() 1231 finally: 1232 stream = self.stream 1233 self.stream = None 1234 if hasattr(stream, "close"): 1235 stream.close() 1236 finally: 1237 # Issue #19523: call unconditionally to 1238 # prevent a handler leak when delay is set 1239 # Also see Issue #42378: we also rely on 1240 # self._closed being set to True there 1241 StreamHandler.close(self) 1242 1243 def _open(self): 1244 """ 1245 Open the current base file with the (original) mode and encoding. 1246 Return the resulting stream. 1247 """ 1248 open_func = self._builtin_open 1249 return open_func(self.baseFilename, self.mode, 1250 encoding=self.encoding, errors=self.errors) 1251 1252 def emit(self, record): 1253 """ 1254 Emit a record. 1255 1256 If the stream was not opened because 'delay' was specified in the 1257 constructor, open it before calling the superclass's emit. 1258 1259 If stream is not open, current mode is 'w' and `_closed=True`, record 1260 will not be emitted (see Issue #42378). 1261 """ 1262 if self.stream is None: 1263 if self.mode != 'w' or not self._closed: 1264 self.stream = self._open() 1265 if self.stream: 1266 StreamHandler.emit(self, record) 1267 1268 def __repr__(self): 1269 level = getLevelName(self.level) 1270 return '<%s %s (%s)>' % (self.__class__.__name__, self.baseFilename, level)
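A brief sketch (assumed usage) of FileHandler with delay=True, which defers opening the file until the first record is emitted; the filename is illustrative.

import logging

handler = logging.FileHandler("app.log", mode="a", encoding="utf-8", delay=True)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

logger = logging.getLogger("demo.file")
logger.setLevel(logging.INFO)
logger.addHandler(handler)

logger.info("the stream is opened here, on the first emit")
handler.close()   # FileHandler closes the stream it opened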
779class Filter(object): 780 """ 781 Filter instances are used to perform arbitrary filtering of LogRecords. 782 783 Loggers and Handlers can optionally use Filter instances to filter 784 records as desired. The base filter class only allows events which are 785 below a certain point in the logger hierarchy. For example, a filter 786 initialized with "A.B" will allow events logged by loggers "A.B", 787 "A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If 788 initialized with the empty string, all events are passed. 789 """ 790 def __init__(self, name=''): 791 """ 792 Initialize a filter. 793 794 Initialize with the name of the logger which, together with its 795 children, will have its events allowed through the filter. If no 796 name is specified, allow every event. 797 """ 798 self.name = name 799 self.nlen = len(name) 800 801 def filter(self, record): 802 """ 803 Determine if the specified record is to be logged. 804 805 Returns True if the record should be logged, or False otherwise. 806 If deemed appropriate, the record may be modified in-place. 807 """ 808 if self.nlen == 0: 809 return True 810 elif self.name == record.name: 811 return True 812 elif record.name.find(self.name, 0, self.nlen) != 0: 813 return False 814 return (record.name[self.nlen] == ".")
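A small sketch (assumed usage) of the base Filter's hierarchy check; the logger names are illustrative.

import logging

flt = logging.Filter("A.B")

child = logging.LogRecord("A.B.C", logging.INFO, "example.py", 1, "kept", None, None)
sibling = logging.LogRecord("A.BB", logging.INFO, "example.py", 2, "dropped", None, None)

print(flt.filter(child))     # True  - "A.B.C" is a descendant of "A.B"
print(flt.filter(sibling))   # False - "A.BB" only shares a string prefix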
555class Formatter(object): 556 """ 557 Formatter instances are used to convert a LogRecord to text. 558 559 Formatters need to know how a LogRecord is constructed. They are 560 responsible for converting a LogRecord to (usually) a string which can 561 be interpreted by either a human or an external system. The base Formatter 562 allows a formatting string to be specified. If none is supplied, the 563 style-dependent default value, "%(message)s", "{message}", or 564 "${message}", is used. 565 566 The Formatter can be initialized with a format string which makes use of 567 knowledge of the LogRecord attributes - e.g. the default value mentioned 568 above makes use of the fact that the user's message and arguments are pre- 569 formatted into a LogRecord's message attribute. Currently, the useful 570 attributes in a LogRecord are described by: 571 572 %(name)s Name of the logger (logging channel) 573 %(levelno)s Numeric logging level for the message (DEBUG, INFO, 574 WARNING, ERROR, CRITICAL) 575 %(levelname)s Text logging level for the message ("DEBUG", "INFO", 576 "WARNING", "ERROR", "CRITICAL") 577 %(pathname)s Full pathname of the source file where the logging 578 call was issued (if available) 579 %(filename)s Filename portion of pathname 580 %(module)s Module (name portion of filename) 581 %(lineno)d Source line number where the logging call was issued 582 (if available) 583 %(funcName)s Function name 584 %(created)f Time when the LogRecord was created (time.time_ns() / 1e9 585 return value) 586 %(asctime)s Textual time when the LogRecord was created 587 %(msecs)d Millisecond portion of the creation time 588 %(relativeCreated)d Time in milliseconds when the LogRecord was created, 589 relative to the time the logging module was loaded 590 (typically at application startup time) 591 %(thread)d Thread ID (if available) 592 %(threadName)s Thread name (if available) 593 %(taskName)s Task name (if available) 594 %(process)d Process ID (if available) 595 %(processName)s Process name (if available) 596 %(message)s The result of record.getMessage(), computed just as 597 the record is emitted 598 """ 599 600 converter = time.localtime 601 602 def __init__(self, fmt=None, datefmt=None, style='%', validate=True, *, 603 defaults=None): 604 """ 605 Initialize the formatter with specified format strings. 606 607 Initialize the formatter either with the specified format string, or a 608 default as described above. Allow for specialized date formatting with 609 the optional datefmt argument. If datefmt is omitted, you get an 610 ISO8601-like (or RFC 3339-like) format. 611 612 Use a style parameter of '%', '{' or '$' to specify that you want to 613 use one of %-formatting, :meth:`str.format` (``{}``) formatting or 614 :class:`string.Template` formatting in your format string. 615 616 .. versionchanged:: 3.2 617 Added the ``style`` parameter. 618 """ 619 if style not in _STYLES: 620 raise ValueError('Style must be one of: %s' % ','.join( 621 _STYLES.keys())) 622 self._style = _STYLES[style][0](fmt, defaults=defaults) 623 if validate: 624 self._style.validate() 625 626 self._fmt = self._style._fmt 627 self.datefmt = datefmt 628 629 default_time_format = '%Y-%m-%d %H:%M:%S' 630 default_msec_format = '%s,%03d' 631 632 def formatTime(self, record, datefmt=None): 633 """ 634 Return the creation time of the specified LogRecord as formatted text. 635 636 This method should be called from format() by a formatter which 637 wants to make use of a formatted time. 
This method can be overridden 638 in formatters to provide for any specific requirement, but the 639 basic behaviour is as follows: if datefmt (a string) is specified, 640 it is used with time.strftime() to format the creation time of the 641 record. Otherwise, an ISO8601-like (or RFC 3339-like) format is used. 642 The resulting string is returned. This function uses a user-configurable 643 function to convert the creation time to a tuple. By default, 644 time.localtime() is used; to change this for a particular formatter 645 instance, set the 'converter' attribute to a function with the same 646 signature as time.localtime() or time.gmtime(). To change it for all 647 formatters, for example if you want all logging times to be shown in GMT, 648 set the 'converter' attribute in the Formatter class. 649 """ 650 ct = self.converter(record.created) 651 if datefmt: 652 s = time.strftime(datefmt, ct) 653 else: 654 s = time.strftime(self.default_time_format, ct) 655 if self.default_msec_format: 656 s = self.default_msec_format % (s, record.msecs) 657 return s 658 659 def formatException(self, ei): 660 """ 661 Format and return the specified exception information as a string. 662 663 This default implementation just uses 664 traceback.print_exception() 665 """ 666 sio = io.StringIO() 667 tb = ei[2] 668 # See issues #9427, #1553375. Commented out for now. 669 #if getattr(self, 'fullstack', False): 670 # traceback.print_stack(tb.tb_frame.f_back, file=sio) 671 traceback.print_exception(ei[0], ei[1], tb, limit=None, file=sio) 672 s = sio.getvalue() 673 sio.close() 674 if s[-1:] == "\n": 675 s = s[:-1] 676 return s 677 678 def usesTime(self): 679 """ 680 Check if the format uses the creation time of the record. 681 """ 682 return self._style.usesTime() 683 684 def formatMessage(self, record): 685 return self._style.format(record) 686 687 def formatStack(self, stack_info): 688 """ 689 This method is provided as an extension point for specialized 690 formatting of stack information. 691 692 The input data is a string as returned from a call to 693 :func:`traceback.print_stack`, but with the last trailing newline 694 removed. 695 696 The base implementation just returns the value passed in. 697 """ 698 return stack_info 699 700 def format(self, record): 701 """ 702 Format the specified record as text. 703 704 The record's attribute dictionary is used as the operand to a 705 string formatting operation which yields the returned string. 706 Before formatting the dictionary, a couple of preparatory steps 707 are carried out. The message attribute of the record is computed 708 using LogRecord.getMessage(). If the formatting string uses the 709 time (as determined by a call to usesTime(), formatTime() is 710 called to format the event time. If there is exception information, 711 it is formatted using formatException() and appended to the message. 712 """ 713 record.message = record.getMessage() 714 if self.usesTime(): 715 record.asctime = self.formatTime(record, self.datefmt) 716 s = self.formatMessage(record) 717 if record.exc_info: 718 # Cache the traceback text to avoid converting it multiple times 719 # (it's constant anyway) 720 if not record.exc_text: 721 record.exc_text = self.formatException(record.exc_info) 722 if record.exc_text: 723 if s[-1:] != "\n": 724 s = s + "\n" 725 s = s + record.exc_text 726 if record.stack_info: 727 if s[-1:] != "\n": 728 s = s + "\n" 729 s = s + self.formatStack(record.stack_info) 730 return s
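A compact sketch (assumed usage) of a '{'-style Formatter with UTC timestamps set through the 'converter' attribute; the format string and record fields are illustrative.

import logging
import time

fmt = logging.Formatter(
    "{asctime} {levelname:<8} {name}: {message}",
    datefmt="%Y-%m-%dT%H:%M:%S",
    style="{",
)
fmt.converter = time.gmtime   # per-instance override; set it on the class to affect all formatters

record = logging.LogRecord("demo", logging.ERROR, "example.py", 10, "disk %s", ("full",), None)
print(fmt.format(record))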
923class Handler(Filterer): 924 """ 925 Handler instances dispatch logging events to specific destinations. 926 927 The base handler class. Acts as a placeholder which defines the Handler 928 interface. Handlers can optionally use Formatter instances to format 929 records as desired. By default, no formatter is specified; in this case, 930 the 'raw' message as determined by record.message is logged. 931 """ 932 def __init__(self, level=NOTSET): 933 """ 934 Initializes the instance - basically setting the formatter to None 935 and the filter list to empty. 936 """ 937 Filterer.__init__(self) 938 self._name = None 939 self.level = _checkLevel(level) 940 self.formatter = None 941 self._closed = False 942 # Add the handler to the global _handlerList (for cleanup on shutdown) 943 _addHandlerRef(self) 944 self.createLock() 945 946 def get_name(self): 947 return self._name 948 949 def set_name(self, name): 950 with _lock: 951 if self._name in _handlers: 952 del _handlers[self._name] 953 self._name = name 954 if name: 955 _handlers[name] = self 956 957 name = property(get_name, set_name) 958 959 def createLock(self): 960 """ 961 Acquire a thread lock for serializing access to the underlying I/O. 962 """ 963 self.lock = threading.RLock() 964 _register_at_fork_reinit_lock(self) 965 966 def _at_fork_reinit(self): 967 self.lock._at_fork_reinit() 968 969 def acquire(self): 970 """ 971 Acquire the I/O thread lock. 972 """ 973 if self.lock: 974 self.lock.acquire() 975 976 def release(self): 977 """ 978 Release the I/O thread lock. 979 """ 980 if self.lock: 981 self.lock.release() 982 983 def setLevel(self, level): 984 """ 985 Set the logging level of this handler. level must be an int or a str. 986 """ 987 self.level = _checkLevel(level) 988 989 def format(self, record): 990 """ 991 Format the specified record. 992 993 If a formatter is set, use it. Otherwise, use the default formatter 994 for the module. 995 """ 996 if self.formatter: 997 fmt = self.formatter 998 else: 999 fmt = _defaultFormatter 1000 return fmt.format(record) 1001 1002 def emit(self, record): 1003 """ 1004 Do whatever it takes to actually log the specified logging record. 1005 1006 This version is intended to be implemented by subclasses and so 1007 raises a NotImplementedError. 1008 """ 1009 raise NotImplementedError('emit must be implemented ' 1010 'by Handler subclasses') 1011 1012 def handle(self, record): 1013 """ 1014 Conditionally emit the specified logging record. 1015 1016 Emission depends on filters which may have been added to the handler. 1017 Wrap the actual emission of the record with acquisition/release of 1018 the I/O thread lock. 1019 1020 Returns an instance of the log record that was emitted 1021 if it passed all filters, otherwise a false value is returned. 1022 """ 1023 rv = self.filter(record) 1024 if isinstance(rv, LogRecord): 1025 record = rv 1026 if rv: 1027 with self.lock: 1028 self.emit(record) 1029 return rv 1030 1031 def setFormatter(self, fmt): 1032 """ 1033 Set the formatter for this handler. 1034 """ 1035 self.formatter = fmt 1036 1037 def flush(self): 1038 """ 1039 Ensure all logging output has been flushed. 1040 1041 This version does nothing and is intended to be implemented by 1042 subclasses. 1043 """ 1044 pass 1045 1046 def close(self): 1047 """ 1048 Tidy up any resources used by the handler. 1049 1050 This version removes the handler from an internal map of handlers, 1051 _handlers, which is used for handler lookup by name. 
Subclasses 1052 should ensure that this gets called from overridden close() 1053 methods. 1054 """ 1055 #get the module data lock, as we're updating a shared structure. 1056 with _lock: 1057 self._closed = True 1058 if self._name and self._name in _handlers: 1059 del _handlers[self._name] 1060 1061 def handleError(self, record): 1062 """ 1063 Handle errors which occur during an emit() call. 1064 1065 This method should be called from handlers when an exception is 1066 encountered during an emit() call. If raiseExceptions is false, 1067 exceptions get silently ignored. This is what is mostly wanted 1068 for a logging system - most users will not care about errors in 1069 the logging system, they are more interested in application errors. 1070 You could, however, replace this with a custom handler if you wish. 1071 The record which was being processed is passed in to this method. 1072 """ 1073 if raiseExceptions and sys.stderr: # see issue 13807 1074 exc = sys.exception() 1075 try: 1076 sys.stderr.write('--- Logging error ---\n') 1077 traceback.print_exception(exc, limit=None, file=sys.stderr) 1078 sys.stderr.write('Call stack:\n') 1079 # Walk the stack frame up until we're out of logging, 1080 # so as to print the calling context. 1081 frame = exc.__traceback__.tb_frame 1082 while (frame and os.path.dirname(frame.f_code.co_filename) == 1083 __path__[0]): 1084 frame = frame.f_back 1085 if frame: 1086 traceback.print_stack(frame, file=sys.stderr) 1087 else: 1088 # couldn't find the right stack frame, for some reason 1089 sys.stderr.write('Logged from file %s, line %s\n' % ( 1090 record.filename, record.lineno)) 1091 # Issue 18671: output logging message and arguments 1092 try: 1093 sys.stderr.write('Message: %r\n' 1094 'Arguments: %s\n' % (record.msg, 1095 record.args)) 1096 except RecursionError: # See issue 36272 1097 raise 1098 except Exception: 1099 sys.stderr.write('Unable to print the message and arguments' 1100 ' - possible formatting error.\nUse the' 1101 ' traceback above to help find the error.\n' 1102 ) 1103 except OSError: #pragma: no cover 1104 pass # see issue 5971 1105 finally: 1106 del exc 1107 1108 def __repr__(self): 1109 level = getLevelName(self.level) 1110 return '<%s (%s)>' % (self.__class__.__name__, level)
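A minimal sketch (assumed usage) of a custom Handler subclass that collects formatted records in memory; the class and logger names are illustrative.

import logging

class ListHandler(logging.Handler):
    """Collect formatted records in a list instead of writing them out."""

    def __init__(self, level=logging.NOTSET):
        super().__init__(level)
        self.messages = []

    def emit(self, record):
        try:
            self.messages.append(self.format(record))
        except Exception:
            self.handleError(record)   # honours the module-level raiseExceptions flag

logger = logging.getLogger("demo.capture")
collector = ListHandler()
logger.addHandler(collector)
logger.warning("captured in memory")
print(collector.messages)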
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler interface. Handlers can optionally use Formatter instances to format records as desired. By default, no formatter is specified; in this case, the 'raw' message as determined by record.message is logged.
932 def __init__(self, level=NOTSET): 933 """ 934 Initializes the instance - basically setting the formatter to None 935 and the filter list to empty. 936 """ 937 Filterer.__init__(self) 938 self._name = None 939 self.level = _checkLevel(level) 940 self.formatter = None 941 self._closed = False 942 # Add the handler to the global _handlerList (for cleanup on shutdown) 943 _addHandlerRef(self) 944 self.createLock()
Initializes the instance - basically setting the formatter to None and the filter list to empty.
959 def createLock(self): 960 """ 961 Acquire a thread lock for serializing access to the underlying I/O. 962 """ 963 self.lock = threading.RLock() 964 _register_at_fork_reinit_lock(self)
Acquire a thread lock for serializing access to the underlying I/O.
969 def acquire(self): 970 """ 971 Acquire the I/O thread lock. 972 """ 973 if self.lock: 974 self.lock.acquire()
Acquire the I/O thread lock.
976 def release(self): 977 """ 978 Release the I/O thread lock. 979 """ 980 if self.lock: 981 self.lock.release()
Release the I/O thread lock.
983 def setLevel(self, level): 984 """ 985 Set the logging level of this handler. level must be an int or a str. 986 """ 987 self.level = _checkLevel(level)
Set the logging level of this handler. level must be an int or a str.
989 def format(self, record): 990 """ 991 Format the specified record. 992 993 If a formatter is set, use it. Otherwise, use the default formatter 994 for the module. 995 """ 996 if self.formatter: 997 fmt = self.formatter 998 else: 999 fmt = _defaultFormatter 1000 return fmt.format(record)
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter for the module.
1002 def emit(self, record): 1003 """ 1004 Do whatever it takes to actually log the specified logging record. 1005 1006 This version is intended to be implemented by subclasses and so 1007 raises a NotImplementedError. 1008 """ 1009 raise NotImplementedError('emit must be implemented ' 1010 'by Handler subclasses')
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so raises a NotImplementedError.
1012 def handle(self, record): 1013 """ 1014 Conditionally emit the specified logging record. 1015 1016 Emission depends on filters which may have been added to the handler. 1017 Wrap the actual emission of the record with acquisition/release of 1018 the I/O thread lock. 1019 1020 Returns an instance of the log record that was emitted 1021 if it passed all filters, otherwise a false value is returned. 1022 """ 1023 rv = self.filter(record) 1024 if isinstance(rv, LogRecord): 1025 record = rv 1026 if rv: 1027 with self.lock: 1028 self.emit(record) 1029 return rv
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler. Wrap the actual emission of the record with acquisition/release of the I/O thread lock.
Returns an instance of the log record that was emitted if it passed all filters, otherwise a false value is returned.
1031 def setFormatter(self, fmt): 1032 """ 1033 Set the formatter for this handler. 1034 """ 1035 self.formatter = fmt
Set the formatter for this handler.
1037 def flush(self): 1038 """ 1039 Ensure all logging output has been flushed. 1040 1041 This version does nothing and is intended to be implemented by 1042 subclasses. 1043 """ 1044 pass
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by subclasses.
1046 def close(self): 1047 """ 1048 Tidy up any resources used by the handler. 1049 1050 This version removes the handler from an internal map of handlers, 1051 _handlers, which is used for handler lookup by name. Subclasses 1052 should ensure that this gets called from overridden close() 1053 methods. 1054 """ 1055 #get the module data lock, as we're updating a shared structure. 1056 with _lock: 1057 self._closed = True 1058 if self._name and self._name in _handlers: 1059 del _handlers[self._name]
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers, _handlers, which is used for handler lookup by name. Subclasses should ensure that this gets called from overridden close() methods.
1061 def handleError(self, record): 1062 """ 1063 Handle errors which occur during an emit() call. 1064 1065 This method should be called from handlers when an exception is 1066 encountered during an emit() call. If raiseExceptions is false, 1067 exceptions get silently ignored. This is what is mostly wanted 1068 for a logging system - most users will not care about errors in 1069 the logging system, they are more interested in application errors. 1070 You could, however, replace this with a custom handler if you wish. 1071 The record which was being processed is passed in to this method. 1072 """ 1073 if raiseExceptions and sys.stderr: # see issue 13807 1074 exc = sys.exception() 1075 try: 1076 sys.stderr.write('--- Logging error ---\n') 1077 traceback.print_exception(exc, limit=None, file=sys.stderr) 1078 sys.stderr.write('Call stack:\n') 1079 # Walk the stack frame up until we're out of logging, 1080 # so as to print the calling context. 1081 frame = exc.__traceback__.tb_frame 1082 while (frame and os.path.dirname(frame.f_code.co_filename) == 1083 __path__[0]): 1084 frame = frame.f_back 1085 if frame: 1086 traceback.print_stack(frame, file=sys.stderr) 1087 else: 1088 # couldn't find the right stack frame, for some reason 1089 sys.stderr.write('Logged from file %s, line %s\n' % ( 1090 record.filename, record.lineno)) 1091 # Issue 18671: output logging message and arguments 1092 try: 1093 sys.stderr.write('Message: %r\n' 1094 'Arguments: %s\n' % (record.msg, 1095 record.args)) 1096 except RecursionError: # See issue 36272 1097 raise 1098 except Exception: 1099 sys.stderr.write('Unable to print the message and arguments' 1100 ' - possible formatting error.\nUse the' 1101 ' traceback above to help find the error.\n' 1102 ) 1103 except OSError: #pragma: no cover 1104 pass # see issue 5971 1105 finally: 1106 del exc
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is encountered during an emit() call. If raiseExceptions is false, exceptions get silently ignored. This is what is mostly wanted for a logging system: most users will not care about errors in the logging system; they are more interested in application errors. You could, however, replace this with a custom handler if you wish. The record which was being processed is passed in to this method.
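As a hedged sketch, a subclass can override handleError() to report logging-internal failures more quietly than the default traceback dump; FailSoftHandler is a hypothetical name:

import logging, sys

class FailSoftHandler(logging.StreamHandler):
    def handleError(self, record):
        # Replace the default traceback dump with a one-line note.
        # (Setting logging.raiseExceptions = False would suppress output entirely.)
        sys.stderr.write("logging failure for record from %s:%d\n"
                         % (record.filename, record.lineno))

log = logging.getLogger("failsoft")
log.addHandler(FailSoftHandler(sys.stdout))
log.error("count: %d", "not a number")   # %d with a str makes emit() fail, so handleError runs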
287class LogRecord(object): 288 """ 289 A LogRecord instance represents an event being logged. 290 291 LogRecord instances are created every time something is logged. They 292 contain all the information pertinent to the event being logged. The 293 main information passed in is in msg and args, which are combined 294 using str(msg) % args to create the message field of the record. The 295 record also includes information such as when the record was created, 296 the source line where the logging call was made, and any exception 297 information to be logged. 298 """ 299 def __init__(self, name, level, pathname, lineno, 300 msg, args, exc_info, func=None, sinfo=None, **kwargs): 301 """ 302 Initialize a logging record with interesting information. 303 """ 304 ct = time.time_ns() 305 self.name = name 306 self.msg = msg 307 # 308 # The following statement allows passing of a dictionary as a sole 309 # argument, so that you can do something like 310 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 311 # Suggested by Stefan Behnel. 312 # Note that without the test for args[0], we get a problem because 313 # during formatting, we test to see if the arg is present using 314 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 315 # and if the passed arg fails 'if self.args:' then no formatting 316 # is done. For example, logger.warning('Value is %d', 0) would log 317 # 'Value is %d' instead of 'Value is 0'. 318 # For the use case of passing a dictionary, this should not be a 319 # problem. 320 # Issue #21172: a request was made to relax the isinstance check 321 # to hasattr(args[0], '__getitem__'). However, the docs on string 322 # formatting still seem to suggest a mapping object is required. 323 # Thus, while not removing the isinstance check, it does now look 324 # for collections.abc.Mapping rather than, as before, dict. 325 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 326 and args[0]): 327 args = args[0] 328 self.args = args 329 self.levelname = getLevelName(level) 330 self.levelno = level 331 self.pathname = pathname 332 try: 333 self.filename = os.path.basename(pathname) 334 self.module = os.path.splitext(self.filename)[0] 335 except (TypeError, ValueError, AttributeError): 336 self.filename = pathname 337 self.module = "Unknown module" 338 self.exc_info = exc_info 339 self.exc_text = None # used to cache the traceback text 340 self.stack_info = sinfo 341 self.lineno = lineno 342 self.funcName = func 343 self.created = ct / 1e9 # ns to float seconds 344 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 345 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 346 # Convert to float by adding 0.0 for historical reasons. See gh-89047 347 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 348 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 349 # ns -> sec conversion can round up, e.g: 350 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 351 self.msecs = 0.0 352 353 self.relativeCreated = (ct - _startTime) / 1e6 354 if logThreads: 355 self.thread = threading.get_ident() 356 self.threadName = threading.current_thread().name 357 else: # pragma: no cover 358 self.thread = None 359 self.threadName = None 360 if not logMultiprocessing: # pragma: no cover 361 self.processName = None 362 else: 363 self.processName = 'MainProcess' 364 mp = sys.modules.get('multiprocessing') 365 if mp is not None: 366 # Errors may occur if multiprocessing has not finished loading 367 # yet - e.g. 
if a custom import hook causes third-party code 368 # to run when multiprocessing calls import. See issue 8200 369 # for an example 370 try: 371 self.processName = mp.current_process().name 372 except Exception: #pragma: no cover 373 pass 374 if logProcesses and hasattr(os, 'getpid'): 375 self.process = os.getpid() 376 else: 377 self.process = None 378 379 self.taskName = None 380 if logAsyncioTasks: 381 asyncio = sys.modules.get('asyncio') 382 if asyncio: 383 try: 384 self.taskName = asyncio.current_task().get_name() 385 except Exception: 386 pass 387 388 def __repr__(self): 389 return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno, 390 self.pathname, self.lineno, self.msg) 391 392 def getMessage(self): 393 """ 394 Return the message for this LogRecord. 395 396 Return the message for this LogRecord after merging any user-supplied 397 arguments with the message. 398 """ 399 msg = str(self.msg) 400 if self.args: 401 msg = msg % self.args 402 return msg
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged.
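A small illustrative sketch (not from the module's own docs) of how msg and args are merged. Records are normally created by Logger methods, but one can be built directly to inspect its fields; the pathname and message below are made up:

import logging

rec = logging.LogRecord(name="demo", level=logging.INFO,
                        pathname="app/db.py", lineno=42,
                        msg="user %s logged in from %s",
                        args=("alice", "10.0.0.5"), exc_info=None)
print(rec.getMessage())                          # user alice logged in from 10.0.0.5
print(rec.levelname, rec.filename, rec.module)   # INFO db.py db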
299 def __init__(self, name, level, pathname, lineno, 300 msg, args, exc_info, func=None, sinfo=None, **kwargs): 301 """ 302 Initialize a logging record with interesting information. 303 """ 304 ct = time.time_ns() 305 self.name = name 306 self.msg = msg 307 # 308 # The following statement allows passing of a dictionary as a sole 309 # argument, so that you can do something like 310 # logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2}) 311 # Suggested by Stefan Behnel. 312 # Note that without the test for args[0], we get a problem because 313 # during formatting, we test to see if the arg is present using 314 # 'if self.args:'. If the event being logged is e.g. 'Value is %d' 315 # and if the passed arg fails 'if self.args:' then no formatting 316 # is done. For example, logger.warning('Value is %d', 0) would log 317 # 'Value is %d' instead of 'Value is 0'. 318 # For the use case of passing a dictionary, this should not be a 319 # problem. 320 # Issue #21172: a request was made to relax the isinstance check 321 # to hasattr(args[0], '__getitem__'). However, the docs on string 322 # formatting still seem to suggest a mapping object is required. 323 # Thus, while not removing the isinstance check, it does now look 324 # for collections.abc.Mapping rather than, as before, dict. 325 if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) 326 and args[0]): 327 args = args[0] 328 self.args = args 329 self.levelname = getLevelName(level) 330 self.levelno = level 331 self.pathname = pathname 332 try: 333 self.filename = os.path.basename(pathname) 334 self.module = os.path.splitext(self.filename)[0] 335 except (TypeError, ValueError, AttributeError): 336 self.filename = pathname 337 self.module = "Unknown module" 338 self.exc_info = exc_info 339 self.exc_text = None # used to cache the traceback text 340 self.stack_info = sinfo 341 self.lineno = lineno 342 self.funcName = func 343 self.created = ct / 1e9 # ns to float seconds 344 # Get the number of whole milliseconds (0-999) in the fractional part of seconds. 345 # Eg: 1_677_903_920_999_998_503 ns --> 999_998_503 ns--> 999 ms 346 # Convert to float by adding 0.0 for historical reasons. See gh-89047 347 self.msecs = (ct % 1_000_000_000) // 1_000_000 + 0.0 348 if self.msecs == 999.0 and int(self.created) != ct // 1_000_000_000: 349 # ns -> sec conversion can round up, e.g: 350 # 1_677_903_920_999_999_900 ns --> 1_677_903_921.0 sec 351 self.msecs = 0.0 352 353 self.relativeCreated = (ct - _startTime) / 1e6 354 if logThreads: 355 self.thread = threading.get_ident() 356 self.threadName = threading.current_thread().name 357 else: # pragma: no cover 358 self.thread = None 359 self.threadName = None 360 if not logMultiprocessing: # pragma: no cover 361 self.processName = None 362 else: 363 self.processName = 'MainProcess' 364 mp = sys.modules.get('multiprocessing') 365 if mp is not None: 366 # Errors may occur if multiprocessing has not finished loading 367 # yet - e.g. if a custom import hook causes third-party code 368 # to run when multiprocessing calls import. See issue 8200 369 # for an example 370 try: 371 self.processName = mp.current_process().name 372 except Exception: #pragma: no cover 373 pass 374 if logProcesses and hasattr(os, 'getpid'): 375 self.process = os.getpid() 376 else: 377 self.process = None 378 379 self.taskName = None 380 if logAsyncioTasks: 381 asyncio = sys.modules.get('asyncio') 382 if asyncio: 383 try: 384 self.taskName = asyncio.current_task().get_name() 385 except Exception: 386 pass
Initialize a logging record with interesting information.
392 def getMessage(self): 393 """ 394 Return the message for this LogRecord. 395 396 Return the message for this LogRecord after merging any user-supplied 397 arguments with the message. 398 """ 399 msg = str(self.msg) 400 if self.args: 401 msg = msg % self.args 402 return msg
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied arguments with the message.
1464class Logger(Filterer): 1465 """ 1466 Instances of the Logger class represent a single logging channel. A 1467 "logging channel" indicates an area of an application. Exactly how an 1468 "area" is defined is up to the application developer. Since an 1469 application can have any number of areas, logging channels are identified 1470 by a unique string. Application areas can be nested (e.g. an area 1471 of "input processing" might include sub-areas "read CSV files", "read 1472 XLS files" and "read Gnumeric files"). To cater for this natural nesting, 1473 channel names are organized into a namespace hierarchy where levels are 1474 separated by periods, much like the Java or Python package namespace. So 1475 in the instance given above, channel names might be "input" for the upper 1476 level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. 1477 There is no arbitrary limit to the depth of nesting. 1478 """ 1479 def __init__(self, name, level=NOTSET): 1480 """ 1481 Initialize the logger with a name and an optional level. 1482 """ 1483 Filterer.__init__(self) 1484 self.name = name 1485 self.level = _checkLevel(level) 1486 self.parent = None 1487 self.propagate = True 1488 self.handlers = [] 1489 self.disabled = False 1490 self._cache = {} 1491 1492 def setLevel(self, level): 1493 """ 1494 Set the logging level of this logger. level must be an int or a str. 1495 """ 1496 self.level = _checkLevel(level) 1497 self.manager._clear_cache() 1498 1499 def debug(self, msg, *args, **kwargs): 1500 """ 1501 Log 'msg % args' with severity 'DEBUG'. 1502 1503 To pass exception information, use the keyword argument exc_info with 1504 a true value, e.g. 1505 1506 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1507 """ 1508 if self.isEnabledFor(DEBUG): 1509 self._log(DEBUG, msg, args, **kwargs) 1510 1511 def info(self, msg, *args, **kwargs): 1512 """ 1513 Log 'msg % args' with severity 'INFO'. 1514 1515 To pass exception information, use the keyword argument exc_info with 1516 a true value, e.g. 1517 1518 logger.info("Houston, we have a %s", "notable problem", exc_info=True) 1519 """ 1520 if self.isEnabledFor(INFO): 1521 self._log(INFO, msg, args, **kwargs) 1522 1523 def warning(self, msg, *args, **kwargs): 1524 """ 1525 Log 'msg % args' with severity 'WARNING'. 1526 1527 To pass exception information, use the keyword argument exc_info with 1528 a true value, e.g. 1529 1530 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True) 1531 """ 1532 if self.isEnabledFor(WARNING): 1533 self._log(WARNING, msg, args, **kwargs) 1534 1535 def warn(self, msg, *args, **kwargs): 1536 warnings.warn("The 'warn' method is deprecated, " 1537 "use 'warning' instead", DeprecationWarning, 2) 1538 self.warning(msg, *args, **kwargs) 1539 1540 def error(self, msg, *args, **kwargs): 1541 """ 1542 Log 'msg % args' with severity 'ERROR'. 1543 1544 To pass exception information, use the keyword argument exc_info with 1545 a true value, e.g. 1546 1547 logger.error("Houston, we have a %s", "major problem", exc_info=True) 1548 """ 1549 if self.isEnabledFor(ERROR): 1550 self._log(ERROR, msg, args, **kwargs) 1551 1552 def exception(self, msg, *args, exc_info=True, **kwargs): 1553 """ 1554 Convenience method for logging an ERROR with exception information. 1555 """ 1556 self.error(msg, *args, exc_info=exc_info, **kwargs) 1557 1558 def critical(self, msg, *args, **kwargs): 1559 """ 1560 Log 'msg % args' with severity 'CRITICAL'. 
1561 1562 To pass exception information, use the keyword argument exc_info with 1563 a true value, e.g. 1564 1565 logger.critical("Houston, we have a %s", "major disaster", exc_info=True) 1566 """ 1567 if self.isEnabledFor(CRITICAL): 1568 self._log(CRITICAL, msg, args, **kwargs) 1569 1570 def fatal(self, msg, *args, **kwargs): 1571 """ 1572 Don't use this method, use critical() instead. 1573 """ 1574 self.critical(msg, *args, **kwargs) 1575 1576 def log(self, level, msg, *args, **kwargs): 1577 """ 1578 Log 'msg % args' with the integer severity 'level'. 1579 1580 To pass exception information, use the keyword argument exc_info with 1581 a true value, e.g. 1582 1583 logger.log(level, "We have a %s", "mysterious problem", exc_info=True) 1584 """ 1585 if not isinstance(level, int): 1586 if raiseExceptions: 1587 raise TypeError("level must be an integer") 1588 else: 1589 return 1590 if self.isEnabledFor(level): 1591 self._log(level, msg, args, **kwargs) 1592 1593 def findCaller(self, stack_info=False, stacklevel=1): 1594 """ 1595 Find the stack frame of the caller so that we can note the source 1596 file name, line number and function name. 1597 """ 1598 f = currentframe() 1599 #On some versions of IronPython, currentframe() returns None if 1600 #IronPython isn't run with -X:Frames. 1601 if f is None: 1602 return "(unknown file)", 0, "(unknown function)", None 1603 while stacklevel > 0: 1604 next_f = f.f_back 1605 if next_f is None: 1606 ## We've got options here. 1607 ## If we want to use the last (deepest) frame: 1608 break 1609 ## If we want to mimic the warnings module: 1610 #return ("sys", 1, "(unknown function)", None) 1611 ## If we want to be pedantic: 1612 #raise ValueError("call stack is not deep enough") 1613 f = next_f 1614 if not _is_internal_frame(f): 1615 stacklevel -= 1 1616 co = f.f_code 1617 sinfo = None 1618 if stack_info: 1619 with io.StringIO() as sio: 1620 sio.write("Stack (most recent call last):\n") 1621 traceback.print_stack(f, file=sio) 1622 sinfo = sio.getvalue() 1623 if sinfo[-1] == '\n': 1624 sinfo = sinfo[:-1] 1625 return co.co_filename, f.f_lineno, co.co_name, sinfo 1626 1627 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1628 func=None, extra=None, sinfo=None): 1629 """ 1630 A factory method which can be overridden in subclasses to create 1631 specialized LogRecords. 1632 """ 1633 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1634 sinfo) 1635 if extra is not None: 1636 for key in extra: 1637 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1638 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1639 rv.__dict__[key] = extra[key] 1640 return rv 1641 1642 def _log(self, level, msg, args, exc_info=None, extra=None, stack_info=False, 1643 stacklevel=1): 1644 """ 1645 Low-level logging routine which creates a LogRecord and then calls 1646 all the handlers of this logger to handle the record. 1647 """ 1648 sinfo = None 1649 if _srcfile: 1650 #IronPython doesn't track Python frames, so findCaller raises an 1651 #exception on some versions of IronPython. We trap it here so that 1652 #IronPython can use logging. 
1653 try: 1654 fn, lno, func, sinfo = self.findCaller(stack_info, stacklevel) 1655 except ValueError: # pragma: no cover 1656 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1657 else: # pragma: no cover 1658 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1659 if exc_info: 1660 if isinstance(exc_info, BaseException): 1661 exc_info = (type(exc_info), exc_info, exc_info.__traceback__) 1662 elif not isinstance(exc_info, tuple): 1663 exc_info = sys.exc_info() 1664 record = self.makeRecord(self.name, level, fn, lno, msg, args, 1665 exc_info, func, extra, sinfo) 1666 self.handle(record) 1667 1668 def handle(self, record): 1669 """ 1670 Call the handlers for the specified record. 1671 1672 This method is used for unpickled records received from a socket, as 1673 well as those created locally. Logger-level filtering is applied. 1674 """ 1675 if self.disabled: 1676 return 1677 maybe_record = self.filter(record) 1678 if not maybe_record: 1679 return 1680 if isinstance(maybe_record, LogRecord): 1681 record = maybe_record 1682 self.callHandlers(record) 1683 1684 def addHandler(self, hdlr): 1685 """ 1686 Add the specified handler to this logger. 1687 """ 1688 with _lock: 1689 if not (hdlr in self.handlers): 1690 self.handlers.append(hdlr) 1691 1692 def removeHandler(self, hdlr): 1693 """ 1694 Remove the specified handler from this logger. 1695 """ 1696 with _lock: 1697 if hdlr in self.handlers: 1698 self.handlers.remove(hdlr) 1699 1700 def hasHandlers(self): 1701 """ 1702 See if this logger has any handlers configured. 1703 1704 Loop through all handlers for this logger and its parents in the 1705 logger hierarchy. Return True if a handler was found, else False. 1706 Stop searching up the hierarchy whenever a logger with the "propagate" 1707 attribute set to zero is found - that will be the last logger which 1708 is checked for the existence of handlers. 1709 """ 1710 c = self 1711 rv = False 1712 while c: 1713 if c.handlers: 1714 rv = True 1715 break 1716 if not c.propagate: 1717 break 1718 else: 1719 c = c.parent 1720 return rv 1721 1722 def callHandlers(self, record): 1723 """ 1724 Pass a record to all relevant handlers. 1725 1726 Loop through all handlers for this logger and its parents in the 1727 logger hierarchy. If no handler was found, output a one-off error 1728 message to sys.stderr. Stop searching up the hierarchy whenever a 1729 logger with the "propagate" attribute set to zero is found - that 1730 will be the last logger whose handlers are called. 1731 """ 1732 c = self 1733 found = 0 1734 while c: 1735 for hdlr in c.handlers: 1736 found = found + 1 1737 if record.levelno >= hdlr.level: 1738 hdlr.handle(record) 1739 if not c.propagate: 1740 c = None #break out 1741 else: 1742 c = c.parent 1743 if (found == 0): 1744 if lastResort: 1745 if record.levelno >= lastResort.level: 1746 lastResort.handle(record) 1747 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1748 sys.stderr.write("No handlers could be found for logger" 1749 " \"%s\"\n" % self.name) 1750 self.manager.emittedNoHandlerWarning = True 1751 1752 def getEffectiveLevel(self): 1753 """ 1754 Get the effective level for this logger. 1755 1756 Loop through this logger and its parents in the logger hierarchy, 1757 looking for a non-zero logging level. Return the first one found. 
1758 """ 1759 logger = self 1760 while logger: 1761 if logger.level: 1762 return logger.level 1763 logger = logger.parent 1764 return NOTSET 1765 1766 def isEnabledFor(self, level): 1767 """ 1768 Is this logger enabled for level 'level'? 1769 """ 1770 if self.disabled: 1771 return False 1772 1773 try: 1774 return self._cache[level] 1775 except KeyError: 1776 with _lock: 1777 if self.manager.disable >= level: 1778 is_enabled = self._cache[level] = False 1779 else: 1780 is_enabled = self._cache[level] = ( 1781 level >= self.getEffectiveLevel() 1782 ) 1783 return is_enabled 1784 1785 def getChild(self, suffix): 1786 """ 1787 Get a logger which is a descendant to this one. 1788 1789 This is a convenience method, such that 1790 1791 logging.getLogger('abc').getChild('def.ghi') 1792 1793 is the same as 1794 1795 logging.getLogger('abc.def.ghi') 1796 1797 It's useful, for example, when the parent logger is named using 1798 __name__ rather than a literal string. 1799 """ 1800 if self.root is not self: 1801 suffix = '.'.join((self.name, suffix)) 1802 return self.manager.getLogger(suffix) 1803 1804 def getChildren(self): 1805 1806 def _hierlevel(logger): 1807 if logger is logger.manager.root: 1808 return 0 1809 return 1 + logger.name.count('.') 1810 1811 d = self.manager.loggerDict 1812 with _lock: 1813 # exclude PlaceHolders - the last check is to ensure that lower-level 1814 # descendants aren't returned - if there are placeholders, a logger's 1815 # parent field might point to a grandparent or ancestor thereof. 1816 return set(item for item in d.values() 1817 if isinstance(item, Logger) and item.parent is self and 1818 _hierlevel(item) == 1 + _hierlevel(item.parent)) 1819 1820 def __repr__(self): 1821 level = getLevelName(self.getEffectiveLevel()) 1822 return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level) 1823 1824 def __reduce__(self): 1825 if getLogger(self.name) is not self: 1826 import pickle 1827 raise pickle.PicklingError('logger cannot be pickled') 1828 return getLogger, (self.name,)
Instances of the Logger class represent a single logging channel. A "logging channel" indicates an area of an application. Exactly how an "area" is defined is up to the application developer. Since an application can have any number of areas, logging channels are identified by a unique string. Application areas can be nested (e.g. an area of "input processing" might include sub-areas "read CSV files", "read XLS files" and "read Gnumeric files"). To cater for this natural nesting, channel names are organized into a namespace hierarchy where levels are separated by periods, much like the Java or Python package namespace. So in the instance given above, channel names might be "input" for the upper level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels. There is no arbitrary limit to the depth of nesting.
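A brief sketch of the dotted-name hierarchy described above; the channel names are illustrative:

import logging

parent = logging.getLogger("input")
child = logging.getLogger("input.csv")

parent.setLevel(logging.WARNING)
print(child.parent is parent)                          # True: the hierarchy follows the dots
print(child.getEffectiveLevel() == logging.WARNING)    # True: level inherited from "input"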
1479 def __init__(self, name, level=NOTSET): 1480 """ 1481 Initialize the logger with a name and an optional level. 1482 """ 1483 Filterer.__init__(self) 1484 self.name = name 1485 self.level = _checkLevel(level) 1486 self.parent = None 1487 self.propagate = True 1488 self.handlers = [] 1489 self.disabled = False 1490 self._cache = {}
Initialize the logger with a name and an optional level.
1492 def setLevel(self, level): 1493 """ 1494 Set the logging level of this logger. level must be an int or a str. 1495 """ 1496 self.level = _checkLevel(level) 1497 self.manager._clear_cache()
Set the logging level of this logger. level must be an int or a str.
1499 def debug(self, msg, *args, **kwargs): 1500 """ 1501 Log 'msg % args' with severity 'DEBUG'. 1502 1503 To pass exception information, use the keyword argument exc_info with 1504 a true value, e.g. 1505 1506 logger.debug("Houston, we have a %s", "thorny problem", exc_info=True) 1507 """ 1508 if self.isEnabledFor(DEBUG): 1509 self._log(DEBUG, msg, args, **kwargs)
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=True)
1511 def info(self, msg, *args, **kwargs): 1512 """ 1513 Log 'msg % args' with severity 'INFO'. 1514 1515 To pass exception information, use the keyword argument exc_info with 1516 a true value, e.g. 1517 1518 logger.info("Houston, we have a %s", "notable problem", exc_info=True) 1519 """ 1520 if self.isEnabledFor(INFO): 1521 self._log(INFO, msg, args, **kwargs)
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.info("Houston, we have a %s", "notable problem", exc_info=True)
1523 def warning(self, msg, *args, **kwargs): 1524 """ 1525 Log 'msg % args' with severity 'WARNING'. 1526 1527 To pass exception information, use the keyword argument exc_info with 1528 a true value, e.g. 1529 1530 logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True) 1531 """ 1532 if self.isEnabledFor(WARNING): 1533 self._log(WARNING, msg, args, **kwargs)
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=True)
1540 def error(self, msg, *args, **kwargs): 1541 """ 1542 Log 'msg % args' with severity 'ERROR'. 1543 1544 To pass exception information, use the keyword argument exc_info with 1545 a true value, e.g. 1546 1547 logger.error("Houston, we have a %s", "major problem", exc_info=True) 1548 """ 1549 if self.isEnabledFor(ERROR): 1550 self._log(ERROR, msg, args, **kwargs)
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=True)
1552 def exception(self, msg, *args, exc_info=True, **kwargs): 1553 """ 1554 Convenience method for logging an ERROR with exception information. 1555 """ 1556 self.error(msg, *args, exc_info=exc_info, **kwargs)
Convenience method for logging an ERROR with exception information.
1558 def critical(self, msg, *args, **kwargs): 1559 """ 1560 Log 'msg % args' with severity 'CRITICAL'. 1561 1562 To pass exception information, use the keyword argument exc_info with 1563 a true value, e.g. 1564 1565 logger.critical("Houston, we have a %s", "major disaster", exc_info=True) 1566 """ 1567 if self.isEnabledFor(CRITICAL): 1568 self._log(CRITICAL, msg, args, **kwargs)
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=True)
1570 def fatal(self, msg, *args, **kwargs): 1571 """ 1572 Don't use this method, use critical() instead. 1573 """ 1574 self.critical(msg, *args, **kwargs)
Don't use this method, use critical() instead.
1576 def log(self, level, msg, *args, **kwargs): 1577 """ 1578 Log 'msg % args' with the integer severity 'level'. 1579 1580 To pass exception information, use the keyword argument exc_info with 1581 a true value, e.g. 1582 1583 logger.log(level, "We have a %s", "mysterious problem", exc_info=True) 1584 """ 1585 if not isinstance(level, int): 1586 if raiseExceptions: 1587 raise TypeError("level must be an integer") 1588 else: 1589 return 1590 if self.isEnabledFor(level): 1591 self._log(level, msg, args, **kwargs)
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=True)
1593 def findCaller(self, stack_info=False, stacklevel=1): 1594 """ 1595 Find the stack frame of the caller so that we can note the source 1596 file name, line number and function name. 1597 """ 1598 f = currentframe() 1599 #On some versions of IronPython, currentframe() returns None if 1600 #IronPython isn't run with -X:Frames. 1601 if f is None: 1602 return "(unknown file)", 0, "(unknown function)", None 1603 while stacklevel > 0: 1604 next_f = f.f_back 1605 if next_f is None: 1606 ## We've got options here. 1607 ## If we want to use the last (deepest) frame: 1608 break 1609 ## If we want to mimic the warnings module: 1610 #return ("sys", 1, "(unknown function)", None) 1611 ## If we want to be pedantic: 1612 #raise ValueError("call stack is not deep enough") 1613 f = next_f 1614 if not _is_internal_frame(f): 1615 stacklevel -= 1 1616 co = f.f_code 1617 sinfo = None 1618 if stack_info: 1619 with io.StringIO() as sio: 1620 sio.write("Stack (most recent call last):\n") 1621 traceback.print_stack(f, file=sio) 1622 sinfo = sio.getvalue() 1623 if sinfo[-1] == '\n': 1624 sinfo = sinfo[:-1] 1625 return co.co_filename, f.f_lineno, co.co_name, sinfo
Find the stack frame of the caller so that we can note the source file name, line number and function name.
1627 def makeRecord(self, name, level, fn, lno, msg, args, exc_info, 1628 func=None, extra=None, sinfo=None): 1629 """ 1630 A factory method which can be overridden in subclasses to create 1631 specialized LogRecords. 1632 """ 1633 rv = _logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, 1634 sinfo) 1635 if extra is not None: 1636 for key in extra: 1637 if (key in ["message", "asctime"]) or (key in rv.__dict__): 1638 raise KeyError("Attempt to overwrite %r in LogRecord" % key) 1639 rv.__dict__[key] = extra[key] 1640 return rv
A factory method which can be overridden in subclasses to create specialized LogRecords.
1668 def handle(self, record): 1669 """ 1670 Call the handlers for the specified record. 1671 1672 This method is used for unpickled records received from a socket, as 1673 well as those created locally. Logger-level filtering is applied. 1674 """ 1675 if self.disabled: 1676 return 1677 maybe_record = self.filter(record) 1678 if not maybe_record: 1679 return 1680 if isinstance(maybe_record, LogRecord): 1681 record = maybe_record 1682 self.callHandlers(record)
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as well as those created locally. Logger-level filtering is applied.
1684 def addHandler(self, hdlr): 1685 """ 1686 Add the specified handler to this logger. 1687 """ 1688 with _lock: 1689 if not (hdlr in self.handlers): 1690 self.handlers.append(hdlr)
Add the specified handler to this logger.
1692 def removeHandler(self, hdlr): 1693 """ 1694 Remove the specified handler from this logger. 1695 """ 1696 with _lock: 1697 if hdlr in self.handlers: 1698 self.handlers.remove(hdlr)
Remove the specified handler from this logger.
1700 def hasHandlers(self): 1701 """ 1702 See if this logger has any handlers configured. 1703 1704 Loop through all handlers for this logger and its parents in the 1705 logger hierarchy. Return True if a handler was found, else False. 1706 Stop searching up the hierarchy whenever a logger with the "propagate" 1707 attribute set to zero is found - that will be the last logger which 1708 is checked for the existence of handlers. 1709 """ 1710 c = self 1711 rv = False 1712 while c: 1713 if c.handlers: 1714 rv = True 1715 break 1716 if not c.propagate: 1717 break 1718 else: 1719 c = c.parent 1720 return rv
See if this logger has any handlers configured.
Loop through all handlers for this logger and its parents in the logger hierarchy. Return True if a handler was found, else False. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger which is checked for the existence of handlers.
1722 def callHandlers(self, record): 1723 """ 1724 Pass a record to all relevant handlers. 1725 1726 Loop through all handlers for this logger and its parents in the 1727 logger hierarchy. If no handler was found, output a one-off error 1728 message to sys.stderr. Stop searching up the hierarchy whenever a 1729 logger with the "propagate" attribute set to zero is found - that 1730 will be the last logger whose handlers are called. 1731 """ 1732 c = self 1733 found = 0 1734 while c: 1735 for hdlr in c.handlers: 1736 found = found + 1 1737 if record.levelno >= hdlr.level: 1738 hdlr.handle(record) 1739 if not c.propagate: 1740 c = None #break out 1741 else: 1742 c = c.parent 1743 if (found == 0): 1744 if lastResort: 1745 if record.levelno >= lastResort.level: 1746 lastResort.handle(record) 1747 elif raiseExceptions and not self.manager.emittedNoHandlerWarning: 1748 sys.stderr.write("No handlers could be found for logger" 1749 " \"%s\"\n" % self.name) 1750 self.manager.emittedNoHandlerWarning = True
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the logger hierarchy. If no handler was found, output a one-off error message to sys.stderr. Stop searching up the hierarchy whenever a logger with the "propagate" attribute set to zero is found - that will be the last logger whose handlers are called.
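A hedged sketch of the propagate behaviour: with propagate set to False on a child logger, the upward search stops there and only that child's handlers see the record.

import logging, sys

logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))   # a root handler

app = logging.getLogger("app")
app.addHandler(logging.StreamHandler(sys.stdout))
app.propagate = False                       # stop the upward search at "app"

app.warning("handled once, not twice")      # only the handler attached to "app" runs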
1752 def getEffectiveLevel(self): 1753 """ 1754 Get the effective level for this logger. 1755 1756 Loop through this logger and its parents in the logger hierarchy, 1757 looking for a non-zero logging level. Return the first one found. 1758 """ 1759 logger = self 1760 while logger: 1761 if logger.level: 1762 return logger.level 1763 logger = logger.parent 1764 return NOTSET
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy, looking for a non-zero logging level. Return the first one found.
1766 def isEnabledFor(self, level): 1767 """ 1768 Is this logger enabled for level 'level'? 1769 """ 1770 if self.disabled: 1771 return False 1772 1773 try: 1774 return self._cache[level] 1775 except KeyError: 1776 with _lock: 1777 if self.manager.disable >= level: 1778 is_enabled = self._cache[level] = False 1779 else: 1780 is_enabled = self._cache[level] = ( 1781 level >= self.getEffectiveLevel() 1782 ) 1783 return is_enabled
Is this logger enabled for level 'level'?
1785 def getChild(self, suffix): 1786 """ 1787 Get a logger which is a descendant to this one. 1788 1789 This is a convenience method, such that 1790 1791 logging.getLogger('abc').getChild('def.ghi') 1792 1793 is the same as 1794 1795 logging.getLogger('abc.def.ghi') 1796 1797 It's useful, for example, when the parent logger is named using 1798 __name__ rather than a literal string. 1799 """ 1800 if self.root is not self: 1801 suffix = '.'.join((self.name, suffix)) 1802 return self.manager.getLogger(suffix)
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using __name__ rather than a literal string.
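A short usage sketch of that idiom: a package module can derive sub-loggers without hard-coding its own name (the "csv" suffix is illustrative).

import logging

log = logging.getLogger(__name__)                       # e.g. "mypkg.io"
csv_log = log.getChild("csv")                           # same logger as getLogger(__name__ + ".csv")
print(csv_log is logging.getLogger(log.name + ".csv"))  # True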
1804 def getChildren(self): 1805 1806 def _hierlevel(logger): 1807 if logger is logger.manager.root: 1808 return 0 1809 return 1 + logger.name.count('.') 1810 1811 d = self.manager.loggerDict 1812 with _lock: 1813 # exclude PlaceHolders - the last check is to ensure that lower-level 1814 # descendants aren't returned - if there are placeholders, a logger's 1815 # parent field might point to a grandparent or ancestor thereof. 1816 return set(item for item in d.values() 1817 if isinstance(item, Logger) and item.parent is self and 1818 _hierlevel(item) == 1 + _hierlevel(item.parent))
1848class LoggerAdapter(object): 1849 """ 1850 An adapter for loggers which makes it easier to specify contextual 1851 information in logging output. 1852 """ 1853 1854 def __init__(self, logger, extra=None, merge_extra=False): 1855 """ 1856 Initialize the adapter with a logger and a dict-like object which 1857 provides contextual information. This constructor signature allows 1858 easy stacking of LoggerAdapters, if so desired. 1859 1860 You can effectively pass keyword arguments as shown in the 1861 following example: 1862 1863 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1864 1865 By default, LoggerAdapter objects will drop the "extra" argument 1866 passed on the individual log calls to use its own instead. 1867 1868 Initializing it with merge_extra=True will instead merge both 1869 maps when logging, the individual call extra taking precedence 1870 over the LoggerAdapter instance extra 1871 1872 .. versionchanged:: 3.13 1873 The *merge_extra* argument was added. 1874 """ 1875 self.logger = logger 1876 self.extra = extra 1877 self.merge_extra = merge_extra 1878 1879 def process(self, msg, kwargs): 1880 """ 1881 Process the logging message and keyword arguments passed in to 1882 a logging call to insert contextual information. You can either 1883 manipulate the message itself, the keyword args or both. Return 1884 the message and kwargs modified (or not) to suit your needs. 1885 1886 Normally, you'll only need to override this one method in a 1887 LoggerAdapter subclass for your specific needs. 1888 """ 1889 if self.merge_extra and "extra" in kwargs: 1890 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1891 else: 1892 kwargs["extra"] = self.extra 1893 return msg, kwargs 1894 1895 # 1896 # Boilerplate convenience methods 1897 # 1898 def debug(self, msg, *args, **kwargs): 1899 """ 1900 Delegate a debug call to the underlying logger. 1901 """ 1902 self.log(DEBUG, msg, *args, **kwargs) 1903 1904 def info(self, msg, *args, **kwargs): 1905 """ 1906 Delegate an info call to the underlying logger. 1907 """ 1908 self.log(INFO, msg, *args, **kwargs) 1909 1910 def warning(self, msg, *args, **kwargs): 1911 """ 1912 Delegate a warning call to the underlying logger. 1913 """ 1914 self.log(WARNING, msg, *args, **kwargs) 1915 1916 def warn(self, msg, *args, **kwargs): 1917 warnings.warn("The 'warn' method is deprecated, " 1918 "use 'warning' instead", DeprecationWarning, 2) 1919 self.warning(msg, *args, **kwargs) 1920 1921 def error(self, msg, *args, **kwargs): 1922 """ 1923 Delegate an error call to the underlying logger. 1924 """ 1925 self.log(ERROR, msg, *args, **kwargs) 1926 1927 def exception(self, msg, *args, exc_info=True, **kwargs): 1928 """ 1929 Delegate an exception call to the underlying logger. 1930 """ 1931 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs) 1932 1933 def critical(self, msg, *args, **kwargs): 1934 """ 1935 Delegate a critical call to the underlying logger. 1936 """ 1937 self.log(CRITICAL, msg, *args, **kwargs) 1938 1939 def log(self, level, msg, *args, **kwargs): 1940 """ 1941 Delegate a log call to the underlying logger, after adding 1942 contextual information from this adapter instance. 1943 """ 1944 if self.isEnabledFor(level): 1945 msg, kwargs = self.process(msg, kwargs) 1946 self.logger.log(level, msg, *args, **kwargs) 1947 1948 def isEnabledFor(self, level): 1949 """ 1950 Is this logger enabled for level 'level'? 
1951 """ 1952 return self.logger.isEnabledFor(level) 1953 1954 def setLevel(self, level): 1955 """ 1956 Set the specified level on the underlying logger. 1957 """ 1958 self.logger.setLevel(level) 1959 1960 def getEffectiveLevel(self): 1961 """ 1962 Get the effective level for the underlying logger. 1963 """ 1964 return self.logger.getEffectiveLevel() 1965 1966 def hasHandlers(self): 1967 """ 1968 See if the underlying logger has any handlers. 1969 """ 1970 return self.logger.hasHandlers() 1971 1972 def _log(self, level, msg, args, **kwargs): 1973 """ 1974 Low-level log implementation, proxied to allow nested logger adapters. 1975 """ 1976 return self.logger._log(level, msg, args, **kwargs) 1977 1978 @property 1979 def manager(self): 1980 return self.logger.manager 1981 1982 @manager.setter 1983 def manager(self, value): 1984 self.logger.manager = value 1985 1986 @property 1987 def name(self): 1988 return self.logger.name 1989 1990 def __repr__(self): 1991 logger = self.logger 1992 level = getLevelName(logger.getEffectiveLevel()) 1993 return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) 1994 1995 __class_getitem__ = classmethod(GenericAlias)
An adapter for loggers which makes it easier to specify contextual information in logging output.
1854 def __init__(self, logger, extra=None, merge_extra=False): 1855 """ 1856 Initialize the adapter with a logger and a dict-like object which 1857 provides contextual information. This constructor signature allows 1858 easy stacking of LoggerAdapters, if so desired. 1859 1860 You can effectively pass keyword arguments as shown in the 1861 following example: 1862 1863 adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) 1864 1865 By default, LoggerAdapter objects will drop the "extra" argument 1866 passed on the individual log calls to use its own instead. 1867 1868 Initializing it with merge_extra=True will instead merge both 1869 maps when logging, the individual call extra taking precedence 1870 over the LoggerAdapter instance extra 1871 1872 .. versionchanged:: 3.13 1873 The *merge_extra* argument was added. 1874 """ 1875 self.logger = logger 1876 self.extra = extra 1877 self.merge_extra = merge_extra
Initialize the adapter with a logger and a dict-like object which provides contextual information. This constructor signature allows easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
By default, a LoggerAdapter object will drop the "extra" argument passed on individual log calls and use its own instead.
Initializing it with merge_extra=True will instead merge both maps when logging, with the extra of the individual call taking precedence over the LoggerAdapter instance's extra.
Changed in version 3.13: The merge_extra argument was added.
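A short sketch of the adapter in use; the connid field and logger names are illustrative, and the merge_extra form needs Python 3.13 or later:

import logging, sys

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(connid)s %(message)s"))
logger = logging.getLogger("net")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

adapter = logging.LoggerAdapter(logger, {"connid": "conn-42"})
adapter.info("connection opened")                        # conn-42 connection opened

merged = logging.LoggerAdapter(logger, {"connid": "conn-42"}, merge_extra=True)
merged.info("handshake", extra={"connid": "conn-99"})    # call-site extra wins: conn-99 handshake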
1879 def process(self, msg, kwargs): 1880 """ 1881 Process the logging message and keyword arguments passed in to 1882 a logging call to insert contextual information. You can either 1883 manipulate the message itself, the keyword args or both. Return 1884 the message and kwargs modified (or not) to suit your needs. 1885 1886 Normally, you'll only need to override this one method in a 1887 LoggerAdapter subclass for your specific needs. 1888 """ 1889 if self.merge_extra and "extra" in kwargs: 1890 kwargs["extra"] = {**self.extra, **kwargs["extra"]} 1891 else: 1892 kwargs["extra"] = self.extra 1893 return msg, kwargs
Process the logging message and keyword arguments passed in to a logging call to insert contextual information. You can either manipulate the message itself, the keyword args or both. Return the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a LoggerAdapter subclass for your specific needs.
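For instance, a sketch of a subclass that rewrites the message itself rather than injecting extra; PrefixAdapter is a hypothetical name:

import logging

class PrefixAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        # Prepend the adapter's context tag to every message.
        return "[%s] %s" % (self.extra["tag"], msg), kwargs

log = PrefixAdapter(logging.getLogger("jobs"), {"tag": "job-7"})
log.warning("retrying")                  # logged as: [job-7] retrying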
1898 def debug(self, msg, *args, **kwargs): 1899 """ 1900 Delegate a debug call to the underlying logger. 1901 """ 1902 self.log(DEBUG, msg, *args, **kwargs)
Delegate a debug call to the underlying logger.
1904 def info(self, msg, *args, **kwargs): 1905 """ 1906 Delegate an info call to the underlying logger. 1907 """ 1908 self.log(INFO, msg, *args, **kwargs)
Delegate an info call to the underlying logger.
1910 def warning(self, msg, *args, **kwargs): 1911 """ 1912 Delegate a warning call to the underlying logger. 1913 """ 1914 self.log(WARNING, msg, *args, **kwargs)
Delegate a warning call to the underlying logger.
1921 def error(self, msg, *args, **kwargs): 1922 """ 1923 Delegate an error call to the underlying logger. 1924 """ 1925 self.log(ERROR, msg, *args, **kwargs)
Delegate an error call to the underlying logger.
1927 def exception(self, msg, *args, exc_info=True, **kwargs): 1928 """ 1929 Delegate an exception call to the underlying logger. 1930 """ 1931 self.log(ERROR, msg, *args, exc_info=exc_info, **kwargs)
Delegate an exception call to the underlying logger.
1933 def critical(self, msg, *args, **kwargs): 1934 """ 1935 Delegate a critical call to the underlying logger. 1936 """ 1937 self.log(CRITICAL, msg, *args, **kwargs)
Delegate a critical call to the underlying logger.
1939 def log(self, level, msg, *args, **kwargs): 1940 """ 1941 Delegate a log call to the underlying logger, after adding 1942 contextual information from this adapter instance. 1943 """ 1944 if self.isEnabledFor(level): 1945 msg, kwargs = self.process(msg, kwargs) 1946 self.logger.log(level, msg, *args, **kwargs)
Delegate a log call to the underlying logger, after adding contextual information from this adapter instance.
1948 def isEnabledFor(self, level): 1949 """ 1950 Is this logger enabled for level 'level'? 1951 """ 1952 return self.logger.isEnabledFor(level)
Is this logger enabled for level 'level'?
1954 def setLevel(self, level): 1955 """ 1956 Set the specified level on the underlying logger. 1957 """ 1958 self.logger.setLevel(level)
Set the specified level on the underlying logger.
1960 def getEffectiveLevel(self): 1961 """ 1962 Get the effective level for the underlying logger. 1963 """ 1964 return self.logger.getEffectiveLevel()
Get the effective level for the underlying logger.
2266class NullHandler(Handler): 2267 """ 2268 This handler does nothing. It's intended to be used to avoid the 2269 "No handlers could be found for logger XXX" one-off warning. This is 2270 important for library code, which may contain code to log events. If a user 2271 of the library does not configure logging, the one-off warning might be 2272 produced; to avoid this, the library developer simply needs to instantiate 2273 a NullHandler and add it to the top-level logger of the library module or 2274 package. 2275 """ 2276 def handle(self, record): 2277 """Stub.""" 2278 2279 def emit(self, record): 2280 """Stub.""" 2281 2282 def createLock(self): 2283 self.lock = None 2284 2285 def _at_fork_reinit(self): 2286 pass
This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package.
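The usual library-side sketch of this pattern (the logger name is simply whatever module the code lives in):

# In the library's top-level __init__.py:
import logging

logging.getLogger(__name__).addHandler(logging.NullHandler())

# The library then logs normally; if the application never configures logging,
# no "No handlers could be found" one-off warning is produced.
logging.getLogger(__name__).debug("library initialised")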
1112class StreamHandler(Handler): 1113 """ 1114 A handler class which writes logging records, appropriately formatted, 1115 to a stream. Note that this class does not close the stream, as 1116 sys.stdout or sys.stderr may be used. 1117 """ 1118 1119 terminator = '\n' 1120 1121 def __init__(self, stream=None): 1122 """ 1123 Initialize the handler. 1124 1125 If stream is not specified, sys.stderr is used. 1126 """ 1127 Handler.__init__(self) 1128 if stream is None: 1129 stream = sys.stderr 1130 self.stream = stream 1131 1132 def flush(self): 1133 """ 1134 Flushes the stream. 1135 """ 1136 with self.lock: 1137 if self.stream and hasattr(self.stream, "flush"): 1138 self.stream.flush() 1139 1140 def emit(self, record): 1141 """ 1142 Emit a record. 1143 1144 If a formatter is specified, it is used to format the record. 1145 The record is then written to the stream with a trailing newline. If 1146 exception information is present, it is formatted using 1147 traceback.print_exception and appended to the stream. If the stream 1148 has an 'encoding' attribute, it is used to determine how to do the 1149 output to the stream. 1150 """ 1151 try: 1152 msg = self.format(record) 1153 stream = self.stream 1154 # issue 35046: merged two stream.writes into one. 1155 stream.write(msg + self.terminator) 1156 self.flush() 1157 except RecursionError: # See issue 36272 1158 raise 1159 except Exception: 1160 self.handleError(record) 1161 1162 def setStream(self, stream): 1163 """ 1164 Sets the StreamHandler's stream to the specified value, 1165 if it is different. 1166 1167 Returns the old stream, if the stream was changed, or None 1168 if it wasn't. 1169 """ 1170 if stream is self.stream: 1171 result = None 1172 else: 1173 result = self.stream 1174 with self.lock: 1175 self.flush() 1176 self.stream = stream 1177 return result 1178 1179 def __repr__(self): 1180 level = getLevelName(self.level) 1181 name = getattr(self.stream, 'name', '') 1182 # bpo-36015: name can be an int 1183 name = str(name) 1184 if name: 1185 name += ' ' 1186 return '<%s %s(%s)>' % (self.__class__.__name__, name, level) 1187 1188 __class_getitem__ = classmethod(GenericAlias)
A handler class which writes logging records, appropriately formatted, to a stream. Note that this class does not close the stream, as sys.stdout or sys.stderr may be used.
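A minimal usage sketch, assuming output to sys.stdout is wanted instead of the default sys.stderr:

import logging, sys

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(levelname)s %(name)s: %(message)s"))

log = logging.getLogger("cli")
log.addHandler(handler)
log.setLevel(logging.DEBUG)
log.debug("starting up")                 # writes: DEBUG cli: starting up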
1121 def __init__(self, stream=None): 1122 """ 1123 Initialize the handler. 1124 1125 If stream is not specified, sys.stderr is used. 1126 """ 1127 Handler.__init__(self) 1128 if stream is None: 1129 stream = sys.stderr 1130 self.stream = stream
Initialize the handler.
If stream is not specified, sys.stderr is used.
1132 def flush(self): 1133 """ 1134 Flushes the stream. 1135 """ 1136 with self.lock: 1137 if self.stream and hasattr(self.stream, "flush"): 1138 self.stream.flush()
Flushes the stream.
1140 def emit(self, record): 1141 """ 1142 Emit a record. 1143 1144 If a formatter is specified, it is used to format the record. 1145 The record is then written to the stream with a trailing newline. If 1146 exception information is present, it is formatted using 1147 traceback.print_exception and appended to the stream. If the stream 1148 has an 'encoding' attribute, it is used to determine how to do the 1149 output to the stream. 1150 """ 1151 try: 1152 msg = self.format(record) 1153 stream = self.stream 1154 # issue 35046: merged two stream.writes into one. 1155 stream.write(msg + self.terminator) 1156 self.flush() 1157 except RecursionError: # See issue 36272 1158 raise 1159 except Exception: 1160 self.handleError(record)
Emit a record.
If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. If the stream has an 'encoding' attribute, it is used to determine how to do the output to the stream.
1162 def setStream(self, stream): 1163 """ 1164 Sets the StreamHandler's stream to the specified value, 1165 if it is different. 1166 1167 Returns the old stream, if the stream was changed, or None 1168 if it wasn't. 1169 """ 1170 if stream is self.stream: 1171 result = None 1172 else: 1173 result = self.stream 1174 with self.lock: 1175 self.flush() 1176 self.stream = stream 1177 return result
Sets the StreamHandler's stream to the specified value, if it is different.
Returns the old stream, if the stream was changed, or None if it wasn't.
157def addLevelName(level, levelName): 158 """ 159 Associate 'levelName' with 'level'. 160 161 This is used when converting levels to text during message formatting. 162 """ 163 with _lock: 164 _levelToName[level] = levelName 165 _nameToLevel[levelName] = level
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
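A sketch of registering a custom level name; the TRACE name and the value 5 are choices made here, not something the module defines:

import logging

TRACE = 5                                # numerically below DEBUG (10)
logging.addLevelName(TRACE, "TRACE")

logging.basicConfig(level=TRACE, format="%(levelname)s %(message)s")
logging.log(TRACE, "very fine-grained detail")    # prints: TRACE very fine-grained detail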
2005def basicConfig(**kwargs): 2006 """ 2007 Do basic configuration for the logging system. 2008 2009 This function does nothing if the root logger already has handlers 2010 configured, unless the keyword argument *force* is set to ``True``. 2011 It is a convenience method intended for use by simple scripts 2012 to do one-shot configuration of the logging package. 2013 2014 The default behaviour is to create a StreamHandler which writes to 2015 sys.stderr, set a formatter using the BASIC_FORMAT format string, and 2016 add the handler to the root logger. 2017 2018 A number of optional keyword arguments may be specified, which can alter 2019 the default behaviour. 2020 2021 filename Specifies that a FileHandler be created, using the specified 2022 filename, rather than a StreamHandler. 2023 filemode Specifies the mode to open the file, if filename is specified 2024 (if filemode is unspecified, it defaults to 'a'). 2025 format Use the specified format string for the handler. 2026 datefmt Use the specified date/time format. 2027 style If a format string is specified, use this to specify the 2028 type of format string (possible values '%', '{', '$', for 2029 %-formatting, :meth:`str.format` and :class:`string.Template` 2030 - defaults to '%'). 2031 level Set the root logger level to the specified level. 2032 stream Use the specified stream to initialize the StreamHandler. Note 2033 that this argument is incompatible with 'filename' - if both 2034 are present, 'stream' is ignored. 2035 handlers If specified, this should be an iterable of already created 2036 handlers, which will be added to the root logger. Any handler 2037 in the list which does not have a formatter assigned will be 2038 assigned the formatter created in this function. 2039 force If this keyword is specified as true, any existing handlers 2040 attached to the root logger are removed and closed, before 2041 carrying out the configuration as specified by the other 2042 arguments. 2043 encoding If specified together with a filename, this encoding is passed to 2044 the created FileHandler, causing it to be used when the file is 2045 opened. 2046 errors If specified together with a filename, this value is passed to the 2047 created FileHandler, causing it to be used when the file is 2048 opened in text mode. If not specified, the default value is 2049 `backslashreplace`. 2050 2051 Note that you could specify a stream created using open(filename, mode) 2052 rather than passing the filename and mode in. However, it should be 2053 remembered that StreamHandler does not close its stream (since it may be 2054 using sys.stdout or sys.stderr), whereas FileHandler closes its stream 2055 when the handler is closed. 2056 2057 .. versionchanged:: 3.2 2058 Added the ``style`` parameter. 2059 2060 .. versionchanged:: 3.3 2061 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for 2062 incompatible arguments (e.g. ``handlers`` specified together with 2063 ``filename``/``filemode``, or ``filename``/``filemode`` specified 2064 together with ``stream``, or ``handlers`` specified together with 2065 ``stream``. 2066 2067 .. versionchanged:: 3.8 2068 Added the ``force`` parameter. 2069 2070 .. versionchanged:: 3.9 2071 Added the ``encoding`` and ``errors`` parameters. 
2072 """ 2073 # Add thread safety in case someone mistakenly calls 2074 # basicConfig() from multiple threads 2075 with _lock: 2076 force = kwargs.pop('force', False) 2077 encoding = kwargs.pop('encoding', None) 2078 errors = kwargs.pop('errors', 'backslashreplace') 2079 if force: 2080 for h in root.handlers[:]: 2081 root.removeHandler(h) 2082 h.close() 2083 if len(root.handlers) == 0: 2084 handlers = kwargs.pop("handlers", None) 2085 if handlers is None: 2086 if "stream" in kwargs and "filename" in kwargs: 2087 raise ValueError("'stream' and 'filename' should not be " 2088 "specified together") 2089 else: 2090 if "stream" in kwargs or "filename" in kwargs: 2091 raise ValueError("'stream' or 'filename' should not be " 2092 "specified together with 'handlers'") 2093 if handlers is None: 2094 filename = kwargs.pop("filename", None) 2095 mode = kwargs.pop("filemode", 'a') 2096 if filename: 2097 if 'b' in mode: 2098 errors = None 2099 else: 2100 encoding = io.text_encoding(encoding) 2101 h = FileHandler(filename, mode, 2102 encoding=encoding, errors=errors) 2103 else: 2104 stream = kwargs.pop("stream", None) 2105 h = StreamHandler(stream) 2106 handlers = [h] 2107 dfs = kwargs.pop("datefmt", None) 2108 style = kwargs.pop("style", '%') 2109 if style not in _STYLES: 2110 raise ValueError('Style must be one of: %s' % ','.join( 2111 _STYLES.keys())) 2112 fs = kwargs.pop("format", _STYLES[style][1]) 2113 fmt = Formatter(fs, dfs, style) 2114 for h in handlers: 2115 if h.formatter is None: 2116 h.setFormatter(fmt) 2117 root.addHandler(h) 2118 level = kwargs.pop("level", None) 2119 if level is not None: 2120 root.setLevel(level) 2121 if kwargs: 2122 keys = ', '.join(kwargs.keys()) 2123 raise ValueError('Unrecognised argument(s): %s' % keys)
def captureWarnings(capture):
    """
    If capture is true, redirect all warnings to the logging package.
    If capture is False, ensure that warnings are not redirected to logging
    but to their original destinations.
    """
    global _warnings_showwarning
    if capture:
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = _showwarning
    else:
        if _warnings_showwarning is not None:
            warnings.showwarning = _warnings_showwarning
            _warnings_showwarning = None
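A sketch of how captureWarnings() is typically used: with capture enabled, warnings raised through the warnings module are logged to the 'py.warnings' logger instead of being written directly to sys.stderr. The basicConfig() call is only there so the records have somewhere to go.

import logging
import warnings

logging.basicConfig(level=logging.WARNING)
logging.captureWarnings(True)    # route warnings.warn(...) into logging

# Emits a WARNING record on the 'py.warnings' logger rather than
# printing the warning text to sys.stderr.
warnings.warn("config file missing, using defaults")

logging.captureWarnings(False)   # restore the original destination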
def critical(msg, *args, **kwargs):
    """
    Log a message with severity 'CRITICAL' on the root logger. If the logger
    has no handlers, call basicConfig() to add a console handler with a
    pre-defined format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.critical(msg, *args, **kwargs)
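critical() and the other module-level convenience functions below (error(), exception(), warning(), info(), debug(), log()) all share this pattern: delegate to the root logger, calling basicConfig() first if no handlers exist yet. A short sketch:

import logging

# The first call triggers an implicit basicConfig(), so a stderr
# handler using BASIC_FORMAT exists from then on.
logging.critical("disk full on %s", "/var")
logging.warning("retrying request (%d/%d)", 2, 5)
logging.info("not shown: the root level defaults to WARNING")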
def debug(msg, *args, **kwargs):
    """
    Log a message with severity 'DEBUG' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.debug(msg, *args, **kwargs)
def disable(level=CRITICAL):
    """
    Disable all logging calls of severity 'level' and below.
    """
    root.manager.disable = level
    root.manager._clear_cache()
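For instance, a noisy test run can be capped at INFO while WARNING and above still get through; passing NOTSET lifts the cap again. A sketch:

import logging

logging.basicConfig(level=logging.DEBUG)
logging.disable(logging.INFO)      # suppress INFO and below everywhere

logging.getLogger("worker").info("suppressed")
logging.getLogger("worker").warning("still emitted")

logging.disable(logging.NOTSET)    # re-enable all levels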
def error(msg, *args, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.error(msg, *args, **kwargs)
def exception(msg, *args, exc_info=True, **kwargs):
    """
    Log a message with severity 'ERROR' on the root logger, with exception
    information. If the logger has no handlers, basicConfig() is called to add
    a console handler with a pre-defined format.
    """
    error(msg, *args, exc_info=exc_info, **kwargs)
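exception() is meant to be called from an exception handler, since exc_info defaults to True and the active traceback is appended to the message:

import logging

try:
    1 / 0
except ZeroDivisionError:
    # Logged at ERROR level with the formatted traceback attached.
    logging.exception("computation failed")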
def fatal(msg, *args, **kwargs):
    """
    Don't use this function, use critical() instead.
    """
    critical(msg, *args, **kwargs)
def getLevelName(level):
    """
    Return the textual or numeric representation of logging level 'level'.

    If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
    INFO, DEBUG) then you get the corresponding string. If you have
    associated levels with names using addLevelName then the name you have
    associated with 'level' is returned.

    If a numeric value corresponding to one of the defined levels is passed
    in, the corresponding string representation is returned.

    If a string representation of the level is passed in, the corresponding
    numeric value is returned.

    If no matching numeric or string value is passed in, the string
    'Level %s' % level is returned.
    """
    # See Issues #22386, #27937 and #29220 for why it's this way
    result = _levelToName.get(level)
    if result is not None:
        return result
    result = _nameToLevel.get(level)
    if result is not None:
        return result
    return "Level %s" % level
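Because the lookup runs in both directions, getLevelName() maps numbers to names and names to numbers, with unknown values falling back to the 'Level %s' form:

import logging

logging.getLevelName(logging.WARNING)   # 'WARNING'
logging.getLevelName('ERROR')           # 40
logging.getLevelName(55)                # 'Level 55' (no registered name)

logging.addLevelName(25, 'NOTICE')
logging.getLevelName(25)                # 'NOTICE'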
def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if not name or isinstance(name, str) and name == root.name:
        return root
    return Logger.manager.getLogger(name)
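Loggers are cached by name and dotted names form a hierarchy, so repeated calls with the same name return the same object. A sketch:

import logging

log = logging.getLogger(__name__)            # module-level logger
assert log is logging.getLogger(__name__)    # same instance both times

parent = logging.getLogger("app")
child = logging.getLogger("app.db")
assert child.parent is parent                # dotted names build the hierarchy

root = logging.getLogger()                   # no name -> the root logger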
def getLoggerClass():
    """
    Return the class to be used when instantiating a logger.
    """
    return _loggerClass
def info(msg, *args, **kwargs):
    """
    Log a message with severity 'INFO' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.info(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
    """
    Log 'msg % args' with the integer severity 'level' on the root logger. If
    the logger has no handlers, call basicConfig() to add a console handler
    with a pre-defined format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.log(level, msg, *args, **kwargs)
def makeLogRecord(dict):
    """
    Make a LogRecord whose attributes are defined by the specified dictionary.
    This function is useful for converting a logging event received over
    a socket connection (which is sent as a dictionary) into a LogRecord
    instance.
    """
    rv = _logRecordFactory(None, None, "", 0, "", (), None, None)
    rv.__dict__.update(dict)
    return rv
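A sketch of rebuilding a record from a dictionary, for example one received from a SocketHandler peer; the attribute values below are made up for illustration:

import logging

logging.basicConfig(level=logging.DEBUG)

event = {
    'name': 'app.net', 'levelno': logging.WARNING, 'levelname': 'WARNING',
    'msg': 'connection dropped', 'args': None,
    'pathname': 'client.py', 'lineno': 42, 'exc_info': None,
}
record = logging.makeLogRecord(event)          # dict -> LogRecord
logging.getLogger(record.name).handle(record)  # dispatch to handlers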
def setLoggerClass(klass):
    """
    Set the class to be used when instantiating a logger. The class should
    define __init__() such that only a name argument is required, and the
    __init__() should call Logger.__init__().
    """
    if klass != Logger:
        if not issubclass(klass, Logger):
            raise TypeError("logger not derived from logging.Logger: "
                            + klass.__name__)
    global _loggerClass
    _loggerClass = klass
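A sketch of installing a custom logger class; as the docstring requires, the subclass takes only a name and chains to Logger.__init__(). The notice() method and the NOTICE level are made-up examples, not stdlib API.

import logging

NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')

class AppLogger(logging.Logger):
    # Hypothetical convenience method for the custom level.
    def notice(self, msg, *args, **kwargs):
        if self.isEnabledFor(NOTICE):
            self._log(NOTICE, msg, args, **kwargs)

logging.setLoggerClass(AppLogger)
log = logging.getLogger('app.audit')   # newly created, so it's an AppLogger
assert isinstance(log, AppLogger)
assert logging.getLoggerClass() is AppLogger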
def shutdown(handlerList=_handlerList):
    """
    Perform any cleanup actions in the logging system (e.g. flushing
    buffers).

    Should be called at application exit.
    """
    for wr in reversed(handlerList[:]):
        #errors might occur, for example, if files are locked
        #we just ignore them if raiseExceptions is not set
        try:
            h = wr()
            if h:
                try:
                    h.acquire()
                    # MemoryHandlers might not want to be flushed on close,
                    # but circular imports prevent us scoping this to just
                    # those handlers. hence the default to True.
                    if getattr(h, 'flushOnClose', True):
                        h.flush()
                    h.close()
                except (OSError, ValueError):
                    # Ignore errors which might be caused
                    # because handlers have been closed but
                    # references to them are still around at
                    # application exit.
                    pass
                finally:
                    h.release()
        except: # ignore everything, as we're shutting down
            if raiseExceptions:
                raise
            #else, swallow
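shutdown() is registered with atexit when the module is imported, so most programs never call it explicitly; a direct call is mainly useful when exit hooks are bypassed, for example in an embedded interpreter. A sketch:

import logging

logging.basicConfig(filename='run.log', level=logging.INFO)  # example file
logging.info("work finished")

# Flush and close every handler. The later atexit call remains safe,
# since errors from already-closed handlers are ignored.
logging.shutdown()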
def warning(msg, *args, **kwargs):
    """
    Log a message with severity 'WARNING' on the root logger. If the logger has
    no handlers, call basicConfig() to add a console handler with a pre-defined
    format.
    """
    if len(root.handlers) == 0:
        basicConfig()
    root.warning(msg, *args, **kwargs)
def getLogRecordFactory():
    """
    Return the factory to be used when instantiating a log record.
    """
    return _logRecordFactory
def setLogRecordFactory(factory):
    """
    Set the factory to be used when instantiating a log record.

    :param factory: A callable which will be called to instantiate
    a log record.
    """
    global _logRecordFactory
    _logRecordFactory = factory
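A sketch of the record-factory hook: wrap the current factory, attach an extra attribute, and reference it from the format string. The request_id field is a made-up example, not a standard LogRecord attribute.

import logging

old_factory = logging.getLogRecordFactory()

def record_factory(*args, **kwargs):
    record = old_factory(*args, **kwargs)
    record.request_id = 'req-123'      # hypothetical per-request value
    return record

logging.setLogRecordFactory(record_factory)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(request_id)s] %(levelname)s %(message)s',
)
logging.info("record factory in effect")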
def getHandlerByName(name):
    """
    Get a handler with the specified *name*, or None if there isn't one with
    that name.
    """
    return _handlers.get(name)
def getHandlerNames():
    """
    Return all known handler names as an immutable set.
    """
    return frozenset(_handlers)
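Handlers are registered here only when their name attribute is set, which logging.config.dictConfig() does using the keys of its 'handlers' section; the 'console' key below is just an example name. A sketch assuming such a configuration:

import logging
import logging.config

logging.config.dictConfig({
    'version': 1,
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'level': 'INFO'},
    },
    'root': {'handlers': ['console'], 'level': 'INFO'},
})

logging.getHandlerNames()                      # frozenset({'console'})
handler = logging.getHandlerByName('console')
handler.setLevel(logging.DEBUG)                # adjust the named handler later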